v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017-2018 HUAWEI, Inc.
  4 *             https://www.huawei.com/
  5 * Copyright (C) 2021, Alibaba Cloud
  6 */
  7#include "internal.h"
  8#include <linux/sched/mm.h>
  9#include <trace/events/erofs.h>
 10
 11void erofs_unmap_metabuf(struct erofs_buf *buf)
 12{
 13	if (buf->kmap_type == EROFS_KMAP)
 14		kunmap_local(buf->base);
 15	buf->base = NULL;
 16	buf->kmap_type = EROFS_NO_KMAP;
 17}
 18
 19void erofs_put_metabuf(struct erofs_buf *buf)
 20{
 21	if (!buf->page)
 22		return;
 23	erofs_unmap_metabuf(buf);
 24	put_page(buf->page);
 25	buf->page = NULL;
 26}
 27
 28/*
 29 * Derive the block size from inode->i_blkbits to make compatible with
 30 * anonymous inode in fscache mode.
 31 */
 32void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
 33		  enum erofs_kmap_type type)
 34{
 35	struct inode *inode = buf->inode;
 36	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
 37	pgoff_t index = offset >> PAGE_SHIFT;
 38	struct page *page = buf->page;
 39	struct folio *folio;
 40	unsigned int nofs_flag;
 41
 42	if (!page || page->index != index) {
 43		erofs_put_metabuf(buf);
 44
 45		nofs_flag = memalloc_nofs_save();
 46		folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
 47		memalloc_nofs_restore(nofs_flag);
 48		if (IS_ERR(folio))
 49			return folio;
 50
 51		/* should already be PageUptodate, no need to lock page */
 52		page = folio_file_page(folio, index);
 53		buf->page = page;
 54	}
 55	if (buf->kmap_type == EROFS_NO_KMAP) {
 56		if (type == EROFS_KMAP)
 57			buf->base = kmap_local_page(page);
 58		buf->kmap_type = type;
 59	} else if (buf->kmap_type != type) {
 60		DBG_BUGON(1);
 61		return ERR_PTR(-EFAULT);
 62	}
 63	if (type == EROFS_NO_KMAP)
 64		return NULL;
 65	return buf->base + (offset & ~PAGE_MASK);
 66}
 67
 68void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
 69{
 70	if (erofs_is_fscache_mode(sb))
 71		buf->inode = EROFS_SB(sb)->s_fscache->inode;
 72	else
 73		buf->inode = sb->s_bdev->bd_inode;
 74}
 75
 76void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
 77			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
 78{
 79	erofs_init_metabuf(buf, sb);
 80	return erofs_bread(buf, blkaddr, type);
 81}
 82
 83static int erofs_map_blocks_flatmode(struct inode *inode,
 84				     struct erofs_map_blocks *map)
 85{
 86	erofs_blk_t nblocks, lastblk;
 87	u64 offset = map->m_la;
 88	struct erofs_inode *vi = EROFS_I(inode);
 89	struct super_block *sb = inode->i_sb;
 90	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
 91
 92	nblocks = erofs_iblks(inode);
 93	lastblk = nblocks - tailendpacking;
 94
 95	/* there is no hole in flatmode */
 96	map->m_flags = EROFS_MAP_MAPPED;
 97	if (offset < erofs_pos(sb, lastblk)) {
 98		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
 99		map->m_plen = erofs_pos(sb, lastblk) - offset;
100	} else if (tailendpacking) {
101		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
102			vi->xattr_isize + erofs_blkoff(sb, offset);
103		map->m_plen = inode->i_size - offset;
104
105		/* inline data should be located in the same meta block */
106		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
107			erofs_err(sb, "inline data cross block boundary @ nid %llu",
108				  vi->nid);
109			DBG_BUGON(1);
110			return -EFSCORRUPTED;
111		}
112		map->m_flags |= EROFS_MAP_META;
113	} else {
114		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
115			  vi->nid, inode->i_size, map->m_la);
116		DBG_BUGON(1);
117		return -EIO;
118	}
119	return 0;
120}
121
122int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
123{
124	struct super_block *sb = inode->i_sb;
125	struct erofs_inode *vi = EROFS_I(inode);
126	struct erofs_inode_chunk_index *idx;
127	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
128	u64 chunknr;
129	unsigned int unit;
130	erofs_off_t pos;
131	void *kaddr;
132	int err = 0;
133
134	trace_erofs_map_blocks_enter(inode, map, 0);
135	map->m_deviceid = 0;
136	if (map->m_la >= inode->i_size) {
137		/* leave out-of-bound access unmapped */
138		map->m_flags = 0;
139		map->m_plen = 0;
140		goto out;
141	}
142
143	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
144		err = erofs_map_blocks_flatmode(inode, map);
145		goto out;
146	}
147
148	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
149		unit = sizeof(*idx);			/* chunk index */
150	else
151		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */
152
153	chunknr = map->m_la >> vi->chunkbits;
154	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
155		    vi->xattr_isize, unit) + unit * chunknr;
156
157	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
158	if (IS_ERR(kaddr)) {
159		err = PTR_ERR(kaddr);
160		goto out;
161	}
162	map->m_la = chunknr << vi->chunkbits;
163	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
164			round_up(inode->i_size - map->m_la, sb->s_blocksize));
165
166	/* handle block map */
167	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
168		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);
169
170		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
171			map->m_flags = 0;
172		} else {
173			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
174			map->m_flags = EROFS_MAP_MAPPED;
175		}
176		goto out_unlock;
177	}
178	/* parse chunk indexes */
179	idx = kaddr + erofs_blkoff(sb, pos);
180	switch (le32_to_cpu(idx->blkaddr)) {
181	case EROFS_NULL_ADDR:
182		map->m_flags = 0;
183		break;
184	default:
185		map->m_deviceid = le16_to_cpu(idx->device_id) &
186			EROFS_SB(sb)->device_id_mask;
187		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
188		map->m_flags = EROFS_MAP_MAPPED;
189		break;
190	}
191out_unlock:
192	erofs_put_metabuf(&buf);
193out:
194	if (!err)
195		map->m_llen = map->m_plen;
196	trace_erofs_map_blocks_exit(inode, map, 0, err);
197	return err;
198}
199
200int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
201{
202	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
203	struct erofs_device_info *dif;
204	int id;
205
206	map->m_bdev = sb->s_bdev;
207	map->m_daxdev = EROFS_SB(sb)->dax_dev;
208	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
209	map->m_fscache = EROFS_SB(sb)->s_fscache;
210
211	if (map->m_deviceid) {
212		down_read(&devs->rwsem);
213		dif = idr_find(&devs->tree, map->m_deviceid - 1);
214		if (!dif) {
215			up_read(&devs->rwsem);
216			return -ENODEV;
217		}
218		if (devs->flatdev) {
219			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
220			up_read(&devs->rwsem);
221			return 0;
222		}
223		map->m_bdev = dif->bdev_handle ? dif->bdev_handle->bdev : NULL;
224		map->m_daxdev = dif->dax_dev;
225		map->m_dax_part_off = dif->dax_part_off;
226		map->m_fscache = dif->fscache;
227		up_read(&devs->rwsem);
228	} else if (devs->extra_devices && !devs->flatdev) {
229		down_read(&devs->rwsem);
230		idr_for_each_entry(&devs->tree, dif, id) {
231			erofs_off_t startoff, length;
232
233			if (!dif->mapped_blkaddr)
234				continue;
235			startoff = erofs_pos(sb, dif->mapped_blkaddr);
236			length = erofs_pos(sb, dif->blocks);
237
238			if (map->m_pa >= startoff &&
239			    map->m_pa < startoff + length) {
240				map->m_pa -= startoff;
241				map->m_bdev = dif->bdev_handle ?
242					      dif->bdev_handle->bdev : NULL;
243				map->m_daxdev = dif->dax_dev;
244				map->m_dax_part_off = dif->dax_part_off;
245				map->m_fscache = dif->fscache;
246				break;
247			}
248		}
249		up_read(&devs->rwsem);
250	}
251	return 0;
252}
253
254static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
255		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
256{
257	int ret;
258	struct super_block *sb = inode->i_sb;
259	struct erofs_map_blocks map;
260	struct erofs_map_dev mdev;
261
262	map.m_la = offset;
263	map.m_llen = length;
264
265	ret = erofs_map_blocks(inode, &map);
266	if (ret < 0)
267		return ret;
268
269	mdev = (struct erofs_map_dev) {
270		.m_deviceid = map.m_deviceid,
271		.m_pa = map.m_pa,
272	};
273	ret = erofs_map_dev(sb, &mdev);
274	if (ret)
275		return ret;
276
277	iomap->offset = map.m_la;
278	if (flags & IOMAP_DAX)
279		iomap->dax_dev = mdev.m_daxdev;
280	else
281		iomap->bdev = mdev.m_bdev;
282	iomap->length = map.m_llen;
283	iomap->flags = 0;
284	iomap->private = NULL;
285
286	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
287		iomap->type = IOMAP_HOLE;
288		iomap->addr = IOMAP_NULL_ADDR;
289		if (!iomap->length)
290			iomap->length = length;
291		return 0;
292	}
293
294	if (map.m_flags & EROFS_MAP_META) {
295		void *ptr;
296		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
297
298		iomap->type = IOMAP_INLINE;
299		ptr = erofs_read_metabuf(&buf, sb,
300				erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
301		if (IS_ERR(ptr))
302			return PTR_ERR(ptr);
303		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
304		iomap->private = buf.base;
305	} else {
306		iomap->type = IOMAP_MAPPED;
307		iomap->addr = mdev.m_pa;
308		if (flags & IOMAP_DAX)
309			iomap->addr += mdev.m_dax_part_off;
310	}
311	return 0;
312}
313
314static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
315		ssize_t written, unsigned int flags, struct iomap *iomap)
316{
317	void *ptr = iomap->private;
318
319	if (ptr) {
320		struct erofs_buf buf = {
321			.page = kmap_to_page(ptr),
322			.base = ptr,
323			.kmap_type = EROFS_KMAP,
324		};
325
326		DBG_BUGON(iomap->type != IOMAP_INLINE);
327		erofs_put_metabuf(&buf);
328	} else {
329		DBG_BUGON(iomap->type == IOMAP_INLINE);
330	}
331	return written;
332}
333
334static const struct iomap_ops erofs_iomap_ops = {
335	.iomap_begin = erofs_iomap_begin,
336	.iomap_end = erofs_iomap_end,
337};
338
339int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
340		 u64 start, u64 len)
341{
342	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
343#ifdef CONFIG_EROFS_FS_ZIP
344		return iomap_fiemap(inode, fieinfo, start, len,
345				    &z_erofs_iomap_report_ops);
346#else
347		return -EOPNOTSUPP;
348#endif
349	}
350	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
351}
352
353/*
354 * since we dont have write or truncate flows, so no inode
355 * locking needs to be held at the moment.
356 */
357static int erofs_read_folio(struct file *file, struct folio *folio)
358{
359	return iomap_read_folio(folio, &erofs_iomap_ops);
360}
361
362static void erofs_readahead(struct readahead_control *rac)
363{
364	return iomap_readahead(rac, &erofs_iomap_ops);
365}
366
367static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
368{
369	return iomap_bmap(mapping, block, &erofs_iomap_ops);
370}
371
372static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
373{
374	struct inode *inode = file_inode(iocb->ki_filp);
375
376	/* no need taking (shared) inode lock since it's a ro filesystem */
377	if (!iov_iter_count(to))
378		return 0;
379
380#ifdef CONFIG_FS_DAX
381	if (IS_DAX(inode))
382		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
383#endif
384	if (iocb->ki_flags & IOCB_DIRECT) {
385		struct block_device *bdev = inode->i_sb->s_bdev;
386		unsigned int blksize_mask;
387
388		if (bdev)
389			blksize_mask = bdev_logical_block_size(bdev) - 1;
390		else
391			blksize_mask = i_blocksize(inode) - 1;
392
393		if ((iocb->ki_pos | iov_iter_count(to) |
394		     iov_iter_alignment(to)) & blksize_mask)
395			return -EINVAL;
396
397		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
398				    NULL, 0, NULL, 0);
399	}
400	return filemap_read(iocb, to, 0);
401}
402
403/* for uncompressed (aligned) files and raw access for other files */
404const struct address_space_operations erofs_raw_access_aops = {
405	.read_folio = erofs_read_folio,
406	.readahead = erofs_readahead,
407	.bmap = erofs_bmap,
408	.direct_IO = noop_direct_IO,
409	.release_folio = iomap_release_folio,
410	.invalidate_folio = iomap_invalidate_folio,
411};
412
413#ifdef CONFIG_FS_DAX
414static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
415		unsigned int order)
416{
417	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
418}
419
420static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
421{
422	return erofs_dax_huge_fault(vmf, 0);
423}
424
425static const struct vm_operations_struct erofs_dax_vm_ops = {
426	.fault		= erofs_dax_fault,
427	.huge_fault	= erofs_dax_huge_fault,
428};
429
430static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
431{
432	if (!IS_DAX(file_inode(file)))
433		return generic_file_readonly_mmap(file, vma);
434
435	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
436		return -EINVAL;
437
438	vma->vm_ops = &erofs_dax_vm_ops;
439	vm_flags_set(vma, VM_HUGEPAGE);
440	return 0;
441}
442#else
443#define erofs_file_mmap	generic_file_readonly_mmap
444#endif
445
446const struct file_operations erofs_file_fops = {
447	.llseek		= generic_file_llseek,
448	.read_iter	= erofs_file_read_iter,
449	.mmap		= erofs_file_mmap,
450	.get_unmapped_area = thp_get_unmapped_area,
451	.splice_read	= filemap_splice_read,
452};
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017-2018 HUAWEI, Inc.
  4 *             https://www.huawei.com/
  5 */
  6#include "internal.h"
  7#include <linux/prefetch.h>
  8
  9#include <trace/events/erofs.h>
 10
 11static void erofs_readendio(struct bio *bio)
 12{
 13	struct bio_vec *bvec;
 14	blk_status_t err = bio->bi_status;
 15	struct bvec_iter_all iter_all;
 16
 17	bio_for_each_segment_all(bvec, bio, iter_all) {
 18		struct page *page = bvec->bv_page;
 19
 20		/* page is already locked */
 21		DBG_BUGON(PageUptodate(page));
 22
 23		if (err)
 24			SetPageError(page);
 25		else
 26			SetPageUptodate(page);
 27
 28		unlock_page(page);
 29		/* page could be reclaimed now */
 30	}
 31	bio_put(bio);
 32}
 33
 34struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
 35{
 36	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
 37	struct page *page;
 38
 39	page = read_cache_page_gfp(mapping, blkaddr,
 40				   mapping_gfp_constraint(mapping, ~__GFP_FS));
 41	/* should already be PageUptodate */
 42	if (!IS_ERR(page))
 43		lock_page(page);
 44	return page;
 45}
 46
 47static int erofs_map_blocks_flatmode(struct inode *inode,
 48				     struct erofs_map_blocks *map,
 49				     int flags)
 50{
 51	int err = 0;
 52	erofs_blk_t nblocks, lastblk;
 53	u64 offset = map->m_la;
 54	struct erofs_inode *vi = EROFS_I(inode);
 55	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
 56
 57	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
 58
 59	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
 60	lastblk = nblocks - tailendpacking;
 61
 62	if (offset >= inode->i_size) {
 63		/* leave out-of-bound access unmapped */
 64		map->m_flags = 0;
 65		map->m_plen = 0;
 66		goto out;
 67	}
 68
 69	/* there is no hole in flatmode */
 70	map->m_flags = EROFS_MAP_MAPPED;
 71
 72	if (offset < blknr_to_addr(lastblk)) {
 73		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
 74		map->m_plen = blknr_to_addr(lastblk) - offset;
 75	} else if (tailendpacking) {
 76		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
 77		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
 78
 79		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
 80			vi->xattr_isize + erofs_blkoff(map->m_la);
 81		map->m_plen = inode->i_size - offset;
 82
 83		/* inline data should be located in one meta block */
 84		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
 85			erofs_err(inode->i_sb,
 86				  "inline data cross block boundary @ nid %llu",
 87				  vi->nid);
 88			DBG_BUGON(1);
 89			err = -EFSCORRUPTED;
 90			goto err_out;
 91		}
 92
 93		map->m_flags |= EROFS_MAP_META;
 94	} else {
 95		erofs_err(inode->i_sb,
 96			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
 97			  vi->nid, inode->i_size, map->m_la);
 98		DBG_BUGON(1);
 99		err = -EIO;
100		goto err_out;
101	}
102
103out:
104	map->m_llen = map->m_plen;
105
106err_out:
107	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
108	return err;
109}
110
111static inline struct bio *erofs_read_raw_page(struct bio *bio,
112					      struct address_space *mapping,
113					      struct page *page,
114					      erofs_off_t *last_block,
115					      unsigned int nblocks,
116					      unsigned int *eblks,
117					      bool ra)
118{
119	struct inode *const inode = mapping->host;
120	struct super_block *const sb = inode->i_sb;
121	erofs_off_t current_block = (erofs_off_t)page->index;
122	int err;
123
124	DBG_BUGON(!nblocks);
125
126	if (PageUptodate(page)) {
127		err = 0;
128		goto has_updated;
129	}
130
131	/* note that for readpage case, bio also equals to NULL */
132	if (bio &&
133	    (*last_block + 1 != current_block || !*eblks)) {
134submit_bio_retry:
135		submit_bio(bio);
136		bio = NULL;
137	}
138
139	if (!bio) {
140		struct erofs_map_blocks map = {
141			.m_la = blknr_to_addr(current_block),
142		};
143		erofs_blk_t blknr;
144		unsigned int blkoff;
145
146		err = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
147		if (err)
148			goto err_out;
149
150		/* zero out the holed page */
151		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
152			zero_user_segment(page, 0, PAGE_SIZE);
153			SetPageUptodate(page);
154
155			/* imply err = 0, see erofs_map_blocks */
156			goto has_updated;
157		}
158
159		/* for RAW access mode, m_plen must be equal to m_llen */
160		DBG_BUGON(map.m_plen != map.m_llen);
161
162		blknr = erofs_blknr(map.m_pa);
163		blkoff = erofs_blkoff(map.m_pa);
164
165		/* deal with inline page */
166		if (map.m_flags & EROFS_MAP_META) {
167			void *vsrc, *vto;
168			struct page *ipage;
169
170			DBG_BUGON(map.m_plen > PAGE_SIZE);
171
172			ipage = erofs_get_meta_page(inode->i_sb, blknr);
173
174			if (IS_ERR(ipage)) {
175				err = PTR_ERR(ipage);
176				goto err_out;
177			}
178
179			vsrc = kmap_atomic(ipage);
180			vto = kmap_atomic(page);
181			memcpy(vto, vsrc + blkoff, map.m_plen);
182			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
183			kunmap_atomic(vto);
184			kunmap_atomic(vsrc);
185			flush_dcache_page(page);
186
187			SetPageUptodate(page);
188			/* TODO: could we unlock the page earlier? */
189			unlock_page(ipage);
190			put_page(ipage);
191
192			/* imply err = 0, see erofs_map_blocks */
193			goto has_updated;
194		}
195
196		/* pa must be block-aligned for raw reading */
197		DBG_BUGON(erofs_blkoff(map.m_pa));
198
199		/* max # of continuous pages */
200		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
201			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
202
203		*eblks = bio_max_segs(nblocks);
204		bio = bio_alloc(GFP_NOIO, *eblks);
205
206		bio->bi_end_io = erofs_readendio;
207		bio_set_dev(bio, sb->s_bdev);
208		bio->bi_iter.bi_sector = (sector_t)blknr <<
209			LOG_SECTORS_PER_BLOCK;
210		bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
211	}
212
213	err = bio_add_page(bio, page, PAGE_SIZE, 0);
214	/* out of the extent or bio is full */
215	if (err < PAGE_SIZE)
216		goto submit_bio_retry;
217	--*eblks;
218	*last_block = current_block;
219	return bio;
220
221err_out:
222	/* for sync reading, set page error immediately */
223	if (!ra) {
224		SetPageError(page);
225		ClearPageUptodate(page);
226	}
227has_updated:
228	unlock_page(page);
229
230	/* if updated manually, continuous pages has a gap */
231	if (bio)
232		submit_bio(bio);
233	return err ? ERR_PTR(err) : NULL;
234}
235
236/*
237 * since we dont have write or truncate flows, so no inode
238 * locking needs to be held at the moment.
239 */
240static int erofs_raw_access_readpage(struct file *file, struct page *page)
241{
242	erofs_off_t last_block;
243	unsigned int eblks;
244	struct bio *bio;
245
246	trace_erofs_readpage(page, true);
247
248	bio = erofs_read_raw_page(NULL, page->mapping,
249				  page, &last_block, 1, &eblks, false);
250
251	if (IS_ERR(bio))
252		return PTR_ERR(bio);
253
254	if (bio)
255		submit_bio(bio);
256	return 0;
257}
258
259static void erofs_raw_access_readahead(struct readahead_control *rac)
260{
261	erofs_off_t last_block;
262	unsigned int eblks;
263	struct bio *bio = NULL;
264	struct page *page;
265
266	trace_erofs_readpages(rac->mapping->host, readahead_index(rac),
267			readahead_count(rac), true);
268
269	while ((page = readahead_page(rac))) {
270		prefetchw(&page->flags);
271
272		bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
273				readahead_count(rac), &eblks, true);
274
275		/* all the page errors are ignored when readahead */
276		if (IS_ERR(bio)) {
277			pr_err("%s, readahead error at page %lu of nid %llu\n",
278			       __func__, page->index,
279			       EROFS_I(rac->mapping->host)->nid);
280
281			bio = NULL;
282		}
283
284		put_page(page);
285	}
286
287	if (bio)
288		submit_bio(bio);
289}
290
291static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
292{
293	struct inode *inode = mapping->host;
294	struct erofs_map_blocks map = {
295		.m_la = blknr_to_addr(block),
296	};
297
298	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
299		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;
300
301		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
302			return 0;
303	}
304
305	if (!erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW))
306		return erofs_blknr(map.m_pa);
307
308	return 0;
309}
310
311/* for uncompressed (aligned) files and raw access for other files */
312const struct address_space_operations erofs_raw_access_aops = {
313	.readpage = erofs_raw_access_readpage,
314	.readahead = erofs_raw_access_readahead,
315	.bmap = erofs_bmap,
316};