// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/file.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  file VFS functions
 */

#include "hpfs_fn.h"
#include <linux/mpage.h>
#include <linux/iomap.h>
#include <linux/fiemap.h>

#define BLOCKS(size) (((size) + 511) >> 9)

static int hpfs_file_release(struct inode *inode, struct file *file)
{
	hpfs_lock(inode->i_sb);
	hpfs_write_if_changed(inode);
	hpfs_unlock(inode->i_sb);
	return 0;
}

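/*
 * HPFS has no journal: flush the dirty data range first, then flush the
 * whole block device so that any dirty metadata buffers reach the disk too.
 */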
int hpfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;
	return sync_blockdev(inode->i_sb->s_bdev);
}

/*
 * generic_file_read often calls bmap with a non-existent sector,
 * so we must ignore such errors.
 */

static secno hpfs_bmap(struct inode *inode, unsigned file_secno, unsigned *n_secs)
{
	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
	unsigned n, disk_secno;
	struct fnode *fnode;
	struct buffer_head *bh;
	if (BLOCKS(hpfs_i(inode)->mmu_private) <= file_secno) return 0;
	n = file_secno - hpfs_inode->i_file_sec;
	if (n < hpfs_inode->i_n_secs) {
		*n_secs = hpfs_inode->i_n_secs - n;
		return hpfs_inode->i_disk_sec + n;
	}
	if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0;
	disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh);
	if (disk_secno == -1) return 0;
	if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0;
	n = file_secno - hpfs_inode->i_file_sec;
	if (n < hpfs_inode->i_n_secs) {
		*n_secs = hpfs_inode->i_n_secs - n;
		return hpfs_inode->i_disk_sec + n;
	}
	*n_secs = 1;
	return disk_secno;
}

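/*
 * Trim the allocation btree back to i_size. The caller must hold the
 * per-sb lock; the cached extent in i_n_secs/i_disk_sec is invalidated
 * so hpfs_bmap() does not hand out stale mappings.
 */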
void hpfs_truncate(struct inode *i)
{
	if (IS_IMMUTABLE(i)) return /*-EPERM*/;
	hpfs_lock_assert(i->i_sb);

	hpfs_i(i)->i_n_secs = 0;
	i->i_blocks = 1 + ((i->i_size + 511) >> 9);
	hpfs_i(i)->mmu_private = i->i_size;
	hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9));
	hpfs_write_inode(i);
	hpfs_i(i)->i_n_secs = 0;
}

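/*
 * get_block callback for the buffer_head based paths (mpage reads and
 * writeback, cont_write_begin, generic_block_bmap). Maps the logical
 * sector through hpfs_bmap() and the hotfix map; when 'create' is set it
 * appends exactly one sector at the current end of file (mmu_private)
 * to the allocation btree.
 */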
static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
	int r;
	secno s;
	unsigned n_secs;
	hpfs_lock(inode->i_sb);
	s = hpfs_bmap(inode, iblock, &n_secs);
	if (s) {
		if (bh_result->b_size >> 9 < n_secs)
			n_secs = bh_result->b_size >> 9;
		n_secs = hpfs_search_hotfix_map_for_range(inode->i_sb, s, n_secs);
		if (unlikely(!n_secs)) {
			s = hpfs_search_hotfix_map(inode->i_sb, s);
			n_secs = 1;
		}
		map_bh(bh_result, inode->i_sb, s);
		bh_result->b_size = n_secs << 9;
		goto ret_0;
	}
	if (!create) goto ret_0;
	if (iblock<<9 != hpfs_i(inode)->mmu_private) {
		BUG();
		r = -EIO;
		goto ret_r;
	}
	if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) {
		hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1);
		r = -ENOSPC;
		goto ret_r;
	}
	inode->i_blocks++;
	hpfs_i(inode)->mmu_private += 512;
	set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, hpfs_search_hotfix_map(inode->i_sb, s));
	ret_0:
	r = 0;
	ret_r:
	hpfs_unlock(inode->i_sb);
	return r;
}

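/*
 * Read-only iomap, used by FIEMAP below. Writes still go through
 * hpfs_get_block(), hence the WARN_ON_ONCE for write/zero requests.
 */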
static int hpfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct super_block *sb = inode->i_sb;
	unsigned int blkbits = inode->i_blkbits;
	unsigned int n_secs;
	secno s;

	if (WARN_ON_ONCE(flags & (IOMAP_WRITE | IOMAP_ZERO)))
		return -EINVAL;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = offset;

	hpfs_lock(sb);
	s = hpfs_bmap(inode, offset >> blkbits, &n_secs);
	if (s) {
		n_secs = hpfs_search_hotfix_map_for_range(sb, s,
				min_t(loff_t, n_secs, length));
		if (unlikely(!n_secs)) {
			s = hpfs_search_hotfix_map(sb, s);
			n_secs = 1;
		}
		iomap->type = IOMAP_MAPPED;
		iomap->flags = IOMAP_F_MERGED;
		iomap->addr = (u64)s << blkbits;
		iomap->length = (u64)n_secs << blkbits;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->length = 1 << blkbits;
	}

	hpfs_unlock(sb);
	return 0;
}

static const struct iomap_ops hpfs_iomap_ops = {
	.iomap_begin = hpfs_iomap_begin,
};

static int hpfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, hpfs_get_block);
}

static void hpfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, hpfs_get_block);
}

static int hpfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hpfs_get_block);
}

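/*
 * A failed or short write may have extended the allocation past i_size;
 * drop the extra page cache and trim the btree back to i_size.
 */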
static void hpfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	hpfs_lock(inode->i_sb);

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		hpfs_truncate(inode);
	}

	hpfs_unlock(inode->i_sb);
}

static int hpfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
				hpfs_get_block,
				&hpfs_i(mapping->host)->mmu_private);
	if (unlikely(ret))
		hpfs_write_failed(mapping, pos + len);

	return ret;
}

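/*
 * generic_write_end() returns the number of bytes copied; anything short
 * of 'len' means the allocation done in write_begin must be trimmed back.
 * On success the inode is marked dirty so it is written out on release
 * at the latest.
 */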
static int hpfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *pagep, void *fsdata)
{
	struct inode *inode = mapping->host;
	int err;
	err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
	if (err < len)
		hpfs_write_failed(mapping, pos + len);
	if (!(err < 0)) {
		/* make sure we write it on close, if not earlier */
		hpfs_lock(inode->i_sb);
		hpfs_i(inode)->i_dirty = 1;
		hpfs_unlock(inode->i_sb);
	}
	return err;
}

static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hpfs_get_block);
}

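/* FIEMAP is served from the read-only iomap above, clamped to i_size. */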
static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len)
{
	int ret;

	inode_lock(inode);
	len = min_t(u64, len, i_size_read(inode));
	ret = iomap_fiemap(inode, fieinfo, start, len, &hpfs_iomap_ops);
	inode_unlock(inode);

	return ret;
}

const struct address_space_operations hpfs_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = hpfs_read_folio,
	.readahead = hpfs_readahead,
	.writepages = hpfs_writepages,
	.write_begin = hpfs_write_begin,
	.write_end = hpfs_write_end,
	.bmap = _hpfs_bmap,
	.migrate_folio = buffer_migrate_folio,
};

const struct file_operations hpfs_file_ops =
{
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap = generic_file_mmap,
	.release = hpfs_file_release,
	.fsync = hpfs_file_fsync,
	.splice_read = generic_file_splice_read,
	.unlocked_ioctl = hpfs_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

const struct inode_operations hpfs_file_iops =
{
	.setattr = hpfs_setattr,
	.fiemap = hpfs_fiemap,
};