// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * from
 *
 * linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

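/*
 * Split a logical block number into a path through the block tree:
 * the slot in the inode's direct array, then the slot at each level
 * of indirect block.  Returns the depth of the path (0 if the block
 * is out of range).  Note that i_block is a block, not a fragment,
 * number; callers convert with >> s_fpbshift.  As a rough example
 * (assuming UFS_NDADDR == 12 direct pointers and s_apb == 1024
 * pointers per block): blocks 0..11 are direct, blocks 12..1035 go
 * through the single indirect block, the next 1024^2 through the
 * double indirect, and so on.
 */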
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

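/*
 * An Indirect describes one step of a lookup through the indirect
 * chain: where the pointer lives (p), the value it had when we read
 * it (key32/key64), and the buffer holding it (bh).
 *
 * grow_chain32()/grow_chain64() below append one step to the chain.
 * The pointer is sampled under ufsi->meta_lock (a seqlock), and every
 * earlier step is re-checked against its saved key; if truncate has
 * changed any link in the meantime they return 0, and the caller in
 * ufs_frag_map() drops the chain and restarts the walk from the top.
 */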
typedef struct {
	void *p;
	union {
		__fs32 key32;
		__fs64 key64;
	};
	struct buffer_head *bh;
} Indirect;

static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d, uspi->s_apbmask = %x, mask=%llx\n",
	     uspi->s_fpbshift, uspi->s_apbmask,
	     (unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
			      fs32_to_cpu(sb, q->key32) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
			      fs64_to_cpu(sb, q->key64) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}

/*
 * Unpacking tails: we have a file with a partial final block and
 * we have been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to a full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
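/*
 * A worked example (assuming 1K fragments and 8K blocks, i.e.
 * s_fpb == 8 and s_fpbmask == 7): if i_lastfrag is 11, the tail is
 * the 3 fragments 8..10 of block 1.  A write to fragment 13 stays in
 * that block, so writes_to < (11 | 7) == 15 holds and the tail grows
 * to (13 & 7) + 1 == 6 fragments, i.e. 6 - 3 == 3 more fragments are
 * requested; a write to fragment 15 or beyond grows the tail to the
 * full s_fpb == 8.
 */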
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
		int *err, struct folio *locked_folio)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
	unsigned block = ufs_fragstoblks(lastfrag);
	unsigned new_size;
	void *p;
	u64 tmp;

	if (writes_to < (lastfrag | uspi->s_fpbmask))
		new_size = (writes_to & uspi->s_fpbmask) + 1;
	else
		new_size = uspi->s_fpb;

	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
				new_size - (lastfrag & uspi->s_fpbmask), err,
				locked_folio);
	return tmp != 0;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array.
 * @new_fragment: number of new allocated fragment(s)
 * @err: set if something goes wrong
 * @new: set if a new block is allocated
 * @locked_folio: for ufs_new_fragments()
 */
static u64 ufs_inode_getfrag(struct inode *inode, unsigned index,
			     sector_t new_fragment, int *err,
			     int *new, struct folio *locked_folio)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp, goal, lastfrag;
	unsigned nfrags = uspi->s_fpb;
	void *p;

	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	lastfrag = ufsi->i_lastfrag;

	/* will that be a new tail? */
	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
		nfrags = (new_fragment & uspi->s_fpbmask) + 1;

	goal = 0;
	if (index) {
		goal = ufs_data_ptr_to_cpu(sb,
				ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
		if (goal)
			goal += uspi->s_fpb;
	}
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
				goal, nfrags, err, locked_folio);

	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (new)
		*new = 1;
	inode_set_ctime_current(inode);
	if (IS_SYNC(inode))
		ufs_sync_inode(inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of new allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_folio: see ufs_inode_getfrag()
 */
static u64 ufs_inode_getblock(struct inode *inode, u64 ind_block,
			      unsigned index, sector_t new_fragment, int *err,
			      int *new, struct folio *locked_folio)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	u64 tmp = 0, goal;
	struct buffer_head *bh;
	void *p;

	if (!ind_block)
		return 0;

	bh = sb_bread(sb, ind_block + (index >> shift));
	if (unlikely(!bh)) {
		*err = -EIO;
		return 0;
	}

	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + index;
	else
		p = (__fs32 *)bh->b_data + index;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (index && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_folio);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);
out:
	brelse(bh);
	UFSD("EXIT\n");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * read_folio, writepages and so on
 */

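/*
 * As with any get_block_t: @fragment is the logical fragment within
 * the file, and on success the physical location is stored into
 * @bh_result via map_bh().  With create == 0 this is a pure lookup
 * (holes are left unmapped); with create == 1 missing fragments are
 * allocated, and buffer_new is set on @bh_result whenever fresh
 * storage was obtained.
 */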
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int err = 0, new = 0;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;

	phys64 = ufs_frag_map(inode, offsets, depth);
	if (!create)
		goto done;

	if (phys64) {
		if (fragment >= UFS_NDIR_FRAGMENT)
			goto done;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		if (fragment < UFS_I(inode)->i_lastfrag) {
			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
			goto done;
		}
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
	}
	/* Presumably this point is only reached when writing. */

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (unlikely(!depth)) {
		ufs_warning(sb, "ufs_get_block", "block > big");
		err = -EIO;
		goto out;
	}

	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
		if (tailfrags && fragment >= lastfrag) {
			if (!ufs_extend_tail(inode, fragment,
					     &err, bh_result->b_folio))
				goto out;
		}
	}

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, &new, bh_result->b_folio);
	} else {
		int i;
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL);
		for (i = 1; i < depth - 1; i++)
			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
						    fragment, &err, NULL, NULL);
		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
					    fragment, &err, &new, bh_result->b_folio);
	}
out:
	if (phys64) {
		phys64 += frag;
		map_bh(bh_result, sb, phys64);
		if (new)
			set_buffer_new(bh_result);
	}
	mutex_unlock(&UFS_I(inode)->truncate_mutex);
	return err;

done:
	if (phys64)
		map_bh(bh_result, sb, phys64 + frag);
	return 0;
}
448
449static int ufs_writepages(struct address_space *mapping,
450 struct writeback_control *wbc)
451{
452 return mpage_writepages(mapping, wbc, ufs_getfrag_block);
453}
454
455static int ufs_read_folio(struct file *file, struct folio *folio)
456{
457 return block_read_full_folio(folio, ufs_getfrag_block);
458}
459
460int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
461{
462 return __block_write_begin(folio, pos, len, ufs_getfrag_block);
463}
464
465static void ufs_truncate_blocks(struct inode *);
466
467static void ufs_write_failed(struct address_space *mapping, loff_t to)
468{
469 struct inode *inode = mapping->host;
470
471 if (to > inode->i_size) {
472 truncate_pagecache(inode, inode->i_size);
473 ufs_truncate_blocks(inode);
474 }
475}
476
477static int ufs_write_begin(struct file *file, struct address_space *mapping,
478 loff_t pos, unsigned len,
479 struct folio **foliop, void **fsdata)
480{
481 int ret;
482
483 ret = block_write_begin(mapping, pos, len, foliop, ufs_getfrag_block);
484 if (unlikely(ret))
485 ufs_write_failed(mapping, pos + len);
486
487 return ret;
488}
489
490static int ufs_write_end(struct file *file, struct address_space *mapping,
491 loff_t pos, unsigned len, unsigned copied,
492 struct folio *folio, void *fsdata)
493{
494 int ret;
495
496 ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
497 if (ret < len)
498 ufs_write_failed(mapping, pos + len);
499 return ret;
500}
501
502static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
503{
504 return generic_block_bmap(mapping,block,ufs_getfrag_block);
505}
506
507const struct address_space_operations ufs_aops = {
508 .dirty_folio = block_dirty_folio,
509 .invalidate_folio = block_invalidate_folio,
510 .read_folio = ufs_read_folio,
511 .writepages = ufs_writepages,
512 .write_begin = ufs_write_begin,
513 .write_end = ufs_write_end,
514 .migrate_folio = buffer_migrate_folio,
515 .bmap = ufs_bmap
516};
517
static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
			inode->i_op = &simple_symlink_inode_operations;
		} else {
			inode->i_mapping->a_ops = &ufs_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode_set_atime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec),
			0);
	inode_set_ctime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec),
			0);
	inode_set_mtime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec),
			0);
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode_set_atime(inode, fs64_to_cpu(sb, ufs2_inode->ui_atime),
			fs32_to_cpu(sb, ufs2_inode->ui_atimensec));
	inode_set_ctime(inode, fs64_to_cpu(sb, ufs2_inode->ui_ctime),
			fs32_to_cpu(sb, ufs2_inode->ui_ctimensec));
	inode_set_mtime(inode, fs64_to_cpu(sb, ufs2_inode->ui_mtime),
			fs32_to_cpu(sb, ufs2_inode->ui_mtimensec));
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;
	struct inode *inode;
	int err = -EIO;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}
	brelse(bh);
	if (err)
		goto bad_inode;

	inode_inc_iversion(inode);
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(err);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb,
						 inode_get_atime_sec(inode));
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb,
						 inode_get_ctime_sec(inode));
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb,
						 inode_get_mtime_sec(inode));
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode_get_atime_sec(inode));
	ufs_inode->ui_atimensec = cpu_to_fs32(sb,
					      inode_get_atime_nsec(inode));
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime_sec(inode));
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb,
					      inode_get_ctime_nsec(inode));
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode_get_mtime_sec(inode));
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb,
					      inode_get_mtime_nsec(inode));

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode *inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse(bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode(struct inode *inode)
{
	return ufs_update_inode(inode, 1);
}

void ufs_evict_inode(struct inode *inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks &&
		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		     S_ISLNK(inode->i_mode)))
			ufs_truncate_blocks(inode);
		ufs_update_inode(inode, inode_needs_sync(inode));
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

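/*
 * Deferred freeing: free_data() batches runs of physically contiguous
 * fragments so that adjacent blocks are handed to ufs_free_blocks()
 * in a single call.  A pending run is flushed whenever the next chunk
 * is not contiguous with it; callers flush the final run with
 * free_data(&ctx, 0, 0).
 */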
struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}

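/*
 * The first fragment past EOF, i.e. i_size rounded up to a whole
 * number of fragments (s_fsize is the fragment size in bytes,
 * s_fshift its log2).
 */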
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

/*
 * used only for truncation down to direct blocks.
 */
static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned int new_frags, old_frags;
	unsigned int old_slot, new_slot;
	unsigned int old_tail, new_tail;
	struct to_free ctx = {.inode = inode};

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	new_frags = DIRECT_FRAGMENT;
	// new_frags = first fragment past the new EOF
	old_frags = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	// old_frags = first fragment past the old EOF or covered by indirects

	if (new_frags >= old_frags) // expanding - nothing to free
		goto done;

	old_tail = ufs_fragnum(old_frags);
	old_slot = ufs_fragstoblks(old_frags);
	new_tail = ufs_fragnum(new_frags);
	new_slot = ufs_fragstoblks(new_frags);

	if (old_slot == new_slot) { // old_tail > 0
		void *p = ufs_get_direct_data_ptr(uspi, ufsi, old_slot);
		u64 tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			ufs_panic(sb, __func__, "internal error");
		if (!new_tail) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
		}
		ufs_free_fragments(inode, tmp + new_tail, old_tail - new_tail);
	} else {
		unsigned int slot = new_slot;

		if (new_tail) {
			void *p = ufs_get_direct_data_ptr(uspi, ufsi, slot++);
			u64 tmp = ufs_data_ptr_to_cpu(sb, p);
			if (!tmp)
				ufs_panic(sb, __func__, "internal error");

			ufs_free_fragments(inode, tmp + new_tail,
					   uspi->s_fpb - new_tail);
		}
		while (slot < old_slot) {
			void *p = ufs_get_direct_data_ptr(uspi, ufsi, slot++);
			u64 tmp = ufs_data_ptr_to_cpu(sb, p);
			if (!tmp)
				continue;
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);

			free_data(&ctx, tmp, uspi->s_fpb);
		}

		free_data(&ctx, 0, 0);

		if (old_tail) {
			void *p = ufs_get_direct_data_ptr(uspi, ufsi, slot);
			u64 tmp = ufs_data_ptr_to_cpu(sb, p);
			if (!tmp)
				ufs_panic(sb, __func__, "internal error");
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);

			ufs_free_fragments(inode, tmp, old_tail);
		}
	}
done:
	UFSD("EXIT: ino %lu\n", inode->i_ino);
}

static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}

static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct folio *folio;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	folio = ufs_get_locked_folio(mapping, lastfrag >>
				     (PAGE_SHIFT - inode->i_blkbits));
	if (IS_ERR(folio)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
	bh = folio_buffers(folio);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		clean_bdev_bh_alias(bh);
		/*
		 * We do not zero the fragment: if it was mapped
		 * to a hole, it already contains zeroes.
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		folio_mark_dirty(folio);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_folio(folio);
out:
	return err;
}

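/*
 * Free everything the current i_size no longer needs.
 * ufs_block_to_path() yields the path to the last surviving block;
 * depth2 marks (roughly) how many indirect blocks along that path
 * must be emptied only partially, i.e. the levels where slots after
 * the kept one are released via free_branch_tail(), while branches
 * lying wholly past the new end are torn down with
 * free_full_branch().
 */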
static void ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth;
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (inode->i_size) {
		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
		depth = ufs_block_to_path(inode, last, offsets);
		if (!depth)
			return;
	} else {
		depth = 1;
	}

	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2] != uspi->s_apb - 1)
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
		for (i = 0; i < depth2; i++) {
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	read_seqlock_excl(&ufsi->meta_lock);
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	read_sequnlock_excl(&ufsi->meta_lock);
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}

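/*
 * Both shrinking and extending go through here: the block holding the
 * new last fragment is allocated first (ufs_alloc_lastblock()), the
 * partial block past the new EOF is zeroed (block_truncate_page()),
 * the size is updated, and only then are the now-unused blocks freed.
 */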
static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	ufs_truncate_blocks(inode);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

int ufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};