Linux v5.14.15 (fs/ufs/inode.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/fs/ufs/inode.c
   4 *
   5 * Copyright (C) 1998
   6 * Daniel Pirkl <daniel.pirkl@email.cz>
   7 * Charles University, Faculty of Mathematics and Physics
   8 *
   9 *  from
  10 *
  11 *  linux/fs/ext2/inode.c
  12 *
  13 * Copyright (C) 1992, 1993, 1994, 1995
  14 * Remy Card (card@masi.ibp.fr)
  15 * Laboratoire MASI - Institut Blaise Pascal
  16 * Universite Pierre et Marie Curie (Paris VI)
  17 *
  18 *  from
  19 *
  20 *  linux/fs/minix/inode.c
  21 *
  22 *  Copyright (C) 1991, 1992  Linus Torvalds
  23 *
  24 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
  25 *  Big-endian to little-endian byte-swapping/bitmaps by
  26 *        David S. Miller (davem@caip.rutgers.edu), 1995
  27 */
  28
  29#include <linux/uaccess.h>
  30
  31#include <linux/errno.h>
  32#include <linux/fs.h>
  33#include <linux/time.h>
  34#include <linux/stat.h>
  35#include <linux/string.h>
  36#include <linux/mm.h>
  37#include <linux/buffer_head.h>
  38#include <linux/writeback.h>
  39#include <linux/iversion.h>
  40
  41#include "ufs_fs.h"
  42#include "ufs.h"
  43#include "swab.h"
  44#include "util.h"
  45
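/*
 * ufs_block_to_path() - translate a logical block number within the inode
 * into a chain of array indices: either a single index into the direct
 * area, or UFS_IND_BLOCK/UFS_DIND_BLOCK/UFS_TIND_BLOCK followed by the
 * offsets inside each level of indirect block.  Returns the depth of the
 * chain, or 0 if the block lies beyond the triple-indirect range.
 */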
  46static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
  47{
  48	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
  49	int ptrs = uspi->s_apb;
  50	int ptrs_bits = uspi->s_apbshift;
  51	const long direct_blocks = UFS_NDADDR,
  52		indirect_blocks = ptrs,
  53		double_blocks = (1 << (ptrs_bits * 2));
  54	int n = 0;
  55
  56
  57	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
  58	if (i_block < direct_blocks) {
  59		offsets[n++] = i_block;
  60	} else if ((i_block -= direct_blocks) < indirect_blocks) {
  61		offsets[n++] = UFS_IND_BLOCK;
  62		offsets[n++] = i_block;
  63	} else if ((i_block -= indirect_blocks) < double_blocks) {
  64		offsets[n++] = UFS_DIND_BLOCK;
  65		offsets[n++] = i_block >> ptrs_bits;
  66		offsets[n++] = i_block & (ptrs - 1);
  67	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
  68		offsets[n++] = UFS_TIND_BLOCK;
  69		offsets[n++] = i_block >> (ptrs_bits * 2);
  70		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
  71		offsets[n++] = i_block & (ptrs - 1);
  72	} else {
  73		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
  74	}
  75	return n;
  76}
  77
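/*
 * An Indirect records one step of a lookup chain: the location of a block
 * pointer (->p, possibly inside the buffer ->bh) and the value read from
 * it.  grow_chain32()/grow_chain64() append a step and re-read the whole
 * chain under ufsi->meta_lock until it is seen in a consistent state; a
 * return of 0 means a concurrent truncate changed one of the pointers and
 * the caller must restart the lookup.
 */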
  78typedef struct {
  79	void	*p;
  80	union {
  81		__fs32	key32;
  82		__fs64	key64;
  83	};
  84	struct buffer_head *bh;
  85} Indirect;
  86
  87static inline int grow_chain32(struct ufs_inode_info *ufsi,
  88			       struct buffer_head *bh, __fs32 *v,
  89			       Indirect *from, Indirect *to)
  90{
  91	Indirect *p;
  92	unsigned seq;
  93	to->bh = bh;
  94	do {
  95		seq = read_seqbegin(&ufsi->meta_lock);
  96		to->key32 = *(__fs32 *)(to->p = v);
  97		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
  98			;
  99	} while (read_seqretry(&ufsi->meta_lock, seq));
 100	return (p > to);
 101}
 102
 103static inline int grow_chain64(struct ufs_inode_info *ufsi,
 104			       struct buffer_head *bh, __fs64 *v,
 105			       Indirect *from, Indirect *to)
 106{
 107	Indirect *p;
 108	unsigned seq;
 109	to->bh = bh;
 110	do {
 111		seq = read_seqbegin(&ufsi->meta_lock);
 112		to->key64 = *(__fs64 *)(to->p = v);
 113		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
 114			;
 115	} while (read_seqretry(&ufsi->meta_lock, seq));
 116	return (p > to);
 117}
 118
 119/*
 120 * Returns the location of the fragment from
 121 * the beginning of the filesystem.
 122 */
 123
 124static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
 125{
 126	struct ufs_inode_info *ufsi = UFS_I(inode);
 127	struct super_block *sb = inode->i_sb;
 128	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 129	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
 130	int shift = uspi->s_apbshift-uspi->s_fpbshift;
 131	Indirect chain[4], *q = chain;
 132	unsigned *p;
 133	unsigned flags = UFS_SB(sb)->s_flags;
 134	u64 res = 0;
 135
 136	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
 137		uspi->s_fpbshift, uspi->s_apbmask,
 138		(unsigned long long)mask);
 139
 140	if (depth == 0)
 141		goto no_block;
 142
 143again:
 144	p = offsets;
 145
 146	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 147		goto ufs2;
 148
 149	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
 150		goto changed;
 151	if (!q->key32)
 152		goto no_block;
 153	while (--depth) {
 154		__fs32 *ptr;
 155		struct buffer_head *bh;
 156		unsigned n = *p++;
 157
 158		bh = sb_bread(sb, uspi->s_sbbase +
 159				  fs32_to_cpu(sb, q->key32) + (n>>shift));
 160		if (!bh)
 161			goto no_block;
 162		ptr = (__fs32 *)bh->b_data + (n & mask);
 163		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
 164			goto changed;
 165		if (!q->key32)
 166			goto no_block;
 167	}
 168	res = fs32_to_cpu(sb, q->key32);
 169	goto found;
 170
 171ufs2:
 172	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
 173		goto changed;
 174	if (!q->key64)
 175		goto no_block;
 176
 177	while (--depth) {
 178		__fs64 *ptr;
 179		struct buffer_head *bh;
 180		unsigned n = *p++;
 181
 182		bh = sb_bread(sb, uspi->s_sbbase +
 183				  fs64_to_cpu(sb, q->key64) + (n>>shift));
 184		if (!bh)
 185			goto no_block;
 186		ptr = (__fs64 *)bh->b_data + (n & mask);
 187		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
 188			goto changed;
 189		if (!q->key64)
 190			goto no_block;
 191	}
 192	res = fs64_to_cpu(sb, q->key64);
 193found:
 194	res += uspi->s_sbbase;
 195no_block:
 196	while (q > chain) {
 197		brelse(q->bh);
 198		q--;
 199	}
 200	return res;
 201
 202changed:
 203	while (q > chain) {
 204		brelse(q->bh);
 205		q--;
 206	}
 207	goto again;
 208}
 209
 210/*
  211 * Unpacking tails: we have a file with a partial final block and
  212 * we have been asked to extend it.  If the fragment being written
  213 * is within the same block, we need to extend the tail just to cover
  214 * that fragment.  Otherwise the tail is extended to a full block.
 215 *
 216 * Note that we might need to create a _new_ tail, but that will
 217 * be handled elsewhere; this is strictly for resizing old
 218 * ones.
 219 */
 220static bool
 221ufs_extend_tail(struct inode *inode, u64 writes_to,
 222		  int *err, struct page *locked_page)
 223{
 224	struct ufs_inode_info *ufsi = UFS_I(inode);
 225	struct super_block *sb = inode->i_sb;
 226	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 227	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
 228	unsigned block = ufs_fragstoblks(lastfrag);
 229	unsigned new_size;
 230	void *p;
 231	u64 tmp;
 232
 233	if (writes_to < (lastfrag | uspi->s_fpbmask))
 234		new_size = (writes_to & uspi->s_fpbmask) + 1;
 235	else
 236		new_size = uspi->s_fpb;
 237
 238	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
 239	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
 240				new_size - (lastfrag & uspi->s_fpbmask), err,
 241				locked_page);
 242	return tmp != 0;
 243}
 244
 245/**
 246 * ufs_inode_getfrag() - allocate new fragment(s)
 247 * @inode: pointer to inode
 248 * @index: number of block pointer within the inode's array.
  249 * @new_fragment: number of the fragment being allocated
  250 * @err: set to an error code if the allocation fails
  251 * @new: set to 1 if a new block was allocated
 252 * @locked_page: for ufs_new_fragments()
 253 */
 254static u64
 255ufs_inode_getfrag(struct inode *inode, unsigned index,
 256		  sector_t new_fragment, int *err,
 257		  int *new, struct page *locked_page)
 258{
 259	struct ufs_inode_info *ufsi = UFS_I(inode);
 260	struct super_block *sb = inode->i_sb;
 261	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 262	u64 tmp, goal, lastfrag;
 263	unsigned nfrags = uspi->s_fpb;
 264	void *p;
 265
 266        /* TODO : to be done for write support
 267        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 268             goto ufs2;
 269         */
 270
 271	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
 272	tmp = ufs_data_ptr_to_cpu(sb, p);
 273	if (tmp)
 274		goto out;
 275
 276	lastfrag = ufsi->i_lastfrag;
 277
 278	/* will that be a new tail? */
 279	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
 280		nfrags = (new_fragment & uspi->s_fpbmask) + 1;
 281
 282	goal = 0;
 283	if (index) {
 284		goal = ufs_data_ptr_to_cpu(sb,
 285				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
 286		if (goal)
 287			goal += uspi->s_fpb;
 288	}
 289	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
 290				goal, nfrags, err, locked_page);
 291
 292	if (!tmp) {
 293		*err = -ENOSPC;
 294		return 0;
 295	}
 296
 297	if (new)
 298		*new = 1;
 299	inode->i_ctime = current_time(inode);
 300	if (IS_SYNC(inode))
 301		ufs_sync_inode (inode);
 302	mark_inode_dirty(inode);
 303out:
 304	return tmp + uspi->s_sbbase;
 305
 306     /* This part : To be implemented ....
 307        Required only for writing, not required for READ-ONLY.
 308ufs2:
 309
 310	u2_block = ufs_fragstoblks(fragment);
 311	u2_blockoff = ufs_fragnum(fragment);
 312	p = ufsi->i_u1.u2_i_data + block;
 313	goal = 0;
 314
 315repeat2:
 316	tmp = fs32_to_cpu(sb, *p);
 317	lastfrag = ufsi->i_lastfrag;
 318
 319     */
 320}
 321
 322/**
 323 * ufs_inode_getblock() - allocate new block
 324 * @inode: pointer to inode
 325 * @ind_block: block number of the indirect block
 326 * @index: number of pointer within the indirect block
  327 * @new_fragment: number of the fragment being allocated
  328 *  (the block will hold this fragment and the other uspi->s_fpb-1)
 329 * @err: see ufs_inode_getfrag()
 330 * @new: see ufs_inode_getfrag()
 331 * @locked_page: see ufs_inode_getfrag()
 332 */
 333static u64
 334ufs_inode_getblock(struct inode *inode, u64 ind_block,
 335		  unsigned index, sector_t new_fragment, int *err,
 336		  int *new, struct page *locked_page)
 337{
 338	struct super_block *sb = inode->i_sb;
 339	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 340	int shift = uspi->s_apbshift - uspi->s_fpbshift;
 341	u64 tmp = 0, goal;
 342	struct buffer_head *bh;
 343	void *p;
 344
 345	if (!ind_block)
 346		return 0;
 347
 348	bh = sb_bread(sb, ind_block + (index >> shift));
 349	if (unlikely(!bh)) {
 350		*err = -EIO;
 351		return 0;
 352	}
 353
 354	index &= uspi->s_apbmask >> uspi->s_fpbshift;
 355	if (uspi->fs_magic == UFS2_MAGIC)
 356		p = (__fs64 *)bh->b_data + index;
 357	else
 358		p = (__fs32 *)bh->b_data + index;
 359
 360	tmp = ufs_data_ptr_to_cpu(sb, p);
 361	if (tmp)
 362		goto out;
 363
 364	if (index && (uspi->fs_magic == UFS2_MAGIC ?
 365		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
 366		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
 367		goal = tmp + uspi->s_fpb;
 368	else
 369		goal = bh->b_blocknr + uspi->s_fpb;
 370	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
 371				uspi->s_fpb, err, locked_page);
 372	if (!tmp)
 373		goto out;
 374
 375	if (new)
 376		*new = 1;
 377
 378	mark_buffer_dirty(bh);
 379	if (IS_SYNC(inode))
 380		sync_dirty_buffer(bh);
 381	inode->i_ctime = current_time(inode);
 382	mark_inode_dirty(inode);
 383out:
 384	brelse (bh);
 385	UFSD("EXIT\n");
 386	if (tmp)
 387		tmp += uspi->s_sbbase;
 388	return tmp;
 389}
 390
 391/**
 392 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 393 * readpage, writepage and so on
 394 */
 395
 396static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
 397{
 398	struct super_block *sb = inode->i_sb;
 399	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 400	int err = 0, new = 0;
 401	unsigned offsets[4];
 402	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
 403	u64 phys64 = 0;
 404	unsigned frag = fragment & uspi->s_fpbmask;
 405
 406	phys64 = ufs_frag_map(inode, offsets, depth);
 407	if (!create)
 408		goto done;
 409
 410	if (phys64) {
 411		if (fragment >= UFS_NDIR_FRAGMENT)
 412			goto done;
 413		read_seqlock_excl(&UFS_I(inode)->meta_lock);
 414		if (fragment < UFS_I(inode)->i_lastfrag) {
 415			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
 416			goto done;
 417		}
 418		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
 419	}
  420        /* This code is entered only while writing ....? */
 421
 422	mutex_lock(&UFS_I(inode)->truncate_mutex);
 423
 424	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
 425	if (unlikely(!depth)) {
 426		ufs_warning(sb, "ufs_get_block", "block > big");
 427		err = -EIO;
 428		goto out;
 429	}
 430
 431	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
 432		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
 433		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
 434		if (tailfrags && fragment >= lastfrag) {
 435			if (!ufs_extend_tail(inode, fragment,
 436					     &err, bh_result->b_page))
 437				goto out;
 438		}
 439	}
 440
 441	if (depth == 1) {
 442		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
 443					   &err, &new, bh_result->b_page);
 444	} else {
 445		int i;
 446		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
 447					   &err, NULL, NULL);
 448		for (i = 1; i < depth - 1; i++)
 449			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
 450						fragment, &err, NULL, NULL);
 451		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
 452					fragment, &err, &new, bh_result->b_page);
 453	}
 454out:
 455	if (phys64) {
 456		phys64 += frag;
 457		map_bh(bh_result, sb, phys64);
 458		if (new)
 459			set_buffer_new(bh_result);
 460	}
 461	mutex_unlock(&UFS_I(inode)->truncate_mutex);
 462	return err;
 463
 464done:
 465	if (phys64)
 466		map_bh(bh_result, sb, phys64 + frag);
 467	return 0;
 468}
 469
 470static int ufs_writepage(struct page *page, struct writeback_control *wbc)
 471{
 472	return block_write_full_page(page,ufs_getfrag_block,wbc);
 473}
 474
 475static int ufs_readpage(struct file *file, struct page *page)
 476{
 477	return block_read_full_page(page,ufs_getfrag_block);
 478}
 479
 480int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
 481{
 482	return __block_write_begin(page, pos, len, ufs_getfrag_block);
 483}
 484
 485static void ufs_truncate_blocks(struct inode *);
 486
 487static void ufs_write_failed(struct address_space *mapping, loff_t to)
 488{
 489	struct inode *inode = mapping->host;
 490
 491	if (to > inode->i_size) {
 492		truncate_pagecache(inode, inode->i_size);
 493		ufs_truncate_blocks(inode);
 494	}
 495}
 496
 497static int ufs_write_begin(struct file *file, struct address_space *mapping,
 498			loff_t pos, unsigned len, unsigned flags,
 499			struct page **pagep, void **fsdata)
 500{
 501	int ret;
 502
 503	ret = block_write_begin(mapping, pos, len, flags, pagep,
 504				ufs_getfrag_block);
 505	if (unlikely(ret))
 506		ufs_write_failed(mapping, pos + len);
 507
 508	return ret;
 509}
 510
 511static int ufs_write_end(struct file *file, struct address_space *mapping,
 512			loff_t pos, unsigned len, unsigned copied,
 513			struct page *page, void *fsdata)
 514{
 515	int ret;
 516
 517	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 518	if (ret < len)
 519		ufs_write_failed(mapping, pos + len);
 520	return ret;
 521}
 522
 523static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
 524{
 525	return generic_block_bmap(mapping,block,ufs_getfrag_block);
 526}
 527
 528const struct address_space_operations ufs_aops = {
 529	.set_page_dirty = __set_page_dirty_buffers,
 530	.readpage = ufs_readpage,
 531	.writepage = ufs_writepage,
 532	.write_begin = ufs_write_begin,
 533	.write_end = ufs_write_end,
 534	.bmap = ufs_bmap
 535};
 536
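/*
 * Pick the inode, file and address-space operations according to the file
 * type.  Short symlinks are kept in the inode body (i_u1.i_symlink), long
 * ones go through the page cache; anything that is not a regular file,
 * directory or symlink becomes a special inode (device, fifo, socket).
 */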
 537static void ufs_set_inode_ops(struct inode *inode)
 538{
 539	if (S_ISREG(inode->i_mode)) {
 540		inode->i_op = &ufs_file_inode_operations;
 541		inode->i_fop = &ufs_file_operations;
 542		inode->i_mapping->a_ops = &ufs_aops;
 543	} else if (S_ISDIR(inode->i_mode)) {
 544		inode->i_op = &ufs_dir_inode_operations;
 545		inode->i_fop = &ufs_dir_operations;
 546		inode->i_mapping->a_ops = &ufs_aops;
 547	} else if (S_ISLNK(inode->i_mode)) {
 548		if (!inode->i_blocks) {
 549			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
 550			inode->i_op = &simple_symlink_inode_operations;
 551		} else {
 552			inode->i_mapping->a_ops = &ufs_aops;
 553			inode->i_op = &page_symlink_inode_operations;
 554			inode_nohighmem(inode);
 555		}
 556	} else
 557		init_special_inode(inode, inode->i_mode,
 558				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
 559}
 560
 561static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 562{
 563	struct ufs_inode_info *ufsi = UFS_I(inode);
 564	struct super_block *sb = inode->i_sb;
 565	umode_t mode;
 566
 567	/*
 568	 * Copy data to the in-core inode.
 569	 */
 570	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
 571	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
 572	if (inode->i_nlink == 0)
 573		return -ESTALE;
 574
 575	/*
 576	 * Linux now has 32-bit uid and gid, so we can support EFT.
 577	 */
 578	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
 579	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
 580
 581	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
 582	inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
 583	inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
 584	inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
 585	inode->i_mtime.tv_nsec = 0;
 586	inode->i_atime.tv_nsec = 0;
 587	inode->i_ctime.tv_nsec = 0;
 588	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
 589	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
 590	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
 591	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
 592	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
 593
 594
 595	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
 596		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
 597		       sizeof(ufs_inode->ui_u2.ui_addr));
 598	} else {
 599		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
 600		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
 601		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
 602	}
 603	return 0;
 604}
 605
 606static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
 607{
 608	struct ufs_inode_info *ufsi = UFS_I(inode);
 609	struct super_block *sb = inode->i_sb;
 610	umode_t mode;
 611
 612	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
 613	/*
 614	 * Copy data to the in-core inode.
 615	 */
 616	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
 617	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
 618	if (inode->i_nlink == 0)
 619		return -ESTALE;
 620
 621        /*
 622         * Linux now has 32-bit uid and gid, so we can support EFT.
 623         */
 624	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
 625	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));
 626
 627	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
 628	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
 629	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
 630	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
 631	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
 632	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
 633	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
 634	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
 635	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
 636	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
 637	/*
 638	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
 639	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
 640	*/
 641
 642	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
 643		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
 644		       sizeof(ufs2_inode->ui_u2.ui_addr));
 645	} else {
 646		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
 647		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
 648		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
 649	}
 650	return 0;
 651}
 652
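/*
 * ufs_iget() - obtain the in-core inode for @ino: validate the inode
 * number against the cylinder group layout, look it up in the inode
 * cache and, on a miss, read the on-disk inode block and decode it with
 * ufs1_read_inode() or ufs2_read_inode().
 */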
 653struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 654{
 655	struct ufs_inode_info *ufsi;
 656	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 657	struct buffer_head * bh;
 658	struct inode *inode;
 659	int err = -EIO;
 660
 661	UFSD("ENTER, ino %lu\n", ino);
 662
 663	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
 664		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
 665			    ino);
 666		return ERR_PTR(-EIO);
 667	}
 668
 669	inode = iget_locked(sb, ino);
 670	if (!inode)
 671		return ERR_PTR(-ENOMEM);
 672	if (!(inode->i_state & I_NEW))
 673		return inode;
 674
 675	ufsi = UFS_I(inode);
 676
 677	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
 678	if (!bh) {
 679		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
 680			    inode->i_ino);
 681		goto bad_inode;
 682	}
 683	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
 684		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
 685
 686		err = ufs2_read_inode(inode,
 687				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
 688	} else {
 689		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
 690
 691		err = ufs1_read_inode(inode,
 692				      ufs_inode + ufs_inotofsbo(inode->i_ino));
 693	}
 694	brelse(bh);
 695	if (err)
 696		goto bad_inode;
 697
 698	inode_inc_iversion(inode);
 699	ufsi->i_lastfrag =
 700		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
 701	ufsi->i_dir_start_lookup = 0;
 702	ufsi->i_osync = 0;
 703
 704	ufs_set_inode_ops(inode);
 705
 706	UFSD("EXIT\n");
 707	unlock_new_inode(inode);
 708	return inode;
 709
 710bad_inode:
 711	iget_failed(inode);
 712	return ERR_PTR(err);
 713}
 714
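/*
 * Copy the in-core inode back into its on-disk UFS1 representation,
 * converting to the on-disk byte order; ufs2_update_inode() below is the
 * UFS2 counterpart.  An inode whose link count dropped to zero is wiped
 * completely.
 */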
 715static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 716{
 717	struct super_block *sb = inode->i_sb;
 718 	struct ufs_inode_info *ufsi = UFS_I(inode);
 719
 720	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
 721	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
 722
 723	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
 724	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
 725
 726	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
 727	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
 728	ufs_inode->ui_atime.tv_usec = 0;
 729	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
 730	ufs_inode->ui_ctime.tv_usec = 0;
 731	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
 732	ufs_inode->ui_mtime.tv_usec = 0;
 733	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
 734	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
 735	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
 736
 737	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
 738		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
 739		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
 740	}
 741
 742	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 743		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
 744		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
 745	} else if (inode->i_blocks) {
 746		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
 747		       sizeof(ufs_inode->ui_u2.ui_addr));
 748	}
 749	else {
 750		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
 751		       sizeof(ufs_inode->ui_u2.ui_symlink));
 752	}
 753
 754	if (!inode->i_nlink)
 755		memset (ufs_inode, 0, sizeof(struct ufs_inode));
 756}
 757
 758static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
 759{
 760	struct super_block *sb = inode->i_sb;
 761 	struct ufs_inode_info *ufsi = UFS_I(inode);
 762
 763	UFSD("ENTER\n");
 764	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
 765	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
 766
 767	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
 768	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
 769
 770	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
 771	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
 772	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
 773	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
 774	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
 775	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
 776	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
 777
 778	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
 779	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
 780	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
 781
 782	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 783		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
 784		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
 785	} else if (inode->i_blocks) {
 786		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
 787		       sizeof(ufs_inode->ui_u2.ui_addr));
 788	} else {
 789		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
 790		       sizeof(ufs_inode->ui_u2.ui_symlink));
 791 	}
 792
 793	if (!inode->i_nlink)
 794		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
 795	UFSD("EXIT\n");
 796}
 797
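/*
 * Write the inode back to disk: validate the inode number, read the block
 * holding the on-disk inode, refresh the UFS1 or UFS2 copy inside it and
 * mark the buffer dirty, waiting for the write when do_sync is set.
 */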
 798static int ufs_update_inode(struct inode * inode, int do_sync)
 799{
 800	struct super_block *sb = inode->i_sb;
 801	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 802	struct buffer_head * bh;
 803
 804	UFSD("ENTER, ino %lu\n", inode->i_ino);
 805
 806	if (inode->i_ino < UFS_ROOTINO ||
 807	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
 808		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
 809		return -1;
 810	}
 811
 812	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
 813	if (!bh) {
 814		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
 815		return -1;
 816	}
 817	if (uspi->fs_magic == UFS2_MAGIC) {
 818		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
 819
 820		ufs2_update_inode(inode,
 821				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
 822	} else {
 823		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;
 824
 825		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
 826	}
 827
 828	mark_buffer_dirty(bh);
 829	if (do_sync)
 830		sync_dirty_buffer(bh);
 831	brelse (bh);
 832
 833	UFSD("EXIT\n");
 834	return 0;
 835}
 836
 837int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
 838{
 839	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 840}
 841
 842int ufs_sync_inode (struct inode *inode)
 843{
 844	return ufs_update_inode (inode, 1);
 845}
 846
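/*
 * Final disposal of an inode: for an unlinked inode, truncate away all of
 * its data blocks, write the last on-disk update and free the inode.
 */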
 847void ufs_evict_inode(struct inode * inode)
 848{
 849	int want_delete = 0;
 850
 851	if (!inode->i_nlink && !is_bad_inode(inode))
 852		want_delete = 1;
 853
 854	truncate_inode_pages_final(&inode->i_data);
 855	if (want_delete) {
 856		inode->i_size = 0;
 857		if (inode->i_blocks &&
 858		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 859		     S_ISLNK(inode->i_mode)))
 860			ufs_truncate_blocks(inode);
 861		ufs_update_inode(inode, inode_needs_sync(inode));
 862	}
 863
 864	invalidate_inode_buffers(inode);
 865	clear_inode(inode);
 866
 867	if (want_delete)
 868		ufs_free_inode(inode);
 869}
 870
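/*
 * struct to_free batches physically contiguous runs of blocks so that
 * ufs_free_blocks() is called once per run instead of once per block;
 * free_data(ctx, 0, 0) flushes whatever run is still pending.
 */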
 871struct to_free {
 872	struct inode *inode;
 873	u64 to;
 874	unsigned count;
 875};
 876
 877static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
 878{
 879	if (ctx->count && ctx->to != from) {
 880		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
 881		ctx->count = 0;
 882	}
 883	ctx->count += count;
 884	ctx->to = from + count;
 885}
 886
 887#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
 888
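/*
 * Free the tail of the direct area: the fragments past the new end inside
 * the first partially kept block, every whole block after it, and the
 * trailing partial block.  Pointers to blocks that become entirely unused
 * are cleared under meta_lock before their fragments are released.
 */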
 889static void ufs_trunc_direct(struct inode *inode)
 890{
 891	struct ufs_inode_info *ufsi = UFS_I(inode);
 892	struct super_block * sb;
 893	struct ufs_sb_private_info * uspi;
 894	void *p;
 895	u64 frag1, frag2, frag3, frag4, block1, block2;
 896	struct to_free ctx = {.inode = inode};
 897	unsigned i, tmp;
 898
 899	UFSD("ENTER: ino %lu\n", inode->i_ino);
 900
 901	sb = inode->i_sb;
 902	uspi = UFS_SB(sb)->s_uspi;
 903
 904	frag1 = DIRECT_FRAGMENT;
 905	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
 906	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
 907	frag3 = frag4 & ~uspi->s_fpbmask;
 908	block1 = block2 = 0;
 909	if (frag2 > frag3) {
 910		frag2 = frag4;
 911		frag3 = frag4 = 0;
 912	} else if (frag2 < frag3) {
 913		block1 = ufs_fragstoblks (frag2);
 914		block2 = ufs_fragstoblks (frag3);
 915	}
 916
 917	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
 918	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
 919	     (unsigned long long)frag1, (unsigned long long)frag2,
 920	     (unsigned long long)block1, (unsigned long long)block2,
 921	     (unsigned long long)frag3, (unsigned long long)frag4);
 922
 923	if (frag1 >= frag2)
 924		goto next1;
 925
 926	/*
 927	 * Free first free fragments
 928	 */
 929	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
 930	tmp = ufs_data_ptr_to_cpu(sb, p);
 931	if (!tmp )
 932		ufs_panic (sb, "ufs_trunc_direct", "internal error");
 933	frag2 -= frag1;
 934	frag1 = ufs_fragnum (frag1);
 935
 936	ufs_free_fragments(inode, tmp + frag1, frag2);
 937
 938next1:
 939	/*
 940	 * Free whole blocks
 941	 */
 942	for (i = block1 ; i < block2; i++) {
 943		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
 944		tmp = ufs_data_ptr_to_cpu(sb, p);
 945		if (!tmp)
 946			continue;
 947		write_seqlock(&ufsi->meta_lock);
 948		ufs_data_ptr_clear(uspi, p);
 949		write_sequnlock(&ufsi->meta_lock);
 950
 951		free_data(&ctx, tmp, uspi->s_fpb);
 952	}
 953
 954	free_data(&ctx, 0, 0);
 955
 956	if (frag3 >= frag4)
 957		goto next3;
 958
 959	/*
 960	 * Free last free fragments
 961	 */
 962	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
 963	tmp = ufs_data_ptr_to_cpu(sb, p);
 964	if (!tmp )
 965		ufs_panic(sb, "ufs_truncate_direct", "internal error");
 966	frag4 = ufs_fragnum (frag4);
 967	write_seqlock(&ufsi->meta_lock);
 968	ufs_data_ptr_clear(uspi, p);
 969	write_sequnlock(&ufsi->meta_lock);
 970
 971	ufs_free_fragments (inode, tmp, frag4);
 972 next3:
 973
 974	UFSD("EXIT: ino %lu\n", inode->i_ino);
 975}
 976
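/*
 * free_full_branch() - release an entire branch of indirect blocks rooted
 * at ind_block: recurse into the lower levels (or batch-free the data
 * blocks at the last level), forget the indirect block's buffer and free
 * the block itself.
 */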
 977static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
 978{
 979	struct super_block *sb = inode->i_sb;
 980	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 981	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
 982	unsigned i;
 983
 984	if (!ubh)
 985		return;
 986
 987	if (--depth) {
 988		for (i = 0; i < uspi->s_apb; i++) {
 989			void *p = ubh_get_data_ptr(uspi, ubh, i);
 990			u64 block = ufs_data_ptr_to_cpu(sb, p);
 991			if (block)
 992				free_full_branch(inode, block, depth);
 993		}
 994	} else {
 995		struct to_free ctx = {.inode = inode};
 996
 997		for (i = 0; i < uspi->s_apb; i++) {
 998			void *p = ubh_get_data_ptr(uspi, ubh, i);
 999			u64 block = ufs_data_ptr_to_cpu(sb, p);
1000			if (block)
1001				free_data(&ctx, block, uspi->s_fpb);
1002		}
1003		free_data(&ctx, 0, 0);
1004	}
1005
1006	ubh_bforget(ubh);
1007	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
1008}
1009
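/*
 * free_branch_tail() - release the entries from index "from" to the end
 * of a partially truncated indirect block.  Each pointer is cleared under
 * meta_lock and the buffer marked dirty before the branch or data blocks
 * behind it are freed; the indirect block itself stays allocated because
 * its earlier entries remain in use.
 */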
1010static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
1011{
1012	struct super_block *sb = inode->i_sb;
1013	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1014	unsigned i;
1015
1016	if (--depth) {
1017		for (i = from; i < uspi->s_apb ; i++) {
1018			void *p = ubh_get_data_ptr(uspi, ubh, i);
1019			u64 block = ufs_data_ptr_to_cpu(sb, p);
1020			if (block) {
1021				write_seqlock(&UFS_I(inode)->meta_lock);
1022				ufs_data_ptr_clear(uspi, p);
1023				write_sequnlock(&UFS_I(inode)->meta_lock);
1024				ubh_mark_buffer_dirty(ubh);
1025				free_full_branch(inode, block, depth);
1026			}
1027		}
1028	} else {
1029		struct to_free ctx = {.inode = inode};
1030
1031		for (i = from; i < uspi->s_apb; i++) {
1032			void *p = ubh_get_data_ptr(uspi, ubh, i);
1033			u64 block = ufs_data_ptr_to_cpu(sb, p);
1034			if (block) {
1035				write_seqlock(&UFS_I(inode)->meta_lock);
1036				ufs_data_ptr_clear(uspi, p);
1037				write_sequnlock(&UFS_I(inode)->meta_lock);
1038				ubh_mark_buffer_dirty(ubh);
1039				free_data(&ctx, block, uspi->s_fpb);
1040			}
1041		}
1042		free_data(&ctx, 0, 0);
1043	}
1044	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
1045		ubh_sync_block(ubh);
1046	ubh_brelse(ubh);
1047}
1048
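/*
 * Make sure the block that will hold the new last fragment of the file is
 * allocated; a freshly allocated buffer is marked uptodate and dirty.  If
 * that fragment lies beyond the direct area, the unused fragments at the
 * end of its block are zeroed and written out as well.
 */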
1049static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
1050{
1051	int err = 0;
1052	struct super_block *sb = inode->i_sb;
1053	struct address_space *mapping = inode->i_mapping;
1054	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1055	unsigned i, end;
1056	sector_t lastfrag;
1057	struct page *lastpage;
1058	struct buffer_head *bh;
1059	u64 phys64;
1060
1061	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
1062
1063	if (!lastfrag)
1064		goto out;
1065
1066	lastfrag--;
1067
1068	lastpage = ufs_get_locked_page(mapping, lastfrag >>
1069				       (PAGE_SHIFT - inode->i_blkbits));
1070       if (IS_ERR(lastpage)) {
1071               err = -EIO;
1072               goto out;
1073       }
1074
1075       end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
1076       bh = page_buffers(lastpage);
1077       for (i = 0; i < end; ++i)
1078               bh = bh->b_this_page;
1079
1080
1081       err = ufs_getfrag_block(inode, lastfrag, bh, 1);
1082
1083       if (unlikely(err))
1084	       goto out_unlock;
1085
1086       if (buffer_new(bh)) {
1087	       clear_buffer_new(bh);
1088	       clean_bdev_bh_alias(bh);
1089	       /*
 1090		* we do not zero the fragment, because if it was
 1091		* mapped to a hole it already contains zeroes
1092		*/
1093	       set_buffer_uptodate(bh);
1094	       mark_buffer_dirty(bh);
1095	       set_page_dirty(lastpage);
1096       }
1097
1098       if (lastfrag >= UFS_IND_FRAGMENT) {
1099	       end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
1100	       phys64 = bh->b_blocknr + 1;
1101	       for (i = 0; i < end; ++i) {
1102		       bh = sb_getblk(sb, i + phys64);
1103		       lock_buffer(bh);
1104		       memset(bh->b_data, 0, sb->s_blocksize);
1105		       set_buffer_uptodate(bh);
1106		       mark_buffer_dirty(bh);
1107		       unlock_buffer(bh);
1108		       sync_dirty_buffer(bh);
1109		       brelse(bh);
1110	       }
1111       }
1112out_unlock:
1113       ufs_put_locked_page(lastpage);
1114out:
1115       return err;
1116}
1117
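/*
 * Free everything beyond the current i_size: the direct area or the
 * partially used indirect chains first, then any indirect trees that are
 * no longer referenced at all, and finally pull i_lastfrag back.  Runs
 * under truncate_mutex so it cannot race with block allocation in
 * ufs_getfrag_block().
 */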
1118static void ufs_truncate_blocks(struct inode *inode)
1119{
1120	struct ufs_inode_info *ufsi = UFS_I(inode);
1121	struct super_block *sb = inode->i_sb;
1122	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1123	unsigned offsets[4];
1124	int depth;
1125	int depth2;
1126	unsigned i;
1127	struct ufs_buffer_head *ubh[3];
1128	void *p;
1129	u64 block;
1130
1131	if (inode->i_size) {
1132		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
1133		depth = ufs_block_to_path(inode, last, offsets);
1134		if (!depth)
1135			return;
1136	} else {
1137		depth = 1;
1138	}
1139
1140	for (depth2 = depth - 1; depth2; depth2--)
1141		if (offsets[depth2] != uspi->s_apb - 1)
1142			break;
1143
1144	mutex_lock(&ufsi->truncate_mutex);
1145	if (depth == 1) {
1146		ufs_trunc_direct(inode);
1147		offsets[0] = UFS_IND_BLOCK;
1148	} else {
1149		/* get the blocks that should be partially emptied */
1150		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
1151		for (i = 0; i < depth2; i++) {
1152			block = ufs_data_ptr_to_cpu(sb, p);
1153			if (!block)
1154				break;
1155			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
1156			if (!ubh[i]) {
1157				write_seqlock(&ufsi->meta_lock);
1158				ufs_data_ptr_clear(uspi, p);
1159				write_sequnlock(&ufsi->meta_lock);
1160				break;
1161			}
1162			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
1163		}
1164		while (i--)
1165			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
1166	}
1167	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
1168		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
1169		block = ufs_data_ptr_to_cpu(sb, p);
1170		if (block) {
1171			write_seqlock(&ufsi->meta_lock);
1172			ufs_data_ptr_clear(uspi, p);
1173			write_sequnlock(&ufsi->meta_lock);
1174			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
1175		}
1176	}
1177	read_seqlock_excl(&ufsi->meta_lock);
1178	ufsi->i_lastfrag = DIRECT_FRAGMENT;
1179	read_sequnlock_excl(&ufsi->meta_lock);
1180	mark_inode_dirty(inode);
1181	mutex_unlock(&ufsi->truncate_mutex);
1182}
1183
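/*
 * ufs_truncate() - change the file size.  The new last block is allocated
 * first (a failure here leaves the old size in place), the part of the
 * last page beyond the new size is zeroed, i_size is updated and the
 * blocks that are no longer needed are freed.
 */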
1184static int ufs_truncate(struct inode *inode, loff_t size)
1185{
1186	int err = 0;
1187
1188	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
1189	     inode->i_ino, (unsigned long long)size,
1190	     (unsigned long long)i_size_read(inode));
1191
1192	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1193	      S_ISLNK(inode->i_mode)))
1194		return -EINVAL;
1195	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1196		return -EPERM;
1197
1198	err = ufs_alloc_lastblock(inode, size);
1199
1200	if (err)
1201		goto out;
1202
1203	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);
1204
1205	truncate_setsize(inode, size);
1206
1207	ufs_truncate_blocks(inode);
1208	inode->i_mtime = inode->i_ctime = current_time(inode);
1209	mark_inode_dirty(inode);
1210out:
1211	UFSD("EXIT: err %d\n", err);
1212	return err;
1213}
1214
1215int ufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
1216		struct iattr *attr)
1217{
1218	struct inode *inode = d_inode(dentry);
1219	unsigned int ia_valid = attr->ia_valid;
1220	int error;
1221
1222	error = setattr_prepare(&init_user_ns, dentry, attr);
1223	if (error)
1224		return error;
1225
1226	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
1227		error = ufs_truncate(inode, attr->ia_size);
1228		if (error)
1229			return error;
1230	}
1231
1232	setattr_copy(&init_user_ns, inode, attr);
1233	mark_inode_dirty(inode);
1234	return 0;
1235}
1236
1237const struct inode_operations ufs_file_inode_operations = {
1238	.setattr = ufs_setattr,
1239};
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/fs/ufs/inode.c
   4 *
   5 * Copyright (C) 1998
   6 * Daniel Pirkl <daniel.pirkl@email.cz>
   7 * Charles University, Faculty of Mathematics and Physics
   8 *
   9 *  from
  10 *
  11 *  linux/fs/ext2/inode.c
  12 *
  13 * Copyright (C) 1992, 1993, 1994, 1995
  14 * Remy Card (card@masi.ibp.fr)
  15 * Laboratoire MASI - Institut Blaise Pascal
  16 * Universite Pierre et Marie Curie (Paris VI)
  17 *
  18 *  from
  19 *
  20 *  linux/fs/minix/inode.c
  21 *
  22 *  Copyright (C) 1991, 1992  Linus Torvalds
  23 *
  24 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
  25 *  Big-endian to little-endian byte-swapping/bitmaps by
  26 *        David S. Miller (davem@caip.rutgers.edu), 1995
  27 */
  28
  29#include <linux/uaccess.h>
  30
  31#include <linux/errno.h>
  32#include <linux/fs.h>
  33#include <linux/time.h>
  34#include <linux/stat.h>
  35#include <linux/string.h>
  36#include <linux/mm.h>
  37#include <linux/buffer_head.h>
  38#include <linux/writeback.h>
  39#include <linux/iversion.h>
  40
  41#include "ufs_fs.h"
  42#include "ufs.h"
  43#include "swab.h"
  44#include "util.h"
  45
  46static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
  47{
  48	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
  49	int ptrs = uspi->s_apb;
  50	int ptrs_bits = uspi->s_apbshift;
  51	const long direct_blocks = UFS_NDADDR,
  52		indirect_blocks = ptrs,
  53		double_blocks = (1 << (ptrs_bits * 2));
  54	int n = 0;
  55
  56
  57	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
  58	if (i_block < direct_blocks) {
  59		offsets[n++] = i_block;
  60	} else if ((i_block -= direct_blocks) < indirect_blocks) {
  61		offsets[n++] = UFS_IND_BLOCK;
  62		offsets[n++] = i_block;
  63	} else if ((i_block -= indirect_blocks) < double_blocks) {
  64		offsets[n++] = UFS_DIND_BLOCK;
  65		offsets[n++] = i_block >> ptrs_bits;
  66		offsets[n++] = i_block & (ptrs - 1);
  67	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
  68		offsets[n++] = UFS_TIND_BLOCK;
  69		offsets[n++] = i_block >> (ptrs_bits * 2);
  70		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
  71		offsets[n++] = i_block & (ptrs - 1);
  72	} else {
  73		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
  74	}
  75	return n;
  76}
  77
  78typedef struct {
  79	void	*p;
  80	union {
  81		__fs32	key32;
  82		__fs64	key64;
  83	};
  84	struct buffer_head *bh;
  85} Indirect;
  86
  87static inline int grow_chain32(struct ufs_inode_info *ufsi,
  88			       struct buffer_head *bh, __fs32 *v,
  89			       Indirect *from, Indirect *to)
  90{
  91	Indirect *p;
  92	unsigned seq;
  93	to->bh = bh;
  94	do {
  95		seq = read_seqbegin(&ufsi->meta_lock);
  96		to->key32 = *(__fs32 *)(to->p = v);
  97		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
  98			;
  99	} while (read_seqretry(&ufsi->meta_lock, seq));
 100	return (p > to);
 101}
 102
 103static inline int grow_chain64(struct ufs_inode_info *ufsi,
 104			       struct buffer_head *bh, __fs64 *v,
 105			       Indirect *from, Indirect *to)
 106{
 107	Indirect *p;
 108	unsigned seq;
 109	to->bh = bh;
 110	do {
 111		seq = read_seqbegin(&ufsi->meta_lock);
 112		to->key64 = *(__fs64 *)(to->p = v);
 113		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
 114			;
 115	} while (read_seqretry(&ufsi->meta_lock, seq));
 116	return (p > to);
 117}
 118
 119/*
 120 * Returns the location of the fragment from
 121 * the beginning of the filesystem.
 122 */
 123
 124static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
 125{
 126	struct ufs_inode_info *ufsi = UFS_I(inode);
 127	struct super_block *sb = inode->i_sb;
 128	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 129	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
 130	int shift = uspi->s_apbshift-uspi->s_fpbshift;
 131	Indirect chain[4], *q = chain;
 132	unsigned *p;
 133	unsigned flags = UFS_SB(sb)->s_flags;
 134	u64 res = 0;
 135
 136	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
 137		uspi->s_fpbshift, uspi->s_apbmask,
 138		(unsigned long long)mask);
 139
 140	if (depth == 0)
 141		goto no_block;
 142
 143again:
 144	p = offsets;
 145
 146	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 147		goto ufs2;
 148
 149	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
 150		goto changed;
 151	if (!q->key32)
 152		goto no_block;
 153	while (--depth) {
 154		__fs32 *ptr;
 155		struct buffer_head *bh;
 156		unsigned n = *p++;
 157
 158		bh = sb_bread(sb, uspi->s_sbbase +
 159				  fs32_to_cpu(sb, q->key32) + (n>>shift));
 160		if (!bh)
 161			goto no_block;
 162		ptr = (__fs32 *)bh->b_data + (n & mask);
 163		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
 164			goto changed;
 165		if (!q->key32)
 166			goto no_block;
 167	}
 168	res = fs32_to_cpu(sb, q->key32);
 169	goto found;
 170
 171ufs2:
 172	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
 173		goto changed;
 174	if (!q->key64)
 175		goto no_block;
 176
 177	while (--depth) {
 178		__fs64 *ptr;
 179		struct buffer_head *bh;
 180		unsigned n = *p++;
 181
 182		bh = sb_bread(sb, uspi->s_sbbase +
 183				  fs64_to_cpu(sb, q->key64) + (n>>shift));
 184		if (!bh)
 185			goto no_block;
 186		ptr = (__fs64 *)bh->b_data + (n & mask);
 187		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
 188			goto changed;
 189		if (!q->key64)
 190			goto no_block;
 191	}
 192	res = fs64_to_cpu(sb, q->key64);
 193found:
 194	res += uspi->s_sbbase;
 195no_block:
 196	while (q > chain) {
 197		brelse(q->bh);
 198		q--;
 199	}
 200	return res;
 201
 202changed:
 203	while (q > chain) {
 204		brelse(q->bh);
 205		q--;
 206	}
 207	goto again;
 208}
 209
 210/*
 211 * Unpacking tails: we have a file with partial final block and
 212 * we had been asked to extend it.  If the fragment being written
 213 * is within the same block, we need to extend the tail just to cover
 214 * that fragment.  Otherwise the tail is extended to full block.
 215 *
 216 * Note that we might need to create a _new_ tail, but that will
 217 * be handled elsewhere; this is strictly for resizing old
 218 * ones.
 219 */
 220static bool
 221ufs_extend_tail(struct inode *inode, u64 writes_to,
 222		  int *err, struct page *locked_page)
 223{
 224	struct ufs_inode_info *ufsi = UFS_I(inode);
 225	struct super_block *sb = inode->i_sb;
 226	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 227	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
 228	unsigned block = ufs_fragstoblks(lastfrag);
 229	unsigned new_size;
 230	void *p;
 231	u64 tmp;
 232
 233	if (writes_to < (lastfrag | uspi->s_fpbmask))
 234		new_size = (writes_to & uspi->s_fpbmask) + 1;
 235	else
 236		new_size = uspi->s_fpb;
 237
 238	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
 239	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
 240				new_size - (lastfrag & uspi->s_fpbmask), err,
 241				locked_page);
 242	return tmp != 0;
 243}
 244
 245/**
 246 * ufs_inode_getfrag() - allocate new fragment(s)
 247 * @inode: pointer to inode
 248 * @index: number of block pointer within the inode's array.
 249 * @new_fragment: number of new allocated fragment(s)
 250 * @err: we set it if something wrong
 251 * @new: we set it if we allocate new block
 252 * @locked_page: for ufs_new_fragments()
 253 */
 254static u64
 255ufs_inode_getfrag(struct inode *inode, unsigned index,
 256		  sector_t new_fragment, int *err,
 257		  int *new, struct page *locked_page)
 258{
 259	struct ufs_inode_info *ufsi = UFS_I(inode);
 260	struct super_block *sb = inode->i_sb;
 261	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 262	u64 tmp, goal, lastfrag;
 263	unsigned nfrags = uspi->s_fpb;
 264	void *p;
 265
 266        /* TODO : to be done for write support
 267        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 268             goto ufs2;
 269         */
 270
 271	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
 272	tmp = ufs_data_ptr_to_cpu(sb, p);
 273	if (tmp)
 274		goto out;
 275
 276	lastfrag = ufsi->i_lastfrag;
 277
 278	/* will that be a new tail? */
 279	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
 280		nfrags = (new_fragment & uspi->s_fpbmask) + 1;
 281
 282	goal = 0;
 283	if (index) {
 284		goal = ufs_data_ptr_to_cpu(sb,
 285				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
 286		if (goal)
 287			goal += uspi->s_fpb;
 288	}
 289	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
 290				goal, nfrags, err, locked_page);
 291
 292	if (!tmp) {
 293		*err = -ENOSPC;
 294		return 0;
 295	}
 296
 297	if (new)
 298		*new = 1;
 299	inode->i_ctime = current_time(inode);
 300	if (IS_SYNC(inode))
 301		ufs_sync_inode (inode);
 302	mark_inode_dirty(inode);
 303out:
 304	return tmp + uspi->s_sbbase;
 305
 306     /* This part : To be implemented ....
 307        Required only for writing, not required for READ-ONLY.
 308ufs2:
 309
 310	u2_block = ufs_fragstoblks(fragment);
 311	u2_blockoff = ufs_fragnum(fragment);
 312	p = ufsi->i_u1.u2_i_data + block;
 313	goal = 0;
 314
 315repeat2:
 316	tmp = fs32_to_cpu(sb, *p);
 317	lastfrag = ufsi->i_lastfrag;
 318
 319     */
 320}
 321
 322/**
 323 * ufs_inode_getblock() - allocate new block
 324 * @inode: pointer to inode
 325 * @ind_block: block number of the indirect block
 326 * @index: number of pointer within the indirect block
 327 * @new_fragment: number of new allocated fragment
 328 *  (block will hold this fragment and also uspi->s_fpb-1)
 329 * @err: see ufs_inode_getfrag()
 330 * @new: see ufs_inode_getfrag()
 331 * @locked_page: see ufs_inode_getfrag()
 332 */
 333static u64
 334ufs_inode_getblock(struct inode *inode, u64 ind_block,
 335		  unsigned index, sector_t new_fragment, int *err,
 336		  int *new, struct page *locked_page)
 337{
 338	struct super_block *sb = inode->i_sb;
 339	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 340	int shift = uspi->s_apbshift - uspi->s_fpbshift;
 341	u64 tmp = 0, goal;
 342	struct buffer_head *bh;
 343	void *p;
 344
 345	if (!ind_block)
 346		return 0;
 347
 348	bh = sb_bread(sb, ind_block + (index >> shift));
 349	if (unlikely(!bh)) {
 350		*err = -EIO;
 351		return 0;
 352	}
 353
 354	index &= uspi->s_apbmask >> uspi->s_fpbshift;
 355	if (uspi->fs_magic == UFS2_MAGIC)
 356		p = (__fs64 *)bh->b_data + index;
 357	else
 358		p = (__fs32 *)bh->b_data + index;
 359
 360	tmp = ufs_data_ptr_to_cpu(sb, p);
 361	if (tmp)
 362		goto out;
 363
 364	if (index && (uspi->fs_magic == UFS2_MAGIC ?
 365		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
 366		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
 367		goal = tmp + uspi->s_fpb;
 368	else
 369		goal = bh->b_blocknr + uspi->s_fpb;
 370	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
 371				uspi->s_fpb, err, locked_page);
 372	if (!tmp)
 373		goto out;
 374
 375	if (new)
 376		*new = 1;
 377
 378	mark_buffer_dirty(bh);
 379	if (IS_SYNC(inode))
 380		sync_dirty_buffer(bh);
 381	inode->i_ctime = current_time(inode);
 382	mark_inode_dirty(inode);
 383out:
 384	brelse (bh);
 385	UFSD("EXIT\n");
 386	if (tmp)
 387		tmp += uspi->s_sbbase;
 388	return tmp;
 389}
 390
 391/**
 392 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 393 * readpage, writepage and so on
 394 */
 395
 396static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
 397{
 398	struct super_block *sb = inode->i_sb;
 399	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 400	int err = 0, new = 0;
 401	unsigned offsets[4];
 402	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
 403	u64 phys64 = 0;
 404	unsigned frag = fragment & uspi->s_fpbmask;
 405
 406	phys64 = ufs_frag_map(inode, offsets, depth);
 407	if (!create)
 408		goto done;
 409
 410	if (phys64) {
 411		if (fragment >= UFS_NDIR_FRAGMENT)
 412			goto done;
 413		read_seqlock_excl(&UFS_I(inode)->meta_lock);
 414		if (fragment < UFS_I(inode)->i_lastfrag) {
 415			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
 416			goto done;
 417		}
 418		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
 419	}
 420        /* This code entered only while writing ....? */
 421
 422	mutex_lock(&UFS_I(inode)->truncate_mutex);
 423
 424	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
 425	if (unlikely(!depth)) {
 426		ufs_warning(sb, "ufs_get_block", "block > big");
 427		err = -EIO;
 428		goto out;
 429	}
 430
 431	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
 432		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
 433		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
 434		if (tailfrags && fragment >= lastfrag) {
 435			if (!ufs_extend_tail(inode, fragment,
 436					     &err, bh_result->b_page))
 437				goto out;
 438		}
 439	}
 440
 441	if (depth == 1) {
 442		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
 443					   &err, &new, bh_result->b_page);
 444	} else {
 445		int i;
 446		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
 447					   &err, NULL, NULL);
 448		for (i = 1; i < depth - 1; i++)
 449			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
 450						fragment, &err, NULL, NULL);
 451		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
 452					fragment, &err, &new, bh_result->b_page);
 453	}
 454out:
 455	if (phys64) {
 456		phys64 += frag;
 457		map_bh(bh_result, sb, phys64);
 458		if (new)
 459			set_buffer_new(bh_result);
 460	}
 461	mutex_unlock(&UFS_I(inode)->truncate_mutex);
 462	return err;
 463
 464done:
 465	if (phys64)
 466		map_bh(bh_result, sb, phys64 + frag);
 467	return 0;
 468}
 469
 470static int ufs_writepage(struct page *page, struct writeback_control *wbc)
 471{
 472	return block_write_full_page(page,ufs_getfrag_block,wbc);
 473}
 474
 475static int ufs_readpage(struct file *file, struct page *page)
 476{
 477	return block_read_full_page(page,ufs_getfrag_block);
 478}
 479
 480int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
 481{
 482	return __block_write_begin(page, pos, len, ufs_getfrag_block);
 483}
 484
 485static void ufs_truncate_blocks(struct inode *);
 486
 487static void ufs_write_failed(struct address_space *mapping, loff_t to)
 488{
 489	struct inode *inode = mapping->host;
 490
 491	if (to > inode->i_size) {
 492		truncate_pagecache(inode, inode->i_size);
 493		ufs_truncate_blocks(inode);
 494	}
 495}
 496
 497static int ufs_write_begin(struct file *file, struct address_space *mapping,
 498			loff_t pos, unsigned len, unsigned flags,
 499			struct page **pagep, void **fsdata)
 500{
 501	int ret;
 502
 503	ret = block_write_begin(mapping, pos, len, flags, pagep,
 504				ufs_getfrag_block);
 505	if (unlikely(ret))
 506		ufs_write_failed(mapping, pos + len);
 507
 508	return ret;
 509}
 510
 511static int ufs_write_end(struct file *file, struct address_space *mapping,
 512			loff_t pos, unsigned len, unsigned copied,
 513			struct page *page, void *fsdata)
 514{
 515	int ret;
 516
 517	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 518	if (ret < len)
 519		ufs_write_failed(mapping, pos + len);
 520	return ret;
 521}
 522
 523static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
 524{
 525	return generic_block_bmap(mapping,block,ufs_getfrag_block);
 526}
 527
 528const struct address_space_operations ufs_aops = {
 
 529	.readpage = ufs_readpage,
 530	.writepage = ufs_writepage,
 531	.write_begin = ufs_write_begin,
 532	.write_end = ufs_write_end,
 533	.bmap = ufs_bmap
 534};
 535
 536static void ufs_set_inode_ops(struct inode *inode)
 537{
 538	if (S_ISREG(inode->i_mode)) {
 539		inode->i_op = &ufs_file_inode_operations;
 540		inode->i_fop = &ufs_file_operations;
 541		inode->i_mapping->a_ops = &ufs_aops;
 542	} else if (S_ISDIR(inode->i_mode)) {
 543		inode->i_op = &ufs_dir_inode_operations;
 544		inode->i_fop = &ufs_dir_operations;
 545		inode->i_mapping->a_ops = &ufs_aops;
 546	} else if (S_ISLNK(inode->i_mode)) {
 547		if (!inode->i_blocks) {
 548			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
 549			inode->i_op = &simple_symlink_inode_operations;
 550		} else {
 551			inode->i_mapping->a_ops = &ufs_aops;
 552			inode->i_op = &page_symlink_inode_operations;
 553			inode_nohighmem(inode);
 554		}
 555	} else
 556		init_special_inode(inode, inode->i_mode,
 557				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
 558}
 559
 560static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 561{
 562	struct ufs_inode_info *ufsi = UFS_I(inode);
 563	struct super_block *sb = inode->i_sb;
 564	umode_t mode;
 565
 566	/*
 567	 * Copy data to the in-core inode.
 568	 */
 569	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
 570	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
 571	if (inode->i_nlink == 0)
 572		return -ESTALE;
 573
 574	/*
 575	 * Linux now has 32-bit uid and gid, so we can support EFT.
 576	 */
 577	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
 578	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
 579
 580	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
 581	inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
 582	inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
 583	inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
 584	inode->i_mtime.tv_nsec = 0;
 585	inode->i_atime.tv_nsec = 0;
 586	inode->i_ctime.tv_nsec = 0;
 587	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
 588	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
 589	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
 590	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
 591	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
 592
 593
 594	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
 595		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
 596		       sizeof(ufs_inode->ui_u2.ui_addr));
 597	} else {
 598		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
 599		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
 600		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
 601	}
 602	return 0;
 603}
 604
 605static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
 606{
 607	struct ufs_inode_info *ufsi = UFS_I(inode);
 608	struct super_block *sb = inode->i_sb;
 609	umode_t mode;
 610
 611	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
 612	/*
 613	 * Copy data to the in-core inode.
 614	 */
 615	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
 616	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
 617	if (inode->i_nlink == 0)
 618		return -ESTALE;
 619
 620	/*
 621	 * Linux now has 32-bit uid and gid, so we can support EFT.
 622	 */
 623	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
 624	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));
 625
 626	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
 627	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
 628	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
 629	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
 630	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
 631	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
 632	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
 633	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
 634	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
 635	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
 636	/*
 637	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
 638	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
 639	*/
 640
 641	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
 642		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
 643		       sizeof(ufs2_inode->ui_u2.ui_addr));
 644	} else {
 645		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
 646		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
 647		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
 648	}
 649	return 0;
 650}
 651
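/*
 * Read an inode from disk and return the corresponding in-core inode.
 * The on-disk format is chosen by the filesystem flavour (UFS1 vs UFS2);
 * on failure the half-constructed inode is dropped and an ERR_PTR is
 * returned.
 */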
 652struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 653{
 654	struct ufs_inode_info *ufsi;
 655	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 656	struct buffer_head * bh;
 657	struct inode *inode;
 658	int err = -EIO;
 659
 660	UFSD("ENTER, ino %lu\n", ino);
 661
 662	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
 663		ufs_warning(sb, "ufs_iget", "bad inode number (%lu)\n",
 664			    ino);
 665		return ERR_PTR(-EIO);
 666	}
 667
 668	inode = iget_locked(sb, ino);
 669	if (!inode)
 670		return ERR_PTR(-ENOMEM);
 671	if (!(inode->i_state & I_NEW))
 672		return inode;
 673
 674	ufsi = UFS_I(inode);
 675
 676	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
 677	if (!bh) {
 678		ufs_warning(sb, "ufs_iget", "unable to read inode %lu\n",
 679			    inode->i_ino);
 680		goto bad_inode;
 681	}
 682	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
 683		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
 684
 685		err = ufs2_read_inode(inode,
 686				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
 687	} else {
 688		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
 689
 690		err = ufs1_read_inode(inode,
 691				      ufs_inode + ufs_inotofsbo(inode->i_ino));
 692	}
 693	brelse(bh);
 694	if (err)
 695		goto bad_inode;
 696
 697	inode_inc_iversion(inode);
 698	ufsi->i_lastfrag =
 699		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
 700	ufsi->i_dir_start_lookup = 0;
 701	ufsi->i_osync = 0;
 702
 703	ufs_set_inode_ops(inode);
 704
 705	UFSD("EXIT\n");
 706	unlock_new_inode(inode);
 707	return inode;
 708
 709bad_inode:
 710	iget_failed(inode);
 711	return ERR_PTR(err);
 712}
 713
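/*
 * Copy the in-core inode back into its on-disk UFS1 representation.
 * If the last link is gone, the on-disk inode is cleared entirely.
 */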
 714static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 715{
 716	struct super_block *sb = inode->i_sb;
 717	struct ufs_inode_info *ufsi = UFS_I(inode);
 718
 719	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
 720	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
 721
 722	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
 723	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
 724
 725	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
 726	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
 727	ufs_inode->ui_atime.tv_usec = 0;
 728	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
 729	ufs_inode->ui_ctime.tv_usec = 0;
 730	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
 731	ufs_inode->ui_mtime.tv_usec = 0;
 732	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
 733	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
 734	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
 735
 736	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
 737		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
 738		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
 739	}
 740
 741	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 742		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
 743		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
 744	} else if (inode->i_blocks) {
 745		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
 746		       sizeof(ufs_inode->ui_u2.ui_addr));
 747	}
 748	else {
 749		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
 750		       sizeof(ufs_inode->ui_u2.ui_symlink));
 751	}
 752
 753	if (!inode->i_nlink)
 754		memset (ufs_inode, 0, sizeof(struct ufs_inode));
 755}
 756
 757static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
 758{
 759	struct super_block *sb = inode->i_sb;
 760	struct ufs_inode_info *ufsi = UFS_I(inode);
 761
 762	UFSD("ENTER\n");
 763	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
 764	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
 765
 766	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
 767	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
 768
 769	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
 770	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
 771	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
 772	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
 773	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
 774	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
 775	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
 776
 777	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
 778	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
 779	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
 780
 781	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 782		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
 783		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
 784	} else if (inode->i_blocks) {
 785		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
 786		       sizeof(ufs_inode->ui_u2.ui_addr));
 787	} else {
 788		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
 789		       sizeof(ufs_inode->ui_u2.ui_symlink));
 790 	}
 791
 792	if (!inode->i_nlink)
 793		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
 794	UFSD("EXIT\n");
 795}
 796
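/*
 * Write the in-core inode into the buffer holding its on-disk copy,
 * dispatching on the UFS1/UFS2 magic, and optionally wait for the buffer
 * to reach the disk.
 */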
 797static int ufs_update_inode(struct inode * inode, int do_sync)
 798{
 799	struct super_block *sb = inode->i_sb;
 800	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 801	struct buffer_head * bh;
 802
 803	UFSD("ENTER, ino %lu\n", inode->i_ino);
 804
 805	if (inode->i_ino < UFS_ROOTINO ||
 806	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
 807		ufs_warning (sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
 808		return -1;
 809	}
 810
 811	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
 812	if (!bh) {
 813		ufs_warning (sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
 814		return -1;
 815	}
 816	if (uspi->fs_magic == UFS2_MAGIC) {
 817		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
 818
 819		ufs2_update_inode(inode,
 820				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
 821	} else {
 822		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;
 823
 824		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
 825	}
 826
 827	mark_buffer_dirty(bh);
 828	if (do_sync)
 829		sync_dirty_buffer(bh);
 830	brelse (bh);
 831
 832	UFSD("EXIT\n");
 833	return 0;
 834}
 835
 836int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
 837{
 838	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 839}
 840
 841int ufs_sync_inode (struct inode *inode)
 842{
 843	return ufs_update_inode (inode, 1);
 844}
 845
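/*
 * Final iput(): if the last link is gone and the inode is not bad, free
 * its data blocks and release the on-disk inode; otherwise just drop the
 * in-core copy.
 */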
 846void ufs_evict_inode(struct inode * inode)
 847{
 848	int want_delete = 0;
 849
 850	if (!inode->i_nlink && !is_bad_inode(inode))
 851		want_delete = 1;
 852
 853	truncate_inode_pages_final(&inode->i_data);
 854	if (want_delete) {
 855		inode->i_size = 0;
 856		if (inode->i_blocks &&
 857		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 858		     S_ISLNK(inode->i_mode)))
 859			ufs_truncate_blocks(inode);
 860		ufs_update_inode(inode, inode_needs_sync(inode));
 861	}
 862
 863	invalidate_inode_buffers(inode);
 864	clear_inode(inode);
 865
 866	if (want_delete)
 867		ufs_free_inode(inode);
 868}
 869
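/*
 * Truncation helper: adjacent runs of blocks to be freed are accumulated
 * in a struct to_free so that ufs_free_blocks() is called once per
 * contiguous extent.  free_data(ctx, 0, 0) flushes whatever is pending.
 */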
 870struct to_free {
 871	struct inode *inode;
 872	u64 to;
 873	unsigned count;
 874};
 875
 876static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
 877{
 878	if (ctx->count && ctx->to != from) {
 879		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
 880		ctx->count = 0;
 881	}
 882	ctx->count += count;
 883	ctx->to = from + count;
 884}
 885
 886#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
 887
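/*
 * Free everything past the new end of file that lives in the direct
 * block area: first the fragments up to the next block boundary, then
 * the whole blocks, then the fragments of the last partial block.
 */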
 888static void ufs_trunc_direct(struct inode *inode)
 889{
 890	struct ufs_inode_info *ufsi = UFS_I(inode);
 891	struct super_block * sb;
 892	struct ufs_sb_private_info * uspi;
 893	void *p;
 894	u64 frag1, frag2, frag3, frag4, block1, block2;
 895	struct to_free ctx = {.inode = inode};
 896	unsigned i, tmp;
 897
 898	UFSD("ENTER: ino %lu\n", inode->i_ino);
 899
 900	sb = inode->i_sb;
 901	uspi = UFS_SB(sb)->s_uspi;
 902
 903	frag1 = DIRECT_FRAGMENT;
 904	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
 905	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
 906	frag3 = frag4 & ~uspi->s_fpbmask;
 907	block1 = block2 = 0;
 908	if (frag2 > frag3) {
 909		frag2 = frag4;
 910		frag3 = frag4 = 0;
 911	} else if (frag2 < frag3) {
 912		block1 = ufs_fragstoblks (frag2);
 913		block2 = ufs_fragstoblks (frag3);
 914	}
 915
 916	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
 917	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
 918	     (unsigned long long)frag1, (unsigned long long)frag2,
 919	     (unsigned long long)block1, (unsigned long long)block2,
 920	     (unsigned long long)frag3, (unsigned long long)frag4);
 921
 922	if (frag1 >= frag2)
 923		goto next1;
 924
 925	/*
 926	 * Free the first free fragments (up to the next block boundary)
 927	 */
 928	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
 929	tmp = ufs_data_ptr_to_cpu(sb, p);
 930	if (!tmp)
 931		ufs_panic (sb, "ufs_trunc_direct", "internal error");
 932	frag2 -= frag1;
 933	frag1 = ufs_fragnum (frag1);
 934
 935	ufs_free_fragments(inode, tmp + frag1, frag2);
 936
 937next1:
 938	/*
 939	 * Free whole blocks
 940	 */
 941	for (i = block1 ; i < block2; i++) {
 942		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
 943		tmp = ufs_data_ptr_to_cpu(sb, p);
 944		if (!tmp)
 945			continue;
 946		write_seqlock(&ufsi->meta_lock);
 947		ufs_data_ptr_clear(uspi, p);
 948		write_sequnlock(&ufsi->meta_lock);
 949
 950		free_data(&ctx, tmp, uspi->s_fpb);
 951	}
 952
 953	free_data(&ctx, 0, 0);
 954
 955	if (frag3 >= frag4)
 956		goto next3;
 957
 958	/*
 959	 * Free the fragments of the last, partially allocated block
 960	 */
 961	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
 962	tmp = ufs_data_ptr_to_cpu(sb, p);
 963	if (!tmp)
 964		ufs_panic(sb, "ufs_trunc_direct", "internal error");
 965	frag4 = ufs_fragnum (frag4);
 966	write_seqlock(&ufsi->meta_lock);
 967	ufs_data_ptr_clear(uspi, p);
 968	write_sequnlock(&ufsi->meta_lock);
 969
 970	ufs_free_fragments (inode, tmp, frag4);
 971 next3:
 972
 973	UFSD("EXIT: ino %lu\n", inode->i_ino);
 974}
 975
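/*
 * Free a complete branch of indirect blocks: recurse through the
 * indirect block at ind_block, release every block it references, then
 * drop the indirect block itself.
 */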
 976static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
 977{
 978	struct super_block *sb = inode->i_sb;
 979	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 980	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
 981	unsigned i;
 982
 983	if (!ubh)
 984		return;
 985
 986	if (--depth) {
 987		for (i = 0; i < uspi->s_apb; i++) {
 988			void *p = ubh_get_data_ptr(uspi, ubh, i);
 989			u64 block = ufs_data_ptr_to_cpu(sb, p);
 990			if (block)
 991				free_full_branch(inode, block, depth);
 992		}
 993	} else {
 994		struct to_free ctx = {.inode = inode};
 995
 996		for (i = 0; i < uspi->s_apb; i++) {
 997			void *p = ubh_get_data_ptr(uspi, ubh, i);
 998			u64 block = ufs_data_ptr_to_cpu(sb, p);
 999			if (block)
1000				free_data(&ctx, block, uspi->s_fpb);
1001		}
1002		free_data(&ctx, 0, 0);
1003	}
1004
1005	ubh_bforget(ubh);
1006	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
1007}
1008
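/*
 * Free the tail of a partially truncated indirect block: entries from
 * index "from" onwards are cleared under meta_lock and the blocks (or
 * deeper branches) they referenced are released.
 */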
1009static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
1010{
1011	struct super_block *sb = inode->i_sb;
1012	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1013	unsigned i;
1014
1015	if (--depth) {
1016		for (i = from; i < uspi->s_apb ; i++) {
1017			void *p = ubh_get_data_ptr(uspi, ubh, i);
1018			u64 block = ufs_data_ptr_to_cpu(sb, p);
1019			if (block) {
1020				write_seqlock(&UFS_I(inode)->meta_lock);
1021				ufs_data_ptr_clear(uspi, p);
1022				write_sequnlock(&UFS_I(inode)->meta_lock);
1023				ubh_mark_buffer_dirty(ubh);
1024				free_full_branch(inode, block, depth);
1025			}
1026		}
1027	} else {
1028		struct to_free ctx = {.inode = inode};
1029
1030		for (i = from; i < uspi->s_apb; i++) {
1031			void *p = ubh_get_data_ptr(uspi, ubh, i);
1032			u64 block = ufs_data_ptr_to_cpu(sb, p);
1033			if (block) {
1034				write_seqlock(&UFS_I(inode)->meta_lock);
1035				ufs_data_ptr_clear(uspi, p);
1036				write_sequnlock(&UFS_I(inode)->meta_lock);
1037				ubh_mark_buffer_dirty(ubh);
1038				free_data(&ctx, block, uspi->s_fpb);
1039			}
1040		}
1041		free_data(&ctx, 0, 0);
1042	}
1043	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
1044		ubh_sync_block(ubh);
1045	ubh_brelse(ubh);
1046}
1047
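/*
 * Make sure the fragment containing the new end of file is allocated,
 * and, when that fragment lies beyond the direct area, zero out the
 * remaining fragments of its block on disk.
 */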
1048static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
1049{
1050	int err = 0;
1051	struct super_block *sb = inode->i_sb;
1052	struct address_space *mapping = inode->i_mapping;
1053	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1054	unsigned i, end;
1055	sector_t lastfrag;
1056	struct page *lastpage;
1057	struct buffer_head *bh;
1058	u64 phys64;
1059
1060	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
1061
1062	if (!lastfrag)
1063		goto out;
1064
1065	lastfrag--;
1066
1067	lastpage = ufs_get_locked_page(mapping, lastfrag >>
1068				       (PAGE_SHIFT - inode->i_blkbits));
1069	if (IS_ERR(lastpage)) {
1070		err = -EIO;
1071		goto out;
1072	}
1073
1074	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
1075	bh = page_buffers(lastpage);
1076	for (i = 0; i < end; ++i)
1077		bh = bh->b_this_page;
1078
1079
1080	err = ufs_getfrag_block(inode, lastfrag, bh, 1);
1081
1082	if (unlikely(err))
1083		goto out_unlock;
1084
1085	if (buffer_new(bh)) {
1086		clear_buffer_new(bh);
1087		clean_bdev_bh_alias(bh);
1088		/*
1089		 * We do not zero the fragment here: if it was mapped to a
1090		 * hole, it already contains zeroes.
1091		 */
1092		set_buffer_uptodate(bh);
1093		mark_buffer_dirty(bh);
1094		set_page_dirty(lastpage);
1095	}
1096
1097	if (lastfrag >= UFS_IND_FRAGMENT) {
1098		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
1099		phys64 = bh->b_blocknr + 1;
1100		for (i = 0; i < end; ++i) {
1101			bh = sb_getblk(sb, i + phys64);
1102			lock_buffer(bh);
1103			memset(bh->b_data, 0, sb->s_blocksize);
1104			set_buffer_uptodate(bh);
1105			mark_buffer_dirty(bh);
1106			unlock_buffer(bh);
1107			sync_dirty_buffer(bh);
1108			brelse(bh);
1109		}
1110	}
1111out_unlock:
1112	ufs_put_locked_page(lastpage);
1113out:
1114	return err;
1115}
1116
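/*
 * Free every block beyond the current i_size: the direct blocks, the
 * tails of partially kept indirect branches, and any branches that are
 * no longer needed at all.  Serialized against allocation by
 * truncate_mutex.
 */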
1117static void ufs_truncate_blocks(struct inode *inode)
1118{
1119	struct ufs_inode_info *ufsi = UFS_I(inode);
1120	struct super_block *sb = inode->i_sb;
1121	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1122	unsigned offsets[4];
1123	int depth;
1124	int depth2;
1125	unsigned i;
1126	struct ufs_buffer_head *ubh[3];
1127	void *p;
1128	u64 block;
1129
1130	if (inode->i_size) {
1131		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
1132		depth = ufs_block_to_path(inode, last, offsets);
1133		if (!depth)
1134			return;
1135	} else {
1136		depth = 1;
1137	}
1138
1139	for (depth2 = depth - 1; depth2; depth2--)
1140		if (offsets[depth2] != uspi->s_apb - 1)
1141			break;
1142
1143	mutex_lock(&ufsi->truncate_mutex);
1144	if (depth == 1) {
1145		ufs_trunc_direct(inode);
1146		offsets[0] = UFS_IND_BLOCK;
1147	} else {
1148		/* get the blocks that should be partially emptied */
1149		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
1150		for (i = 0; i < depth2; i++) {
1151			block = ufs_data_ptr_to_cpu(sb, p);
1152			if (!block)
1153				break;
1154			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
1155			if (!ubh[i]) {
1156				write_seqlock(&ufsi->meta_lock);
1157				ufs_data_ptr_clear(uspi, p);
1158				write_sequnlock(&ufsi->meta_lock);
1159				break;
1160			}
1161			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
1162		}
1163		while (i--)
1164			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
1165	}
1166	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
1167		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
1168		block = ufs_data_ptr_to_cpu(sb, p);
1169		if (block) {
1170			write_seqlock(&ufsi->meta_lock);
1171			ufs_data_ptr_clear(uspi, p);
1172			write_sequnlock(&ufsi->meta_lock);
1173			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
1174		}
1175	}
1176	read_seqlock_excl(&ufsi->meta_lock);
1177	ufsi->i_lastfrag = DIRECT_FRAGMENT;
1178	read_sequnlock_excl(&ufsi->meta_lock);
1179	mark_inode_dirty(inode);
1180	mutex_unlock(&ufsi->truncate_mutex);
1181}
1182
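/*
 * Change the size of a regular file, directory or symlink: allocate the
 * new last block, zero the partial page beyond the new EOF, update
 * i_size and free the blocks that are no longer in use.
 */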
1183static int ufs_truncate(struct inode *inode, loff_t size)
1184{
1185	int err = 0;
1186
1187	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
1188	     inode->i_ino, (unsigned long long)size,
1189	     (unsigned long long)i_size_read(inode));
1190
1191	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1192	      S_ISLNK(inode->i_mode)))
1193		return -EINVAL;
1194	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1195		return -EPERM;
1196
1197	err = ufs_alloc_lastblock(inode, size);
1198
1199	if (err)
1200		goto out;
1201
1202	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);
1203
1204	truncate_setsize(inode, size);
1205
1206	ufs_truncate_blocks(inode);
1207	inode->i_mtime = inode->i_ctime = current_time(inode);
1208	mark_inode_dirty(inode);
1209out:
1210	UFSD("EXIT: err %d\n", err);
1211	return err;
1212}
1213
1214int ufs_setattr(struct dentry *dentry, struct iattr *attr)
1215{
1216	struct inode *inode = d_inode(dentry);
1217	unsigned int ia_valid = attr->ia_valid;
1218	int error;
1219
1220	error = setattr_prepare(dentry, attr);
1221	if (error)
1222		return error;
1223
1224	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
1225		error = ufs_truncate(inode, attr->ia_size);
1226		if (error)
1227			return error;
1228	}
1229
1230	setattr_copy(inode, attr);
1231	mark_inode_dirty(inode);
1232	return 0;
1233}
1234
1235const struct inode_operations ufs_file_inode_operations = {
1236	.setattr = ufs_setattr,
1237};