v4.10.11
 
   1/*
   2 *  linux/fs/ufs/inode.c
   3 *
   4 * Copyright (C) 1998
   5 * Daniel Pirkl <daniel.pirkl@email.cz>
   6 * Charles University, Faculty of Mathematics and Physics
   7 *
   8 *  from
   9 *
  10 *  linux/fs/ext2/inode.c
  11 *
  12 * Copyright (C) 1992, 1993, 1994, 1995
  13 * Remy Card (card@masi.ibp.fr)
  14 * Laboratoire MASI - Institut Blaise Pascal
  15 * Universite Pierre et Marie Curie (Paris VI)
  16 *
  17 *  from
  18 *
  19 *  linux/fs/minix/inode.c
  20 *
  21 *  Copyright (C) 1991, 1992  Linus Torvalds
  22 *
  23 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
  24 *  Big-endian to little-endian byte-swapping/bitmaps by
  25 *        David S. Miller (davem@caip.rutgers.edu), 1995
  26 */
  27
  28#include <linux/uaccess.h>
  29
  30#include <linux/errno.h>
  31#include <linux/fs.h>
  32#include <linux/time.h>
  33#include <linux/stat.h>
  34#include <linux/string.h>
  35#include <linux/mm.h>
  36#include <linux/buffer_head.h>
  37#include <linux/writeback.h>
  38
  39#include "ufs_fs.h"
  40#include "ufs.h"
  41#include "swab.h"
  42#include "util.h"
  43
  44static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
  45{
  46	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
  47	int ptrs = uspi->s_apb;
  48	int ptrs_bits = uspi->s_apbshift;
  49	const long direct_blocks = UFS_NDADDR,
  50		indirect_blocks = ptrs,
  51		double_blocks = (1 << (ptrs_bits * 2));
  52	int n = 0;
  53
  54
  55	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
  56	if (i_block < direct_blocks) {
  57		offsets[n++] = i_block;
  58	} else if ((i_block -= direct_blocks) < indirect_blocks) {
  59		offsets[n++] = UFS_IND_BLOCK;
  60		offsets[n++] = i_block;
  61	} else if ((i_block -= indirect_blocks) < double_blocks) {
  62		offsets[n++] = UFS_DIND_BLOCK;
  63		offsets[n++] = i_block >> ptrs_bits;
  64		offsets[n++] = i_block & (ptrs - 1);
  65	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
  66		offsets[n++] = UFS_TIND_BLOCK;
  67		offsets[n++] = i_block >> (ptrs_bits * 2);
  68		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
  69		offsets[n++] = i_block & (ptrs - 1);
  70	} else {
  71		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
  72	}
  73	return n;
  74}
  75
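/*
 * Illustrative mapping (geometry assumed purely for the arithmetic): with
 * UFS_NDADDR = 12 direct pointers and ptrs = 1024 pointers per indirect
 * block (ptrs_bits = 10), logical block 1041 = 12 + 1024 + 5 lands in the
 * double-indirect tree, so ufs_block_to_path() fills offsets[] with
 * { UFS_DIND_BLOCK, 5 >> 10 = 0, 5 & 1023 = 5 } and returns n = 3.
 */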
  76typedef struct {
  77	void	*p;
  78	union {
  79		__fs32	key32;
  80		__fs64	key64;
  81	};
  82	struct buffer_head *bh;
  83} Indirect;
  84
  85static inline int grow_chain32(struct ufs_inode_info *ufsi,
  86			       struct buffer_head *bh, __fs32 *v,
  87			       Indirect *from, Indirect *to)
  88{
  89	Indirect *p;
  90	unsigned seq;
  91	to->bh = bh;
  92	do {
  93		seq = read_seqbegin(&ufsi->meta_lock);
  94		to->key32 = *(__fs32 *)(to->p = v);
  95		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
  96			;
  97	} while (read_seqretry(&ufsi->meta_lock, seq));
  98	return (p > to);
  99}
 100
 101static inline int grow_chain64(struct ufs_inode_info *ufsi,
 102			       struct buffer_head *bh, __fs64 *v,
 103			       Indirect *from, Indirect *to)
 104{
 105	Indirect *p;
 106	unsigned seq;
 107	to->bh = bh;
 108	do {
 109		seq = read_seqbegin(&ufsi->meta_lock);
 110		to->key64 = *(__fs64 *)(to->p = v);
 111		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
 112			;
 113	} while (read_seqretry(&ufsi->meta_lock, seq));
 114	return (p > to);
 115}
 116
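/*
 * In short: grow_chain32()/grow_chain64() snapshot the next pointer of the
 * indirect chain under ufsi->meta_lock (a seqlock) and re-check that every
 * earlier link recorded in chain[] still matches the pointer it was read
 * from.  If a concurrent truncate cleared one of those links the check
 * fails, the helper returns 0, and ufs_frag_map() below drops the chain
 * and restarts from the "again:" label.
 */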
 117/*
 118 * Returns the location of the fragment from
 119 * the beginning of the filesystem.
 120 */
 121
 122static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
 123{
 124	struct ufs_inode_info *ufsi = UFS_I(inode);
 125	struct super_block *sb = inode->i_sb;
 126	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 127	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
 128	int shift = uspi->s_apbshift-uspi->s_fpbshift;
 129	Indirect chain[4], *q = chain;
 130	unsigned *p;
 131	unsigned flags = UFS_SB(sb)->s_flags;
 132	u64 res = 0;
 133
 134	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
 135		uspi->s_fpbshift, uspi->s_apbmask,
 136		(unsigned long long)mask);
 137
 138	if (depth == 0)
 139		goto no_block;
 140
 141again:
 142	p = offsets;
 143
 144	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 145		goto ufs2;
 146
 147	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
 148		goto changed;
 149	if (!q->key32)
 150		goto no_block;
 151	while (--depth) {
 152		__fs32 *ptr;
 153		struct buffer_head *bh;
 154		unsigned n = *p++;
 155
 156		bh = sb_bread(sb, uspi->s_sbbase +
 157				  fs32_to_cpu(sb, q->key32) + (n>>shift));
 158		if (!bh)
 159			goto no_block;
 160		ptr = (__fs32 *)bh->b_data + (n & mask);
 161		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
 162			goto changed;
 163		if (!q->key32)
 164			goto no_block;
 165	}
 166	res = fs32_to_cpu(sb, q->key32);
 167	goto found;
 168
 169ufs2:
 170	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
 171		goto changed;
 172	if (!q->key64)
 173		goto no_block;
 174
 175	while (--depth) {
 176		__fs64 *ptr;
 177		struct buffer_head *bh;
 178		unsigned n = *p++;
 179
 180		bh = sb_bread(sb, uspi->s_sbbase +
 181				  fs64_to_cpu(sb, q->key64) + (n>>shift));
 182		if (!bh)
 183			goto no_block;
 184		ptr = (__fs64 *)bh->b_data + (n & mask);
 185		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
 186			goto changed;
 187		if (!q->key64)
 188			goto no_block;
 189	}
 190	res = fs64_to_cpu(sb, q->key64);
 191found:
 192	res += uspi->s_sbbase;
 193no_block:
 194	while (q > chain) {
 195		brelse(q->bh);
 196		q--;
 197	}
 198	return res;
 199
 200changed:
 201	while (q > chain) {
 202		brelse(q->bh);
 203		q--;
 204	}
 205	goto again;
 206}
 207
 208/*
 209 * Unpacking tails: we have a file with partial final block and
 210 * we had been asked to extend it.  If the fragment being written
 211 * is within the same block, we need to extend the tail just to cover
 212 * that fragment.  Otherwise the tail is extended to full block.
 213 *
 214 * Note that we might need to create a _new_ tail, but that will
 215 * be handled elsewhere; this is strictly for resizing old
 216 * ones.
 217 */
 218static bool
 219ufs_extend_tail(struct inode *inode, u64 writes_to,
 220		  int *err, struct page *locked_page)
 221{
 222	struct ufs_inode_info *ufsi = UFS_I(inode);
 223	struct super_block *sb = inode->i_sb;
 224	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 225	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
 226	unsigned block = ufs_fragstoblks(lastfrag);
 227	unsigned new_size;
 228	void *p;
 229	u64 tmp;
 230
 231	if (writes_to < (lastfrag | uspi->s_fpbmask))
 232		new_size = (writes_to & uspi->s_fpbmask) + 1;
 233	else
 234		new_size = uspi->s_fpb;
 235
 236	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
 237	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
 238				new_size, err, locked_page);
 239	return tmp != 0;
 240}
 241
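/*
 * Worked example with an assumed geometry of 8 fragments per block
 * (s_fpbmask = 7): if i_lastfrag = 21, the tail block currently covers
 * fragments 16..20.  A write to fragment 22 satisfies 22 < (21 | 7) = 23,
 * so new_size = (22 & 7) + 1 = 7 fragments; a write to fragment 23 or
 * beyond extends the tail to the full block, new_size = s_fpb = 8.
 */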
 242/**
 243 * ufs_inode_getfrag() - allocate new fragment(s)
 244 * @inode: pointer to inode
 245 * @index: number of block pointer within the inode's array.
 246 * @new_fragment: number of new allocated fragment(s)
  247 * @err: we set it if something goes wrong
 248 * @new: we set it if we allocate new block
 249 * @locked_page: for ufs_new_fragments()
 250 */
 251static u64
 252ufs_inode_getfrag(struct inode *inode, unsigned index,
 253		  sector_t new_fragment, int *err,
 254		  int *new, struct page *locked_page)
 255{
 256	struct ufs_inode_info *ufsi = UFS_I(inode);
 257	struct super_block *sb = inode->i_sb;
 258	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 259	u64 tmp, goal, lastfrag;
 260	unsigned nfrags = uspi->s_fpb;
 261	void *p;
 262
 263        /* TODO : to be done for write support
 264        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 265             goto ufs2;
 266         */
 267
 268	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
 269	tmp = ufs_data_ptr_to_cpu(sb, p);
 270	if (tmp)
 271		goto out;
 272
 273	lastfrag = ufsi->i_lastfrag;
 274
 275	/* will that be a new tail? */
 276	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
 277		nfrags = (new_fragment & uspi->s_fpbmask) + 1;
 278
 279	goal = 0;
 280	if (index) {
 281		goal = ufs_data_ptr_to_cpu(sb,
 282				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
 283		if (goal)
 284			goal += uspi->s_fpb;
 285	}
 286	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
 287				goal, uspi->s_fpb, err, locked_page);
 288
 289	if (!tmp) {
 290		*err = -ENOSPC;
 291		return 0;
 292	}
 293
 294	if (new)
 295		*new = 1;
 296	inode->i_ctime = current_time(inode);
 297	if (IS_SYNC(inode))
 298		ufs_sync_inode (inode);
 299	mark_inode_dirty(inode);
 300out:
 301	return tmp + uspi->s_sbbase;
 302
 303     /* This part : To be implemented ....
 304        Required only for writing, not required for READ-ONLY.
 305ufs2:
 306
 307	u2_block = ufs_fragstoblks(fragment);
 308	u2_blockoff = ufs_fragnum(fragment);
 309	p = ufsi->i_u1.u2_i_data + block;
 310	goal = 0;
 311
 312repeat2:
 313	tmp = fs32_to_cpu(sb, *p);
 314	lastfrag = ufsi->i_lastfrag;
 315
 316     */
 317}
 318
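/*
 * The goal above is plain block clustering: if the preceding direct
 * pointer (index - 1) is already mapped, ufs_new_fragments() is asked to
 * allocate immediately behind it (previous block + s_fpb fragments), which
 * keeps small files physically contiguous whenever the allocator can
 * honour the goal.
 */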
 319/**
 320 * ufs_inode_getblock() - allocate new block
 321 * @inode: pointer to inode
 322 * @ind_block: block number of the indirect block
 323 * @index: number of pointer within the indirect block
 324 * @new_fragment: number of new allocated fragment
 325 *  (block will hold this fragment and also uspi->s_fpb-1)
 326 * @err: see ufs_inode_getfrag()
 327 * @new: see ufs_inode_getfrag()
 328 * @locked_page: see ufs_inode_getfrag()
 329 */
 330static u64
 331ufs_inode_getblock(struct inode *inode, u64 ind_block,
 332		  unsigned index, sector_t new_fragment, int *err,
 333		  int *new, struct page *locked_page)
 334{
 335	struct super_block *sb = inode->i_sb;
 336	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 337	int shift = uspi->s_apbshift - uspi->s_fpbshift;
 338	u64 tmp = 0, goal;
 339	struct buffer_head *bh;
 340	void *p;
 341
 342	if (!ind_block)
 343		return 0;
 344
 345	bh = sb_bread(sb, ind_block + (index >> shift));
 346	if (unlikely(!bh)) {
 347		*err = -EIO;
 348		return 0;
 349	}
 350
 351	index &= uspi->s_apbmask >> uspi->s_fpbshift;
 352	if (uspi->fs_magic == UFS2_MAGIC)
 353		p = (__fs64 *)bh->b_data + index;
 354	else
 355		p = (__fs32 *)bh->b_data + index;
 356
 357	tmp = ufs_data_ptr_to_cpu(sb, p);
 358	if (tmp)
 359		goto out;
 360
 361	if (index && (uspi->fs_magic == UFS2_MAGIC ?
 362		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
 363		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
 364		goal = tmp + uspi->s_fpb;
 365	else
 366		goal = bh->b_blocknr + uspi->s_fpb;
 367	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
 368				uspi->s_fpb, err, locked_page);
 369	if (!tmp)
 370		goto out;
 371
 372	if (new)
 373		*new = 1;
 374
 375	mark_buffer_dirty(bh);
 376	if (IS_SYNC(inode))
 377		sync_dirty_buffer(bh);
 378	inode->i_ctime = current_time(inode);
 379	mark_inode_dirty(inode);
 380out:
 381	brelse (bh);
 382	UFSD("EXIT\n");
 383	if (tmp)
 384		tmp += uspi->s_sbbase;
 385	return tmp;
 386}
 387
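/*
 * Addressing note: ind_block and the unit read by sb_bread() are fragments
 * (UFS addresses the block device in fragment-sized units), while one
 * indirect block spans s_fpb fragments.  Each fragment therefore holds
 * 2^(s_apbshift - s_fpbshift) pointers, so "index >> shift" selects the
 * fragment of the indirect block containing pointer #index, and the later
 * "index &= s_apbmask >> s_fpbshift" reduces index to the pointer's offset
 * within that fragment.
 */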
 388/**
 389 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 390 * readpage, writepage and so on
 391 */
 392
 393static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
 394{
 395	struct super_block *sb = inode->i_sb;
 396	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 397	int err = 0, new = 0;
 398	unsigned offsets[4];
 399	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
 400	u64 phys64 = 0;
 401	unsigned frag = fragment & uspi->s_fpbmask;
 402
 403	if (!create) {
 404		phys64 = ufs_frag_map(inode, offsets, depth);
 405		goto out;
 406	}
 407
 408        /* This code entered only while writing ....? */
 409
 410	mutex_lock(&UFS_I(inode)->truncate_mutex);
 411
 412	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
 413	if (unlikely(!depth)) {
 414		ufs_warning(sb, "ufs_get_block", "block > big");
 415		err = -EIO;
 416		goto out;
 417	}
 418
 419	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
 420		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
 421		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
 422		if (tailfrags && fragment >= lastfrag) {
 423			if (!ufs_extend_tail(inode, fragment,
 424					     &err, bh_result->b_page))
 425				goto out;
 426		}
 427	}
 428
 429	if (depth == 1) {
 430		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
 431					   &err, &new, bh_result->b_page);
 432	} else {
 433		int i;
 434		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
 435					   &err, NULL, NULL);
 436		for (i = 1; i < depth - 1; i++)
 437			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
 438						fragment, &err, NULL, NULL);
 439		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
 440					fragment, &err, &new, bh_result->b_page);
 441	}
 442out:
 443	if (phys64) {
 444		phys64 += frag;
 445		map_bh(bh_result, sb, phys64);
 446		if (new)
 447			set_buffer_new(bh_result);
 448	}
 449	mutex_unlock(&UFS_I(inode)->truncate_mutex);
 450	return err;
 451}
 452
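/*
 * As a get_block_t callback this follows the usual contract: on success
 * map_bh() stores the physical fragment number in bh_result, and
 * buffer_new is set only for freshly allocated blocks, so the generic
 * buffer code knows the block must not be read from disk and should be
 * zeroed where needed.
 */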
 453static int ufs_writepage(struct page *page, struct writeback_control *wbc)
 454{
 455	return block_write_full_page(page,ufs_getfrag_block,wbc);
 456}
 457
 458static int ufs_readpage(struct file *file, struct page *page)
 459{
 460	return block_read_full_page(page,ufs_getfrag_block);
 461}
 462
 463int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
 464{
 465	return __block_write_begin(page, pos, len, ufs_getfrag_block);
 466}
 467
 468static void ufs_truncate_blocks(struct inode *);
 469
 470static void ufs_write_failed(struct address_space *mapping, loff_t to)
 471{
 472	struct inode *inode = mapping->host;
 473
 474	if (to > inode->i_size) {
 475		truncate_pagecache(inode, inode->i_size);
 476		ufs_truncate_blocks(inode);
 477	}
 478}
 479
 480static int ufs_write_begin(struct file *file, struct address_space *mapping,
 481			loff_t pos, unsigned len, unsigned flags,
 482			struct page **pagep, void **fsdata)
 483{
 484	int ret;
 485
 486	ret = block_write_begin(mapping, pos, len, flags, pagep,
 487				ufs_getfrag_block);
 488	if (unlikely(ret))
 489		ufs_write_failed(mapping, pos + len);
 490
 491	return ret;
 492}
 493
 494static int ufs_write_end(struct file *file, struct address_space *mapping,
 495			loff_t pos, unsigned len, unsigned copied,
 496			struct page *page, void *fsdata)
 497{
 498	int ret;
 499
 500	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 501	if (ret < len)
 502		ufs_write_failed(mapping, pos + len);
 503	return ret;
 504}
 505
 506static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
 507{
 508	return generic_block_bmap(mapping,block,ufs_getfrag_block);
 509}
 510
 511const struct address_space_operations ufs_aops = {
 512	.readpage = ufs_readpage,
 513	.writepage = ufs_writepage,
 514	.write_begin = ufs_write_begin,
 515	.write_end = ufs_write_end,
 516	.bmap = ufs_bmap
 517};
 518
 519static void ufs_set_inode_ops(struct inode *inode)
 520{
 521	if (S_ISREG(inode->i_mode)) {
 522		inode->i_op = &ufs_file_inode_operations;
 523		inode->i_fop = &ufs_file_operations;
 524		inode->i_mapping->a_ops = &ufs_aops;
 525	} else if (S_ISDIR(inode->i_mode)) {
 526		inode->i_op = &ufs_dir_inode_operations;
 527		inode->i_fop = &ufs_dir_operations;
 528		inode->i_mapping->a_ops = &ufs_aops;
 529	} else if (S_ISLNK(inode->i_mode)) {
 530		if (!inode->i_blocks) {
 531			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
 532			inode->i_op = &simple_symlink_inode_operations;
 533		} else {
 534			inode->i_mapping->a_ops = &ufs_aops;
 535			inode->i_op = &page_symlink_inode_operations;
 536			inode_nohighmem(inode);
 537		}
 538	} else
 539		init_special_inode(inode, inode->i_mode,
 540				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
 541}
 542
 543static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 544{
 545	struct ufs_inode_info *ufsi = UFS_I(inode);
 546	struct super_block *sb = inode->i_sb;
 547	umode_t mode;
 548
 549	/*
 550	 * Copy data to the in-core inode.
 551	 */
 552	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
 553	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
 554	if (inode->i_nlink == 0) {
 555		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
 556		return -1;
 557	}
 558
 559	/*
 560	 * Linux now has 32-bit uid and gid, so we can support EFT.
 561	 */
 562	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
 563	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
 564
 565	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
 566	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
 567	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
 568	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
 569	inode->i_mtime.tv_nsec = 0;
 570	inode->i_atime.tv_nsec = 0;
 571	inode->i_ctime.tv_nsec = 0;
 572	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
 573	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
 574	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
 575	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
 576	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
 577
 578
 579	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
 580		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
 581		       sizeof(ufs_inode->ui_u2.ui_addr));
 582	} else {
 583		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
 584		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
 585		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
 586	}
 587	return 0;
 588}
 589
 590static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
 591{
 592	struct ufs_inode_info *ufsi = UFS_I(inode);
 593	struct super_block *sb = inode->i_sb;
 594	umode_t mode;
 595
 596	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
 597	/*
 598	 * Copy data to the in-core inode.
 599	 */
 600	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
 601	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
 602	if (inode->i_nlink == 0) {
 603		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
 604		return -1;
 605	}
 606
 607        /*
 608         * Linux now has 32-bit uid and gid, so we can support EFT.
 609         */
 610	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
 611	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));
 612
 613	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
 614	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
 615	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
 616	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
 617	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
 618	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
 619	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
 620	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
 621	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
 622	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
 623	/*
 624	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
 625	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
 626	*/
 627
 628	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
 629		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
 630		       sizeof(ufs2_inode->ui_u2.ui_addr));
 631	} else {
 632		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
 633		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
 634		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
 635	}
 636	return 0;
 637}
 638
 639struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 640{
 641	struct ufs_inode_info *ufsi;
 642	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 643	struct buffer_head * bh;
 644	struct inode *inode;
 645	int err;
 646
 647	UFSD("ENTER, ino %lu\n", ino);
 648
 649	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
 650		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
 651			    ino);
 652		return ERR_PTR(-EIO);
 653	}
 654
 655	inode = iget_locked(sb, ino);
 656	if (!inode)
 657		return ERR_PTR(-ENOMEM);
 658	if (!(inode->i_state & I_NEW))
 659		return inode;
 660
 661	ufsi = UFS_I(inode);
 662
 663	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
 664	if (!bh) {
 665		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
 666			    inode->i_ino);
 667		goto bad_inode;
 668	}
 669	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
 670		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
 671
 672		err = ufs2_read_inode(inode,
 673				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
 674	} else {
 675		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
 676
 677		err = ufs1_read_inode(inode,
 678				      ufs_inode + ufs_inotofsbo(inode->i_ino));
 679	}
 680
 681	if (err)
 682		goto bad_inode;
 683	inode->i_version++;
 684	ufsi->i_lastfrag =
 685		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
 686	ufsi->i_dir_start_lookup = 0;
 687	ufsi->i_osync = 0;
 688
 689	ufs_set_inode_ops(inode);
 690
 691	brelse(bh);
 692
 693	UFSD("EXIT\n");
 694	unlock_new_inode(inode);
 695	return inode;
 696
 697bad_inode:
 698	iget_failed(inode);
 699	return ERR_PTR(-EIO);
 700}
 701
 702static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 703{
 704	struct super_block *sb = inode->i_sb;
 705 	struct ufs_inode_info *ufsi = UFS_I(inode);
 706
 707	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
 708	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
 709
 710	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
 711	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
 712
 713	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
 714	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
 715	ufs_inode->ui_atime.tv_usec = 0;
 716	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
 717	ufs_inode->ui_ctime.tv_usec = 0;
 718	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
 719	ufs_inode->ui_mtime.tv_usec = 0;
 720	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
 721	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
 722	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
 723
 724	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
 725		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
 726		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
 727	}
 728
 729	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 730		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
 731		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
 732	} else if (inode->i_blocks) {
 733		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
 734		       sizeof(ufs_inode->ui_u2.ui_addr));
 735	}
 736	else {
 737		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
 738		       sizeof(ufs_inode->ui_u2.ui_symlink));
 739	}
 740
 741	if (!inode->i_nlink)
 742		memset (ufs_inode, 0, sizeof(struct ufs_inode));
 743}
 744
 745static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
 746{
 747	struct super_block *sb = inode->i_sb;
 748 	struct ufs_inode_info *ufsi = UFS_I(inode);
 749
 750	UFSD("ENTER\n");
 751	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
 752	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
 753
 754	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
 755	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
 756
 757	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
 758	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
 759	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
 760	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
 761	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
 762	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
 763	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
 764
 765	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
 766	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
 767	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
 768
 769	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 770		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
 771		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
 772	} else if (inode->i_blocks) {
 773		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
 774		       sizeof(ufs_inode->ui_u2.ui_addr));
 775	} else {
 776		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
 777		       sizeof(ufs_inode->ui_u2.ui_symlink));
 778 	}
 779
 780	if (!inode->i_nlink)
 781		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
 782	UFSD("EXIT\n");
 783}
 784
 785static int ufs_update_inode(struct inode * inode, int do_sync)
 786{
 787	struct super_block *sb = inode->i_sb;
 788	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 789	struct buffer_head * bh;
 790
 791	UFSD("ENTER, ino %lu\n", inode->i_ino);
 792
 793	if (inode->i_ino < UFS_ROOTINO ||
 794	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
 795		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
 796		return -1;
 797	}
 798
 799	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
 800	if (!bh) {
 801		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
 802		return -1;
 803	}
 804	if (uspi->fs_magic == UFS2_MAGIC) {
 805		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
 806
 807		ufs2_update_inode(inode,
 808				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
 809	} else {
 810		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;
 811
 812		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
 813	}
 814
 815	mark_buffer_dirty(bh);
 816	if (do_sync)
 817		sync_dirty_buffer(bh);
 818	brelse (bh);
 819
 820	UFSD("EXIT\n");
 821	return 0;
 822}
 823
 824int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
 825{
 826	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 827}
 828
 829int ufs_sync_inode (struct inode *inode)
 830{
 831	return ufs_update_inode (inode, 1);
 832}
 833
 834void ufs_evict_inode(struct inode * inode)
 835{
 836	int want_delete = 0;
 837
 838	if (!inode->i_nlink && !is_bad_inode(inode))
 839		want_delete = 1;
 840
 841	truncate_inode_pages_final(&inode->i_data);
 842	if (want_delete) {
 843		inode->i_size = 0;
 844		if (inode->i_blocks)
 845			ufs_truncate_blocks(inode);
 846	}
 847
 848	invalidate_inode_buffers(inode);
 849	clear_inode(inode);
 850
 851	if (want_delete)
 852		ufs_free_inode(inode);
 853}
 854
 855struct to_free {
 856	struct inode *inode;
 857	u64 to;
 858	unsigned count;
 859};
 860
 861static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
 862{
 863	if (ctx->count && ctx->to != from) {
 864		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
 865		ctx->count = 0;
 866	}
 867	ctx->count += count;
 868	ctx->to = from + count;
 869}
 870
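/*
 * free_data() batches frees: as long as successive calls pass physically
 * contiguous ranges it only extends ctx->count, and it issues a single
 * ufs_free_blocks() call once the run is broken.  The trailing
 * free_data(&ctx, 0, 0) calls in the truncate paths below exist only to
 * flush the last pending run.
 */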
 871#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
 872#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
 873
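/*
 * Both macros round i_size up to whole allocation units.  For example
 * (illustrative sizes: 1024-byte fragments, 8192-byte blocks) a file of
 * 5000 bytes gives DIRECT_FRAGMENT = 5 and DIRECT_BLOCK = 1, i.e. the
 * index of the first fragment/block lying entirely past the end of file.
 */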
 874static void ufs_trunc_direct(struct inode *inode)
 875{
 876	struct ufs_inode_info *ufsi = UFS_I(inode);
 877	struct super_block * sb;
 878	struct ufs_sb_private_info * uspi;
 879	void *p;
 880	u64 frag1, frag2, frag3, frag4, block1, block2;
 881	struct to_free ctx = {.inode = inode};
 882	unsigned i, tmp;
 883
 884	UFSD("ENTER: ino %lu\n", inode->i_ino);
 885
 886	sb = inode->i_sb;
 887	uspi = UFS_SB(sb)->s_uspi;
 888
 889	frag1 = DIRECT_FRAGMENT;
 890	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
 891	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
 892	frag3 = frag4 & ~uspi->s_fpbmask;
 893	block1 = block2 = 0;
 894	if (frag2 > frag3) {
 895		frag2 = frag4;
 896		frag3 = frag4 = 0;
 897	} else if (frag2 < frag3) {
 898		block1 = ufs_fragstoblks (frag2);
 899		block2 = ufs_fragstoblks (frag3);
 900	}
 901
 902	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
 903	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
 904	     (unsigned long long)frag1, (unsigned long long)frag2,
 905	     (unsigned long long)block1, (unsigned long long)block2,
 906	     (unsigned long long)frag3, (unsigned long long)frag4);
 907
 908	if (frag1 >= frag2)
 909		goto next1;
 910
 911	/*
 912	 * Free first free fragments
 913	 */
 914	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
 915	tmp = ufs_data_ptr_to_cpu(sb, p);
 916	if (!tmp )
 917		ufs_panic (sb, "ufs_trunc_direct", "internal error");
 918	frag2 -= frag1;
 919	frag1 = ufs_fragnum (frag1);
 920
 921	ufs_free_fragments(inode, tmp + frag1, frag2);
 922
 923next1:
 924	/*
 925	 * Free whole blocks
 926	 */
 927	for (i = block1 ; i < block2; i++) {
 928		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
 929		tmp = ufs_data_ptr_to_cpu(sb, p);
 930		if (!tmp)
 931			continue;
 932		write_seqlock(&ufsi->meta_lock);
 933		ufs_data_ptr_clear(uspi, p);
 934		write_sequnlock(&ufsi->meta_lock);
 935
 936		free_data(&ctx, tmp, uspi->s_fpb);
 937	}
 938
 939	free_data(&ctx, 0, 0);
 940
 941	if (frag3 >= frag4)
 942		goto next3;
 943
 944	/*
 945	 * Free last free fragments
 946	 */
 947	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
 948	tmp = ufs_data_ptr_to_cpu(sb, p);
 949	if (!tmp )
 950		ufs_panic(sb, "ufs_truncate_direct", "internal error");
 951	frag4 = ufs_fragnum (frag4);
 952	write_seqlock(&ufsi->meta_lock);
 953	ufs_data_ptr_clear(uspi, p);
 954	write_sequnlock(&ufsi->meta_lock);
 955
 956	ufs_free_fragments (inode, tmp, frag4);
 957 next3:
 958
 959	UFSD("EXIT: ino %lu\n", inode->i_ino);
 960}
 961
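/*
 * Worked example (assumed geometry of 8 fragments per block): truncating
 * so that DIRECT_FRAGMENT = frag1 = 10 while the old i_lastfrag = 29 gives
 * frag2 = 16, frag3 = 24, frag4 = 29, block1 = 2, block2 = 3.  The
 * function above then frees fragments 10..15 (the unused tail of block 1),
 * whole block 2 (fragments 16..23), and finally fragments 24..28 after
 * clearing block 3's direct pointer.
 */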
 962static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
 963{
 964	struct super_block *sb = inode->i_sb;
 965	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 966	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
 967	unsigned i;
 968
 969	if (!ubh)
 970		return;
 971
 972	if (--depth) {
 973		for (i = 0; i < uspi->s_apb; i++) {
 974			void *p = ubh_get_data_ptr(uspi, ubh, i);
 975			u64 block = ufs_data_ptr_to_cpu(sb, p);
 976			if (block)
 977				free_full_branch(inode, block, depth);
 978		}
 979	} else {
 980		struct to_free ctx = {.inode = inode};
 981
 982		for (i = 0; i < uspi->s_apb; i++) {
 983			void *p = ubh_get_data_ptr(uspi, ubh, i);
 984			u64 block = ufs_data_ptr_to_cpu(sb, p);
 985			if (block)
 986				free_data(&ctx, block, uspi->s_fpb);
 987		}
 988		free_data(&ctx, 0, 0);
 989	}
 990
 991	ubh_bforget(ubh);
 992	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
 993}
 994
 995static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
 996{
 997	struct super_block *sb = inode->i_sb;
 998	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 999	unsigned i;
1000
1001	if (--depth) {
1002		for (i = from; i < uspi->s_apb ; i++) {
1003			void *p = ubh_get_data_ptr(uspi, ubh, i);
1004			u64 block = ufs_data_ptr_to_cpu(sb, p);
1005			if (block) {
1006				write_seqlock(&UFS_I(inode)->meta_lock);
1007				ufs_data_ptr_clear(uspi, p);
1008				write_sequnlock(&UFS_I(inode)->meta_lock);
1009				ubh_mark_buffer_dirty(ubh);
1010				free_full_branch(inode, block, depth);
1011			}
1012		}
1013	} else {
1014		struct to_free ctx = {.inode = inode};
1015
1016		for (i = from; i < uspi->s_apb; i++) {
1017			void *p = ubh_get_data_ptr(uspi, ubh, i);
1018			u64 block = ufs_data_ptr_to_cpu(sb, p);
1019			if (block) {
1020				write_seqlock(&UFS_I(inode)->meta_lock);
1021				ufs_data_ptr_clear(uspi, p);
1022				write_sequnlock(&UFS_I(inode)->meta_lock);
1023				ubh_mark_buffer_dirty(ubh);
1024				free_data(&ctx, block, uspi->s_fpb);
1025			}
1026		}
1027		free_data(&ctx, 0, 0);
1028	}
1029	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
1030		ubh_sync_block(ubh);
1031	ubh_brelse(ubh);
1032}
1033
1034static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
1035{
1036	int err = 0;
1037	struct super_block *sb = inode->i_sb;
1038	struct address_space *mapping = inode->i_mapping;
1039	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1040	unsigned i, end;
1041	sector_t lastfrag;
1042	struct page *lastpage;
1043	struct buffer_head *bh;
1044	u64 phys64;
1045
1046	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
1047
1048	if (!lastfrag)
1049		goto out;
1050
1051	lastfrag--;
1052
1053	lastpage = ufs_get_locked_page(mapping, lastfrag >>
1054				       (PAGE_SHIFT - inode->i_blkbits));
1055       if (IS_ERR(lastpage)) {
1056               err = -EIO;
1057               goto out;
1058       }
1059
1060       end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
1061       bh = page_buffers(lastpage);
1062       for (i = 0; i < end; ++i)
1063               bh = bh->b_this_page;
1064
1065
1066       err = ufs_getfrag_block(inode, lastfrag, bh, 1);
1067
1068       if (unlikely(err))
1069	       goto out_unlock;
1070
1071       if (buffer_new(bh)) {
1072	       clear_buffer_new(bh);
1073	       clean_bdev_bh_alias(bh);
1074	       /*
 1075		* we do not zero the fragment: if it was mapped
 1076		* to a hole, it already contains zeroes
1077		*/
1078	       set_buffer_uptodate(bh);
1079	       mark_buffer_dirty(bh);
1080	       set_page_dirty(lastpage);
1081       }
1082
1083       if (lastfrag >= UFS_IND_FRAGMENT) {
1084	       end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
1085	       phys64 = bh->b_blocknr + 1;
1086	       for (i = 0; i < end; ++i) {
1087		       bh = sb_getblk(sb, i + phys64);
1088		       lock_buffer(bh);
1089		       memset(bh->b_data, 0, sb->s_blocksize);
1090		       set_buffer_uptodate(bh);
1091		       mark_buffer_dirty(bh);
1092		       unlock_buffer(bh);
1093		       sync_dirty_buffer(bh);
1094		       brelse(bh);
1095	       }
1096       }
1097out_unlock:
1098       ufs_put_locked_page(lastpage);
1099out:
1100       return err;
1101}
1102
1103static void __ufs_truncate_blocks(struct inode *inode)
1104{
1105	struct ufs_inode_info *ufsi = UFS_I(inode);
1106	struct super_block *sb = inode->i_sb;
1107	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1108	unsigned offsets[4];
1109	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
1110	int depth2;
1111	unsigned i;
1112	struct ufs_buffer_head *ubh[3];
1113	void *p;
1114	u64 block;
1115
1116	if (!depth)
1117		return;
1118
1119	/* find the last non-zero in offsets[] */
1120	for (depth2 = depth - 1; depth2; depth2--)
1121		if (offsets[depth2])
1122			break;
1123
1124	mutex_lock(&ufsi->truncate_mutex);
1125	if (depth == 1) {
1126		ufs_trunc_direct(inode);
1127		offsets[0] = UFS_IND_BLOCK;
1128	} else {
1129		/* get the blocks that should be partially emptied */
1130		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
1131		for (i = 0; i < depth2; i++) {
1132			offsets[i]++;	/* next branch is fully freed */
1133			block = ufs_data_ptr_to_cpu(sb, p);
1134			if (!block)
1135				break;
1136			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
1137			if (!ubh[i]) {
1138				write_seqlock(&ufsi->meta_lock);
1139				ufs_data_ptr_clear(uspi, p);
1140				write_sequnlock(&ufsi->meta_lock);
1141				break;
1142			}
1143			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
1144		}
1145		while (i--)
1146			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
1147	}
1148	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
1149		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
1150		block = ufs_data_ptr_to_cpu(sb, p);
1151		if (block) {
1152			write_seqlock(&ufsi->meta_lock);
1153			ufs_data_ptr_clear(uspi, p);
1154			write_sequnlock(&ufsi->meta_lock);
1155			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
1156		}
1157	}
1158	ufsi->i_lastfrag = DIRECT_FRAGMENT;
1159	mark_inode_dirty(inode);
1160	mutex_unlock(&ufsi->truncate_mutex);
1161}
1162
1163static int ufs_truncate(struct inode *inode, loff_t size)
1164{
1165	int err = 0;
1166
1167	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
1168	     inode->i_ino, (unsigned long long)size,
1169	     (unsigned long long)i_size_read(inode));
1170
1171	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1172	      S_ISLNK(inode->i_mode)))
1173		return -EINVAL;
1174	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1175		return -EPERM;
1176
1177	err = ufs_alloc_lastblock(inode, size);
1178
1179	if (err)
1180		goto out;
1181
1182	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);
1183
1184	truncate_setsize(inode, size);
1185
1186	__ufs_truncate_blocks(inode);
1187	inode->i_mtime = inode->i_ctime = current_time(inode);
1188	mark_inode_dirty(inode);
1189out:
1190	UFSD("EXIT: err %d\n", err);
1191	return err;
1192}
1193
1194static void ufs_truncate_blocks(struct inode *inode)
1195{
1196	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1197	      S_ISLNK(inode->i_mode)))
1198		return;
1199	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1200		return;
1201	__ufs_truncate_blocks(inode);
1202}
1203
1204int ufs_setattr(struct dentry *dentry, struct iattr *attr)
1205{
1206	struct inode *inode = d_inode(dentry);
1207	unsigned int ia_valid = attr->ia_valid;
1208	int error;
1209
1210	error = setattr_prepare(dentry, attr);
1211	if (error)
1212		return error;
1213
1214	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
1215		error = ufs_truncate(inode, attr->ia_size);
1216		if (error)
1217			return error;
1218	}
1219
1220	setattr_copy(inode, attr);
1221	mark_inode_dirty(inode);
1222	return 0;
1223}
1224
1225const struct inode_operations ufs_file_inode_operations = {
1226	.setattr = ufs_setattr,
1227};
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/fs/ufs/inode.c
   4 *
   5 * Copyright (C) 1998
   6 * Daniel Pirkl <daniel.pirkl@email.cz>
   7 * Charles University, Faculty of Mathematics and Physics
   8 *
   9 *  from
  10 *
  11 *  linux/fs/ext2/inode.c
  12 *
  13 * Copyright (C) 1992, 1993, 1994, 1995
  14 * Remy Card (card@masi.ibp.fr)
  15 * Laboratoire MASI - Institut Blaise Pascal
  16 * Universite Pierre et Marie Curie (Paris VI)
  17 *
  18 *  from
  19 *
  20 *  linux/fs/minix/inode.c
  21 *
  22 *  Copyright (C) 1991, 1992  Linus Torvalds
  23 *
  24 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
  25 *  Big-endian to little-endian byte-swapping/bitmaps by
  26 *        David S. Miller (davem@caip.rutgers.edu), 1995
  27 */
  28
  29#include <linux/uaccess.h>
  30
  31#include <linux/errno.h>
  32#include <linux/fs.h>
  33#include <linux/time.h>
  34#include <linux/stat.h>
  35#include <linux/string.h>
  36#include <linux/mm.h>
  37#include <linux/buffer_head.h>
  38#include <linux/writeback.h>
  39#include <linux/iversion.h>
  40
  41#include "ufs_fs.h"
  42#include "ufs.h"
  43#include "swab.h"
  44#include "util.h"
  45
  46static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
  47{
  48	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
  49	int ptrs = uspi->s_apb;
  50	int ptrs_bits = uspi->s_apbshift;
  51	const long direct_blocks = UFS_NDADDR,
  52		indirect_blocks = ptrs,
  53		double_blocks = (1 << (ptrs_bits * 2));
  54	int n = 0;
  55
  56
  57	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
  58	if (i_block < direct_blocks) {
  59		offsets[n++] = i_block;
  60	} else if ((i_block -= direct_blocks) < indirect_blocks) {
  61		offsets[n++] = UFS_IND_BLOCK;
  62		offsets[n++] = i_block;
  63	} else if ((i_block -= indirect_blocks) < double_blocks) {
  64		offsets[n++] = UFS_DIND_BLOCK;
  65		offsets[n++] = i_block >> ptrs_bits;
  66		offsets[n++] = i_block & (ptrs - 1);
  67	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
  68		offsets[n++] = UFS_TIND_BLOCK;
  69		offsets[n++] = i_block >> (ptrs_bits * 2);
  70		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
  71		offsets[n++] = i_block & (ptrs - 1);
  72	} else {
  73		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
  74	}
  75	return n;
  76}
  77
  78typedef struct {
  79	void	*p;
  80	union {
  81		__fs32	key32;
  82		__fs64	key64;
  83	};
  84	struct buffer_head *bh;
  85} Indirect;
  86
  87static inline int grow_chain32(struct ufs_inode_info *ufsi,
  88			       struct buffer_head *bh, __fs32 *v,
  89			       Indirect *from, Indirect *to)
  90{
  91	Indirect *p;
  92	unsigned seq;
  93	to->bh = bh;
  94	do {
  95		seq = read_seqbegin(&ufsi->meta_lock);
  96		to->key32 = *(__fs32 *)(to->p = v);
  97		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
  98			;
  99	} while (read_seqretry(&ufsi->meta_lock, seq));
 100	return (p > to);
 101}
 102
 103static inline int grow_chain64(struct ufs_inode_info *ufsi,
 104			       struct buffer_head *bh, __fs64 *v,
 105			       Indirect *from, Indirect *to)
 106{
 107	Indirect *p;
 108	unsigned seq;
 109	to->bh = bh;
 110	do {
 111		seq = read_seqbegin(&ufsi->meta_lock);
 112		to->key64 = *(__fs64 *)(to->p = v);
 113		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
 114			;
 115	} while (read_seqretry(&ufsi->meta_lock, seq));
 116	return (p > to);
 117}
 118
 119/*
 120 * Returns the location of the fragment from
 121 * the beginning of the filesystem.
 122 */
 123
 124static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
 125{
 126	struct ufs_inode_info *ufsi = UFS_I(inode);
 127	struct super_block *sb = inode->i_sb;
 128	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 129	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
 130	int shift = uspi->s_apbshift-uspi->s_fpbshift;
 131	Indirect chain[4], *q = chain;
 132	unsigned *p;
 133	unsigned flags = UFS_SB(sb)->s_flags;
 134	u64 res = 0;
 135
 136	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
 137		uspi->s_fpbshift, uspi->s_apbmask,
 138		(unsigned long long)mask);
 139
 140	if (depth == 0)
 141		goto no_block;
 142
 143again:
 144	p = offsets;
 145
 146	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 147		goto ufs2;
 148
 149	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
 150		goto changed;
 151	if (!q->key32)
 152		goto no_block;
 153	while (--depth) {
 154		__fs32 *ptr;
 155		struct buffer_head *bh;
 156		unsigned n = *p++;
 157
 158		bh = sb_bread(sb, uspi->s_sbbase +
 159				  fs32_to_cpu(sb, q->key32) + (n>>shift));
 160		if (!bh)
 161			goto no_block;
 162		ptr = (__fs32 *)bh->b_data + (n & mask);
 163		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
 164			goto changed;
 165		if (!q->key32)
 166			goto no_block;
 167	}
 168	res = fs32_to_cpu(sb, q->key32);
 169	goto found;
 170
 171ufs2:
 172	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
 173		goto changed;
 174	if (!q->key64)
 175		goto no_block;
 176
 177	while (--depth) {
 178		__fs64 *ptr;
 179		struct buffer_head *bh;
 180		unsigned n = *p++;
 181
 182		bh = sb_bread(sb, uspi->s_sbbase +
 183				  fs64_to_cpu(sb, q->key64) + (n>>shift));
 184		if (!bh)
 185			goto no_block;
 186		ptr = (__fs64 *)bh->b_data + (n & mask);
 187		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
 188			goto changed;
 189		if (!q->key64)
 190			goto no_block;
 191	}
 192	res = fs64_to_cpu(sb, q->key64);
 193found:
 194	res += uspi->s_sbbase;
 195no_block:
 196	while (q > chain) {
 197		brelse(q->bh);
 198		q--;
 199	}
 200	return res;
 201
 202changed:
 203	while (q > chain) {
 204		brelse(q->bh);
 205		q--;
 206	}
 207	goto again;
 208}
 209
 210/*
 211 * Unpacking tails: we have a file with partial final block and
 212 * we had been asked to extend it.  If the fragment being written
 213 * is within the same block, we need to extend the tail just to cover
 214 * that fragment.  Otherwise the tail is extended to full block.
 215 *
 216 * Note that we might need to create a _new_ tail, but that will
 217 * be handled elsewhere; this is strictly for resizing old
 218 * ones.
 219 */
 220static bool
 221ufs_extend_tail(struct inode *inode, u64 writes_to,
 222		  int *err, struct page *locked_page)
 223{
 224	struct ufs_inode_info *ufsi = UFS_I(inode);
 225	struct super_block *sb = inode->i_sb;
 226	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 227	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
 228	unsigned block = ufs_fragstoblks(lastfrag);
 229	unsigned new_size;
 230	void *p;
 231	u64 tmp;
 232
 233	if (writes_to < (lastfrag | uspi->s_fpbmask))
 234		new_size = (writes_to & uspi->s_fpbmask) + 1;
 235	else
 236		new_size = uspi->s_fpb;
 237
 238	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
 239	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
 240				new_size - (lastfrag & uspi->s_fpbmask), err,
 241				locked_page);
 242	return tmp != 0;
 243}
 244
 245/**
 246 * ufs_inode_getfrag() - allocate new fragment(s)
 247 * @inode: pointer to inode
 248 * @index: number of block pointer within the inode's array.
 249 * @new_fragment: number of new allocated fragment(s)
  250 * @err: we set it if something goes wrong
 251 * @new: we set it if we allocate new block
 252 * @locked_page: for ufs_new_fragments()
 253 */
 254static u64
 255ufs_inode_getfrag(struct inode *inode, unsigned index,
 256		  sector_t new_fragment, int *err,
 257		  int *new, struct page *locked_page)
 258{
 259	struct ufs_inode_info *ufsi = UFS_I(inode);
 260	struct super_block *sb = inode->i_sb;
 261	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 262	u64 tmp, goal, lastfrag;
 263	unsigned nfrags = uspi->s_fpb;
 264	void *p;
 265
 266        /* TODO : to be done for write support
 267        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 268             goto ufs2;
 269         */
 270
 271	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
 272	tmp = ufs_data_ptr_to_cpu(sb, p);
 273	if (tmp)
 274		goto out;
 275
 276	lastfrag = ufsi->i_lastfrag;
 277
 278	/* will that be a new tail? */
 279	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
 280		nfrags = (new_fragment & uspi->s_fpbmask) + 1;
 281
 282	goal = 0;
 283	if (index) {
 284		goal = ufs_data_ptr_to_cpu(sb,
 285				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
 286		if (goal)
 287			goal += uspi->s_fpb;
 288	}
 289	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
 290				goal, nfrags, err, locked_page);
 291
 292	if (!tmp) {
 293		*err = -ENOSPC;
 294		return 0;
 295	}
 296
 297	if (new)
 298		*new = 1;
 299	inode->i_ctime = current_time(inode);
 300	if (IS_SYNC(inode))
 301		ufs_sync_inode (inode);
 302	mark_inode_dirty(inode);
 303out:
 304	return tmp + uspi->s_sbbase;
 305
 306     /* This part : To be implemented ....
 307        Required only for writing, not required for READ-ONLY.
 308ufs2:
 309
 310	u2_block = ufs_fragstoblks(fragment);
 311	u2_blockoff = ufs_fragnum(fragment);
 312	p = ufsi->i_u1.u2_i_data + block;
 313	goal = 0;
 314
 315repeat2:
 316	tmp = fs32_to_cpu(sb, *p);
 317	lastfrag = ufsi->i_lastfrag;
 318
 319     */
 320}
 321
 322/**
 323 * ufs_inode_getblock() - allocate new block
 324 * @inode: pointer to inode
 325 * @ind_block: block number of the indirect block
 326 * @index: number of pointer within the indirect block
 327 * @new_fragment: number of new allocated fragment
 328 *  (block will hold this fragment and also uspi->s_fpb-1)
 329 * @err: see ufs_inode_getfrag()
 330 * @new: see ufs_inode_getfrag()
 331 * @locked_page: see ufs_inode_getfrag()
 332 */
 333static u64
 334ufs_inode_getblock(struct inode *inode, u64 ind_block,
 335		  unsigned index, sector_t new_fragment, int *err,
 336		  int *new, struct page *locked_page)
 337{
 338	struct super_block *sb = inode->i_sb;
 339	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 340	int shift = uspi->s_apbshift - uspi->s_fpbshift;
 341	u64 tmp = 0, goal;
 342	struct buffer_head *bh;
 343	void *p;
 344
 345	if (!ind_block)
 346		return 0;
 347
 348	bh = sb_bread(sb, ind_block + (index >> shift));
 349	if (unlikely(!bh)) {
 350		*err = -EIO;
 351		return 0;
 352	}
 353
 354	index &= uspi->s_apbmask >> uspi->s_fpbshift;
 355	if (uspi->fs_magic == UFS2_MAGIC)
 356		p = (__fs64 *)bh->b_data + index;
 357	else
 358		p = (__fs32 *)bh->b_data + index;
 359
 360	tmp = ufs_data_ptr_to_cpu(sb, p);
 361	if (tmp)
 362		goto out;
 363
 364	if (index && (uspi->fs_magic == UFS2_MAGIC ?
 365		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
 366		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
 367		goal = tmp + uspi->s_fpb;
 368	else
 369		goal = bh->b_blocknr + uspi->s_fpb;
 370	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
 371				uspi->s_fpb, err, locked_page);
 372	if (!tmp)
 373		goto out;
 374
 375	if (new)
 376		*new = 1;
 377
 378	mark_buffer_dirty(bh);
 379	if (IS_SYNC(inode))
 380		sync_dirty_buffer(bh);
 381	inode->i_ctime = current_time(inode);
 382	mark_inode_dirty(inode);
 383out:
 384	brelse (bh);
 385	UFSD("EXIT\n");
 386	if (tmp)
 387		tmp += uspi->s_sbbase;
 388	return tmp;
 389}
 390
 391/**
 392 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 393 * read_folio, writepage and so on
 394 */
 395
 396static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
 397{
 398	struct super_block *sb = inode->i_sb;
 399	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 400	int err = 0, new = 0;
 401	unsigned offsets[4];
 402	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
 403	u64 phys64 = 0;
 404	unsigned frag = fragment & uspi->s_fpbmask;
 405
 406	phys64 = ufs_frag_map(inode, offsets, depth);
 407	if (!create)
 408		goto done;
 409
 410	if (phys64) {
 411		if (fragment >= UFS_NDIR_FRAGMENT)
 412			goto done;
 413		read_seqlock_excl(&UFS_I(inode)->meta_lock);
 414		if (fragment < UFS_I(inode)->i_lastfrag) {
 415			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
 416			goto done;
 417		}
 418		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
 419	}
 420        /* This code entered only while writing ....? */
 421
 422	mutex_lock(&UFS_I(inode)->truncate_mutex);
 423
 424	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
 425	if (unlikely(!depth)) {
 426		ufs_warning(sb, "ufs_get_block", "block > big");
 427		err = -EIO;
 428		goto out;
 429	}
 430
 431	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
 432		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
 433		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
 434		if (tailfrags && fragment >= lastfrag) {
 435			if (!ufs_extend_tail(inode, fragment,
 436					     &err, bh_result->b_page))
 437				goto out;
 438		}
 439	}
 440
 441	if (depth == 1) {
 442		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
 443					   &err, &new, bh_result->b_page);
 444	} else {
 445		int i;
 446		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
 447					   &err, NULL, NULL);
 448		for (i = 1; i < depth - 1; i++)
 449			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
 450						fragment, &err, NULL, NULL);
 451		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
 452					fragment, &err, &new, bh_result->b_page);
 453	}
 454out:
 455	if (phys64) {
 456		phys64 += frag;
 457		map_bh(bh_result, sb, phys64);
 458		if (new)
 459			set_buffer_new(bh_result);
 460	}
 461	mutex_unlock(&UFS_I(inode)->truncate_mutex);
 462	return err;
 463
 464done:
 465	if (phys64)
 466		map_bh(bh_result, sb, phys64 + frag);
 467	return 0;
 468}
 469
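/*
 * Fast-path summary: the fragment is looked up with ufs_frag_map() first;
 * an already-mapped fragment is returned straight away via "done:" unless
 * it is a direct fragment at or beyond i_lastfrag (checked under
 * meta_lock), in which case control falls through to the allocation path
 * so the tail block can still be extended.
 */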
 470static int ufs_writepage(struct page *page, struct writeback_control *wbc)
 471{
 472	return block_write_full_page(page,ufs_getfrag_block,wbc);
 473}
 474
 475static int ufs_read_folio(struct file *file, struct folio *folio)
 476{
 477	return block_read_full_folio(folio, ufs_getfrag_block);
 478}
 479
 480int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
 481{
 482	return __block_write_begin(page, pos, len, ufs_getfrag_block);
 483}
 484
 485static void ufs_truncate_blocks(struct inode *);
 486
 487static void ufs_write_failed(struct address_space *mapping, loff_t to)
 488{
 489	struct inode *inode = mapping->host;
 490
 491	if (to > inode->i_size) {
 492		truncate_pagecache(inode, inode->i_size);
 493		ufs_truncate_blocks(inode);
 494	}
 495}
 496
 497static int ufs_write_begin(struct file *file, struct address_space *mapping,
 498			loff_t pos, unsigned len,
 499			struct page **pagep, void **fsdata)
 500{
 501	int ret;
 502
 503	ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block);
 504	if (unlikely(ret))
 505		ufs_write_failed(mapping, pos + len);
 506
 507	return ret;
 508}
 509
 510static int ufs_write_end(struct file *file, struct address_space *mapping,
 511			loff_t pos, unsigned len, unsigned copied,
 512			struct page *page, void *fsdata)
 513{
 514	int ret;
 515
 516	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 517	if (ret < len)
 518		ufs_write_failed(mapping, pos + len);
 519	return ret;
 520}
 521
 522static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
 523{
 524	return generic_block_bmap(mapping,block,ufs_getfrag_block);
 525}
 526
 527const struct address_space_operations ufs_aops = {
 528	.dirty_folio = block_dirty_folio,
 529	.invalidate_folio = block_invalidate_folio,
 530	.read_folio = ufs_read_folio,
 531	.writepage = ufs_writepage,
 532	.write_begin = ufs_write_begin,
 533	.write_end = ufs_write_end,
 534	.bmap = ufs_bmap
 535};
 536
 537static void ufs_set_inode_ops(struct inode *inode)
 538{
 539	if (S_ISREG(inode->i_mode)) {
 540		inode->i_op = &ufs_file_inode_operations;
 541		inode->i_fop = &ufs_file_operations;
 542		inode->i_mapping->a_ops = &ufs_aops;
 543	} else if (S_ISDIR(inode->i_mode)) {
 544		inode->i_op = &ufs_dir_inode_operations;
 545		inode->i_fop = &ufs_dir_operations;
 546		inode->i_mapping->a_ops = &ufs_aops;
 547	} else if (S_ISLNK(inode->i_mode)) {
 548		if (!inode->i_blocks) {
 549			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
 550			inode->i_op = &simple_symlink_inode_operations;
 551		} else {
 552			inode->i_mapping->a_ops = &ufs_aops;
 553			inode->i_op = &page_symlink_inode_operations;
 554			inode_nohighmem(inode);
 555		}
 556	} else
 557		init_special_inode(inode, inode->i_mode,
 558				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
 559}
 560
 561static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 562{
 563	struct ufs_inode_info *ufsi = UFS_I(inode);
 564	struct super_block *sb = inode->i_sb;
 565	umode_t mode;
 566
 567	/*
 568	 * Copy data to the in-core inode.
 569	 */
 570	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
 571	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
 572	if (inode->i_nlink == 0)
 573		return -ESTALE;
 574
 575	/*
 576	 * Linux now has 32-bit uid and gid, so we can support EFT.
 577	 */
 578	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
 579	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
 580
 581	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
 582	inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
 583	inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
 584	inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
 585	inode->i_mtime.tv_nsec = 0;
 586	inode->i_atime.tv_nsec = 0;
 587	inode->i_ctime.tv_nsec = 0;
 588	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
 589	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
 590	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
 591	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
 592	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
 593
 594
 595	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
 596		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
 597		       sizeof(ufs_inode->ui_u2.ui_addr));
 598	} else {
 599		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
 600		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
 601		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
 602	}
 603	return 0;
 604}
 605
 606static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
 607{
 608	struct ufs_inode_info *ufsi = UFS_I(inode);
 609	struct super_block *sb = inode->i_sb;
 610	umode_t mode;
 611
 612	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
 613	/*
 614	 * Copy data to the in-core inode.
 615	 */
 616	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
 617	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
 618	if (inode->i_nlink == 0)
 619		return -ESTALE;
 620
 621	/*
 622	 * Linux now has 32-bit uid and gid, so we can support EFT.
 623	 */
 624	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
 625	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));
 626
 627	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
 628	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
 629	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
 630	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
 631	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
 632	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
 633	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
 634	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
 635	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
 636	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
 637	/*
 638	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
 639	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
 640	*/
 641
 642	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
 643		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
 644		       sizeof(ufs2_inode->ui_u2.ui_addr));
 645	} else {
 646		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
 647		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
 648		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
 649	}
 650	return 0;
 651}
 652
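/*
 * Look up or read in the inode with number @ino.  The number is validated
 * against the total count of inodes (cylinder groups times inodes per
 * group), the block holding the on-disk inode is read with sb_bread(), and
 * the raw data is decoded by the UFS1 or UFS2 helper depending on the
 * filesystem flavour recorded in the superblock flags.
 */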
 653struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 654{
 655	struct ufs_inode_info *ufsi;
 656	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 657	struct buffer_head * bh;
 658	struct inode *inode;
 659	int err = -EIO;
 660
 661	UFSD("ENTER, ino %lu\n", ino);
 662
 663	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
 664		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
 665			    ino);
 666		return ERR_PTR(-EIO);
 667	}
 668
 669	inode = iget_locked(sb, ino);
 670	if (!inode)
 671		return ERR_PTR(-ENOMEM);
 672	if (!(inode->i_state & I_NEW))
 673		return inode;
 674
 675	ufsi = UFS_I(inode);
 676
 677	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
 678	if (!bh) {
 679		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
 680			    inode->i_ino);
 681		goto bad_inode;
 682	}
 683	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
 684		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
 685
 686		err = ufs2_read_inode(inode,
 687				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
 688	} else {
 689		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
 690
 691		err = ufs1_read_inode(inode,
 692				      ufs_inode + ufs_inotofsbo(inode->i_ino));
 693	}
 694	brelse(bh);
 695	if (err)
 696		goto bad_inode;
 697
 698	inode_inc_iversion(inode);
 699	ufsi->i_lastfrag =
 700		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
 701	ufsi->i_dir_start_lookup = 0;
 702	ufsi->i_osync = 0;
 703
 704	ufs_set_inode_ops(inode);
 705
 706	UFSD("EXIT\n");
 707	unlock_new_inode(inode);
 708	return inode;
 709
 710bad_inode:
 711	iget_failed(inode);
 712	return ERR_PTR(err);
 713}
 714
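/*
 * Encode the in-core inode back into its on-disk UFS1 layout
 * (ufs2_update_inode below does the same for UFS2).  For character and
 * block devices only the first address slot is written back, for inodes
 * with data blocks the whole address array is copied, and fast symlinks
 * store the target string.  An inode with a zero link count is cleared
 * entirely on disk.
 */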
 715static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 716{
 717	struct super_block *sb = inode->i_sb;
 718 	struct ufs_inode_info *ufsi = UFS_I(inode);
 719
 720	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
 721	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
 722
 723	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
 724	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
 725
 726	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
 727	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
 728	ufs_inode->ui_atime.tv_usec = 0;
 729	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
 730	ufs_inode->ui_ctime.tv_usec = 0;
 731	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
 732	ufs_inode->ui_mtime.tv_usec = 0;
 733	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
 734	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
 735	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
 736
 737	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
 738		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
 739		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
 740	}
 741
 742	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 743		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
 744		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
 745	} else if (inode->i_blocks) {
 746		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
 747		       sizeof(ufs_inode->ui_u2.ui_addr));
 748	}
 749	else {
 750		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
 751		       sizeof(ufs_inode->ui_u2.ui_symlink));
 752	}
 753
 754	if (!inode->i_nlink)
 755		memset (ufs_inode, 0, sizeof(struct ufs_inode));
 756}
 757
 758static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
 759{
 760	struct super_block *sb = inode->i_sb;
 761 	struct ufs_inode_info *ufsi = UFS_I(inode);
 762
 763	UFSD("ENTER\n");
 764	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
 765	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
 766
 767	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
 768	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
 769
 770	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
 771	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
 772	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
 773	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
 774	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
 775	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
 776	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
 777
 778	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
 779	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
 780	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
 781
 782	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 783		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
 784		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
 785	} else if (inode->i_blocks) {
 786		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
 787		       sizeof(ufs_inode->ui_u2.ui_addr));
 788	} else {
 789		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
 790		       sizeof(ufs_inode->ui_u2.ui_symlink));
 791 	}
 792
 793	if (!inode->i_nlink)
 794		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
 795	UFSD("EXIT\n");
 796}
 797
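/*
 * Write a single inode to disk: read the buffer that contains its on-disk
 * slot, encode it with the UFS1 or UFS2 helper above, mark the buffer dirty
 * and, when @do_sync is set, wait for the buffer to reach the disk.
 */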
 798static int ufs_update_inode(struct inode * inode, int do_sync)
 799{
 800	struct super_block *sb = inode->i_sb;
 801	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 802	struct buffer_head * bh;
 803
 804	UFSD("ENTER, ino %lu\n", inode->i_ino);
 805
 806	if (inode->i_ino < UFS_ROOTINO ||
 807	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
 808		ufs_warning(sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
 809		return -1;
 810	}
 811
 812	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
 813	if (!bh) {
 814		ufs_warning(sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
 815		return -1;
 816	}
 817	if (uspi->fs_magic == UFS2_MAGIC) {
 818		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
 819
 820		ufs2_update_inode(inode,
 821				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
 822	} else {
 823		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;
 824
 825		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
 826	}
 827
 828	mark_buffer_dirty(bh);
 829	if (do_sync)
 830		sync_dirty_buffer(bh);
 831	brelse (bh);
 832
 833	UFSD("EXIT\n");
 834	return 0;
 835}
 836
 837int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
 838{
 839	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 840}
 841
 842int ufs_sync_inode (struct inode *inode)
 843{
 844	return ufs_update_inode (inode, 1);
 845}
 846
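/*
 * Final disposal of an in-core inode.  When the link count has dropped to
 * zero (and the inode is not marked bad), its remaining data blocks are
 * truncated away, the on-disk inode is updated one last time and the inode
 * itself is released via ufs_free_inode().
 */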
 847void ufs_evict_inode(struct inode * inode)
 848{
 849	int want_delete = 0;
 850
 851	if (!inode->i_nlink && !is_bad_inode(inode))
 852		want_delete = 1;
 853
 854	truncate_inode_pages_final(&inode->i_data);
 855	if (want_delete) {
 856		inode->i_size = 0;
 857		if (inode->i_blocks &&
 858		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 859		     S_ISLNK(inode->i_mode)))
 860			ufs_truncate_blocks(inode);
 861		ufs_update_inode(inode, inode_needs_sync(inode));
 862	}
 863
 864	invalidate_inode_buffers(inode);
 865	clear_inode(inode);
 866
 867	if (want_delete)
 868		ufs_free_inode(inode);
 869}
 870
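/*
 * free_data() batches frees of adjacent blocks: runs are accumulated in the
 * to_free context and handed to ufs_free_blocks() in one go as soon as a
 * non-contiguous range is passed in.  Callers flush the pending run with a
 * final free_data(ctx, 0, 0).
 */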
 871struct to_free {
 872	struct inode *inode;
 873	u64 to;
 874	unsigned count;
 875};
 876
 877static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
 878{
 879	if (ctx->count && ctx->to != from) {
 880		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
 881		ctx->count = 0;
 882	}
 883	ctx->count += count;
 884	ctx->to = from + count;
 885}
 886
 887#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
 888
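/*
 * Trim the direct-block area down to DIRECT_FRAGMENT.  Three ranges may need
 * freeing: the trailing fragments of the block that still holds data at the
 * new end of file, the whole blocks after it, and the leading fragments of
 * the old partial last block.
 */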
 889static void ufs_trunc_direct(struct inode *inode)
 890{
 891	struct ufs_inode_info *ufsi = UFS_I(inode);
 892	struct super_block * sb;
 893	struct ufs_sb_private_info * uspi;
 894	void *p;
 895	u64 frag1, frag2, frag3, frag4, block1, block2;
 896	struct to_free ctx = {.inode = inode};
 897	unsigned i, tmp;
 898
 899	UFSD("ENTER: ino %lu\n", inode->i_ino);
 900
 901	sb = inode->i_sb;
 902	uspi = UFS_SB(sb)->s_uspi;
 903
 904	frag1 = DIRECT_FRAGMENT;
 905	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
 906	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
 907	frag3 = frag4 & ~uspi->s_fpbmask;
 908	block1 = block2 = 0;
 909	if (frag2 > frag3) {
 910		frag2 = frag4;
 911		frag3 = frag4 = 0;
 912	} else if (frag2 < frag3) {
 913		block1 = ufs_fragstoblks (frag2);
 914		block2 = ufs_fragstoblks (frag3);
 915	}
 916
 917	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
 918	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
 919	     (unsigned long long)frag1, (unsigned long long)frag2,
 920	     (unsigned long long)block1, (unsigned long long)block2,
 921	     (unsigned long long)frag3, (unsigned long long)frag4);
 922
 923	if (frag1 >= frag2)
 924		goto next1;
 925
 926	/*
 927	 * Free first free fragments
 928	 */
 929	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
 930	tmp = ufs_data_ptr_to_cpu(sb, p);
 931	if (!tmp)
 932		ufs_panic (sb, "ufs_trunc_direct", "internal error");
 933	frag2 -= frag1;
 934	frag1 = ufs_fragnum (frag1);
 935
 936	ufs_free_fragments(inode, tmp + frag1, frag2);
 937
 938next1:
 939	/*
 940	 * Free whole blocks
 941	 */
 942	for (i = block1 ; i < block2; i++) {
 943		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
 944		tmp = ufs_data_ptr_to_cpu(sb, p);
 945		if (!tmp)
 946			continue;
 947		write_seqlock(&ufsi->meta_lock);
 948		ufs_data_ptr_clear(uspi, p);
 949		write_sequnlock(&ufsi->meta_lock);
 950
 951		free_data(&ctx, tmp, uspi->s_fpb);
 952	}
 953
 954	free_data(&ctx, 0, 0);
 955
 956	if (frag3 >= frag4)
 957		goto next3;
 958
 959	/*
 960	 * Free last free fragments
 961	 */
 962	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
 963	tmp = ufs_data_ptr_to_cpu(sb, p);
 964	if (!tmp)
 965		ufs_panic(sb, "ufs_truncate_direct", "internal error");
 966	frag4 = ufs_fragnum (frag4);
 967	write_seqlock(&ufsi->meta_lock);
 968	ufs_data_ptr_clear(uspi, p);
 969	write_sequnlock(&ufsi->meta_lock);
 970
 971	ufs_free_fragments (inode, tmp, frag4);
 972 next3:
 973
 974	UFSD("EXIT: ino %lu\n", inode->i_ino);
 975}
 976
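/*
 * Release a whole indirect branch rooted at @ind_block.  With more than one
 * level left the routine recurses into every live pointer; at the last level
 * the data blocks are freed (batched through free_data()).  The indirect
 * block itself is forgotten and freed at the end.
 */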
 977static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
 978{
 979	struct super_block *sb = inode->i_sb;
 980	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 981	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
 982	unsigned i;
 983
 984	if (!ubh)
 985		return;
 986
 987	if (--depth) {
 988		for (i = 0; i < uspi->s_apb; i++) {
 989			void *p = ubh_get_data_ptr(uspi, ubh, i);
 990			u64 block = ufs_data_ptr_to_cpu(sb, p);
 991			if (block)
 992				free_full_branch(inode, block, depth);
 993		}
 994	} else {
 995		struct to_free ctx = {.inode = inode};
 996
 997		for (i = 0; i < uspi->s_apb; i++) {
 998			void *p = ubh_get_data_ptr(uspi, ubh, i);
 999			u64 block = ufs_data_ptr_to_cpu(sb, p);
1000			if (block)
1001				free_data(&ctx, block, uspi->s_fpb);
1002		}
1003		free_data(&ctx, 0, 0);
1004	}
1005
1006	ubh_bforget(ubh);
1007	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
1008}
1009
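/*
 * Free only the tail of an indirect block that itself stays allocated:
 * pointers from index @from upwards are cleared under meta_lock, the buffer
 * is marked dirty (and written out synchronously for IS_SYNC inodes), and
 * the branches or data blocks they referred to are released.
 */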
1010static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
1011{
1012	struct super_block *sb = inode->i_sb;
1013	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1014	unsigned i;
1015
1016	if (--depth) {
1017		for (i = from; i < uspi->s_apb ; i++) {
1018			void *p = ubh_get_data_ptr(uspi, ubh, i);
1019			u64 block = ufs_data_ptr_to_cpu(sb, p);
1020			if (block) {
1021				write_seqlock(&UFS_I(inode)->meta_lock);
1022				ufs_data_ptr_clear(uspi, p);
1023				write_sequnlock(&UFS_I(inode)->meta_lock);
1024				ubh_mark_buffer_dirty(ubh);
1025				free_full_branch(inode, block, depth);
1026			}
1027		}
1028	} else {
1029		struct to_free ctx = {.inode = inode};
1030
1031		for (i = from; i < uspi->s_apb; i++) {
1032			void *p = ubh_get_data_ptr(uspi, ubh, i);
1033			u64 block = ufs_data_ptr_to_cpu(sb, p);
1034			if (block) {
1035				write_seqlock(&UFS_I(inode)->meta_lock);
1036				ufs_data_ptr_clear(uspi, p);
1037				write_sequnlock(&UFS_I(inode)->meta_lock);
1038				ubh_mark_buffer_dirty(ubh);
1039				free_data(&ctx, block, uspi->s_fpb);
1040			}
1041		}
1042		free_data(&ctx, 0, 0);
1043	}
1044	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
1045		ubh_sync_block(ubh);
1046	ubh_brelse(ubh);
1047}
1048
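/*
 * Make sure the fragment holding the new end of file is really allocated
 * (an expanding truncate may land in a hole) and, once the file is large
 * enough to use indirect addressing, write zeroes to the remaining
 * fragments of that last block.
 */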
1049static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
1050{
1051	int err = 0;
1052	struct super_block *sb = inode->i_sb;
1053	struct address_space *mapping = inode->i_mapping;
1054	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1055	unsigned i, end;
1056	sector_t lastfrag;
1057	struct page *lastpage;
1058	struct buffer_head *bh;
1059	u64 phys64;
1060
1061	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
1062
1063	if (!lastfrag)
1064		goto out;
1065
1066	lastfrag--;
1067
1068	lastpage = ufs_get_locked_page(mapping, lastfrag >>
1069				       (PAGE_SHIFT - inode->i_blkbits));
1070	if (IS_ERR(lastpage)) {
1071		err = -EIO;
1072		goto out;
1073	}
1074
1075	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
1076	bh = page_buffers(lastpage);
1077	for (i = 0; i < end; ++i)
1078		bh = bh->b_this_page;
1079
1080
1081	err = ufs_getfrag_block(inode, lastfrag, bh, 1);
1082
1083	if (unlikely(err))
1084		goto out_unlock;
1085
1086	if (buffer_new(bh)) {
1087		clear_buffer_new(bh);
1088		clean_bdev_bh_alias(bh);
1089		/*
1090		 * We do not zero the fragment here: if it was mapped to
1091		 * a hole, it already contains zeroes.
1092		 */
1093		set_buffer_uptodate(bh);
1094		mark_buffer_dirty(bh);
1095		set_page_dirty(lastpage);
1096	}
1097
1098	if (lastfrag >= UFS_IND_FRAGMENT) {
1099		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
1100		phys64 = bh->b_blocknr + 1;
1101		for (i = 0; i < end; ++i) {
1102			bh = sb_getblk(sb, i + phys64);
1103			lock_buffer(bh);
1104			memset(bh->b_data, 0, sb->s_blocksize);
1105			set_buffer_uptodate(bh);
1106			mark_buffer_dirty(bh);
1107			unlock_buffer(bh);
1108			sync_dirty_buffer(bh);
1109			brelse(bh);
1110		}
1111	}
1112out_unlock:
1113	ufs_put_locked_page(lastpage);
1114out:
1115	return err;
1116}
1117
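/*
 * Free every block beyond the current i_size.  ufs_block_to_path() gives
 * the path to the last block that must survive; the direct area (or the
 * partially used indirect chain) is trimmed first, then any indirect,
 * double and triple indirect trees that are no longer referenced are torn
 * down.  i_lastfrag is refreshed under meta_lock and the whole walk is
 * serialized by truncate_mutex.
 */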
1118static void ufs_truncate_blocks(struct inode *inode)
1119{
1120	struct ufs_inode_info *ufsi = UFS_I(inode);
1121	struct super_block *sb = inode->i_sb;
1122	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
1123	unsigned offsets[4];
1124	int depth;
1125	int depth2;
1126	unsigned i;
1127	struct ufs_buffer_head *ubh[3];
1128	void *p;
1129	u64 block;
1130
1131	if (inode->i_size) {
1132		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
1133		depth = ufs_block_to_path(inode, last, offsets);
1134		if (!depth)
1135			return;
1136	} else {
1137		depth = 1;
1138	}
1139
1140	for (depth2 = depth - 1; depth2; depth2--)
1141		if (offsets[depth2] != uspi->s_apb - 1)
1142			break;
1143
1144	mutex_lock(&ufsi->truncate_mutex);
1145	if (depth == 1) {
1146		ufs_trunc_direct(inode);
1147		offsets[0] = UFS_IND_BLOCK;
1148	} else {
1149		/* get the blocks that should be partially emptied */
1150		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
1151		for (i = 0; i < depth2; i++) {
1152			block = ufs_data_ptr_to_cpu(sb, p);
1153			if (!block)
1154				break;
1155			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
1156			if (!ubh[i]) {
1157				write_seqlock(&ufsi->meta_lock);
1158				ufs_data_ptr_clear(uspi, p);
1159				write_sequnlock(&ufsi->meta_lock);
1160				break;
1161			}
1162			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
1163		}
1164		while (i--)
1165			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
1166	}
1167	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
1168		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
1169		block = ufs_data_ptr_to_cpu(sb, p);
1170		if (block) {
1171			write_seqlock(&ufsi->meta_lock);
1172			ufs_data_ptr_clear(uspi, p);
1173			write_sequnlock(&ufsi->meta_lock);
1174			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
1175		}
1176	}
1177	read_seqlock_excl(&ufsi->meta_lock);
1178	ufsi->i_lastfrag = DIRECT_FRAGMENT;
1179	read_sequnlock_excl(&ufsi->meta_lock);
1180	mark_inode_dirty(inode);
1181	mutex_unlock(&ufsi->truncate_mutex);
1182}
1183
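/*
 * Size-changing half of setattr: allocate the block that will hold the new
 * end of file, zero the tail of the last partial block through
 * block_truncate_page(), update i_size and then release the blocks that are
 * no longer needed.  Only regular files, directories and symlinks can be
 * truncated, and append-only or immutable inodes are rejected.
 */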
1184static int ufs_truncate(struct inode *inode, loff_t size)
1185{
1186	int err = 0;
1187
1188	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
1189	     inode->i_ino, (unsigned long long)size,
1190	     (unsigned long long)i_size_read(inode));
1191
1192	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1193	      S_ISLNK(inode->i_mode)))
1194		return -EINVAL;
1195	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1196		return -EPERM;
1197
1198	err = ufs_alloc_lastblock(inode, size);
1199
1200	if (err)
1201		goto out;
1202
1203	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);
1204
1205	truncate_setsize(inode, size);
1206
1207	ufs_truncate_blocks(inode);
1208	inode->i_mtime = inode->i_ctime = current_time(inode);
1209	mark_inode_dirty(inode);
1210out:
1211	UFSD("EXIT: err %d\n", err);
1212	return err;
1213}
1214
1215int ufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
1216		struct iattr *attr)
1217{
1218	struct inode *inode = d_inode(dentry);
1219	unsigned int ia_valid = attr->ia_valid;
1220	int error;
1221
1222	error = setattr_prepare(&init_user_ns, dentry, attr);
1223	if (error)
1224		return error;
1225
1226	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
1227		error = ufs_truncate(inode, attr->ia_size);
1228		if (error)
1229			return error;
1230	}
1231
1232	setattr_copy(&init_user_ns, inode, attr);
1233	mark_inode_dirty(inode);
1234	return 0;
1235}
1236
1237const struct inode_operations ufs_file_inode_operations = {
1238	.setattr = ufs_setattr,
1239};