v4.17
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11
  12#include <linux/sched.h>
  13#include <linux/slab.h>
  14#include <linux/spinlock.h>
  15#include <linux/completion.h>
  16#include <linux/buffer_head.h>
  17#include <linux/blkdev.h>
  18#include <linux/kthread.h>
  19#include <linux/export.h>
  20#include <linux/namei.h>
  21#include <linux/mount.h>
  22#include <linux/gfs2_ondisk.h>
  23#include <linux/quotaops.h>
  24#include <linux/lockdep.h>
  25#include <linux/module.h>
  26#include <linux/backing-dev.h>
  27
  28#include "gfs2.h"
  29#include "incore.h"
  30#include "bmap.h"
  31#include "glock.h"
  32#include "glops.h"
  33#include "inode.h"
  34#include "recovery.h"
  35#include "rgrp.h"
  36#include "super.h"
  37#include "sys.h"
  38#include "util.h"
  39#include "log.h"
  40#include "quota.h"
  41#include "dir.h"
  42#include "meta_io.h"
  43#include "trace_gfs2.h"
  44
  45#define DO 0
  46#define UNDO 1
  47
  48/**
  49 * gfs2_tune_init - Fill a gfs2_tune structure with default values
  50 * @gt: tune
  51 *
  52 */
  53
  54static void gfs2_tune_init(struct gfs2_tune *gt)
  55{
  56	spin_lock_init(&gt->gt_spin);
  57
  58	gt->gt_quota_warn_period = 10;
  59	gt->gt_quota_scale_num = 1;
  60	gt->gt_quota_scale_den = 1;
  61	gt->gt_new_files_jdata = 0;
  62	gt->gt_max_readahead = BIT(18);
  63	gt->gt_complain_secs = 10;
  64}
  65
  66static struct gfs2_sbd *init_sbd(struct super_block *sb)
  67{
  68	struct gfs2_sbd *sdp;
  69	struct address_space *mapping;
  70
  71	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
  72	if (!sdp)
  73		return NULL;
  74
  75	sb->s_fs_info = sdp;
  76	sdp->sd_vfs = sb;
  77	sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
  78	if (!sdp->sd_lkstats) {
  79		kfree(sdp);
  80		return NULL;
  81	}
  82
  83	set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
  84	gfs2_tune_init(&sdp->sd_tune);
  85
  86	init_waitqueue_head(&sdp->sd_glock_wait);
  87	atomic_set(&sdp->sd_glock_disposal, 0);
  88	init_completion(&sdp->sd_locking_init);
  89	init_completion(&sdp->sd_wdack);
  90	spin_lock_init(&sdp->sd_statfs_spin);
  91
  92	spin_lock_init(&sdp->sd_rindex_spin);
  93	sdp->sd_rindex_tree.rb_node = NULL;
  94
  95	INIT_LIST_HEAD(&sdp->sd_jindex_list);
  96	spin_lock_init(&sdp->sd_jindex_spin);
  97	mutex_init(&sdp->sd_jindex_mutex);
  98	init_completion(&sdp->sd_journal_ready);
  99
 100	INIT_LIST_HEAD(&sdp->sd_quota_list);
 101	mutex_init(&sdp->sd_quota_mutex);
 102	mutex_init(&sdp->sd_quota_sync_mutex);
 103	init_waitqueue_head(&sdp->sd_quota_wait);
 104	INIT_LIST_HEAD(&sdp->sd_trunc_list);
 105	spin_lock_init(&sdp->sd_trunc_lock);
 106	spin_lock_init(&sdp->sd_bitmap_lock);
 107
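	/*
	 * Editor's note: sd_aspace is a private address space used for
	 * resource group metadata pages; it is backed by the block device
	 * inode below rather than by any regular file.
	 */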
 108	mapping = &sdp->sd_aspace;
 109
 110	address_space_init_once(mapping);
 111	mapping->a_ops = &gfs2_rgrp_aops;
 112	mapping->host = sb->s_bdev->bd_inode;
 113	mapping->flags = 0;
 114	mapping_set_gfp_mask(mapping, GFP_NOFS);
 115	mapping->private_data = NULL;
 116	mapping->writeback_index = 0;
 117
 118	spin_lock_init(&sdp->sd_log_lock);
 119	atomic_set(&sdp->sd_log_pinned, 0);
 120	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
 121	INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
 122	spin_lock_init(&sdp->sd_ordered_lock);
 123
 124	init_waitqueue_head(&sdp->sd_log_waitq);
 125	init_waitqueue_head(&sdp->sd_logd_waitq);
 126	spin_lock_init(&sdp->sd_ail_lock);
 127	INIT_LIST_HEAD(&sdp->sd_ail1_list);
 128	INIT_LIST_HEAD(&sdp->sd_ail2_list);
 129
 130	init_rwsem(&sdp->sd_log_flush_lock);
 131	atomic_set(&sdp->sd_log_in_flight, 0);
 132	atomic_set(&sdp->sd_reserving_log, 0);
 133	init_waitqueue_head(&sdp->sd_reserving_log_wait);
 134	init_waitqueue_head(&sdp->sd_log_flush_wait);
 135	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
 136	mutex_init(&sdp->sd_freeze_mutex);
 137
 138	return sdp;
 139}
 140
 141
 142/**
 143 * gfs2_check_sb - Check superblock
 144 * @sdp: the filesystem
 145 * @sb: The superblock
 146 * @silent: Don't print a message if the check fails
 147 *
 148 * Checks the version code of the FS is one that we understand how to
 149 * read and that the sizes of the various on-disk structures have not
 150 * changed.
 151 */
 152
 153static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
 154{
 155	struct gfs2_sb_host *sb = &sdp->sd_sb;
 156
 157	if (sb->sb_magic != GFS2_MAGIC ||
 158	    sb->sb_type != GFS2_METATYPE_SB) {
 159		if (!silent)
 160			pr_warn("not a GFS2 filesystem\n");
 161		return -EINVAL;
 162	}
 163
 164	/*  If format numbers match exactly, we're done.  */
 165
 166	if (sb->sb_fs_format == GFS2_FORMAT_FS &&
 167	    sb->sb_multihost_format == GFS2_FORMAT_MULTI)
 168		return 0;
 169
 170	fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
 171
 172	return -EINVAL;
 173}
 174
 175static void end_bio_io_page(struct bio *bio)
 176{
 177	struct page *page = bio->bi_private;
 178
 179	if (!bio->bi_status)
 180		SetPageUptodate(page);
 181	else
 182		pr_warn("error %d reading superblock\n", bio->bi_status);
 183	unlock_page(page);
 184}
 185
 186static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
 187{
 188	struct gfs2_sb_host *sb = &sdp->sd_sb;
 189	struct super_block *s = sdp->sd_vfs;
 190	const struct gfs2_sb *str = buf;
 191
 192	sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
 193	sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
 194	sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
 195	sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
 196	sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
 197	sb->sb_bsize = be32_to_cpu(str->sb_bsize);
 198	sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
 199	sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
 200	sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
 201	sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
 202	sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);
 203
 204	memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
 205	memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
 206	memcpy(&s->s_uuid, str->sb_uuid, 16);
 207}
 208
 209/**
 210 * gfs2_read_super - Read the gfs2 super block from disk
 211 * @sdp: The GFS2 super block
 212 * @sector: The location of the super block
  213 * @silent: Don't print a message if the check fails
 214 *
 215 * This uses the bio functions to read the super block from disk
 216 * because we want to be 100% sure that we never read cached data.
 217 * A super block is read twice only during each GFS2 mount and is
  218 * never written to by the filesystem. The first time it's read no
 219 * locks are held, and the only details which are looked at are those
 220 * relating to the locking protocol. Once locking is up and working,
 221 * the sb is read again under the lock to establish the location of
 222 * the master directory (contains pointers to journals etc) and the
 223 * root directory.
 224 *
 225 * Returns: 0 on success or error
 226 */
 227
 228static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 229{
 230	struct super_block *sb = sdp->sd_vfs;
 231	struct gfs2_sb *p;
 232	struct page *page;
 233	struct bio *bio;
 234
 235	page = alloc_page(GFP_NOFS);
 236	if (unlikely(!page))
 237		return -ENOMEM;
 238
 239	ClearPageUptodate(page);
 240	ClearPageDirty(page);
 241	lock_page(page);
 242
 243	bio = bio_alloc(GFP_NOFS, 1);
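	/* "sector" counts filesystem blocks; scaling by (s_blocksize / 512) yields a 512-byte device sector. */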
 244	bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
 245	bio_set_dev(bio, sb->s_bdev);
 246	bio_add_page(bio, page, PAGE_SIZE, 0);
 247
 248	bio->bi_end_io = end_bio_io_page;
 249	bio->bi_private = page;
 250	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
 251	submit_bio(bio);
 252	wait_on_page_locked(page);
 253	bio_put(bio);
 254	if (!PageUptodate(page)) {
 255		__free_page(page);
 256		return -EIO;
 257	}
 258	p = kmap(page);
 259	gfs2_sb_in(sdp, p);
 260	kunmap(page);
 261	__free_page(page);
 262	return gfs2_check_sb(sdp, silent);
 263}
 264
 265/**
 266 * gfs2_read_sb - Read super block
 267 * @sdp: The GFS2 superblock
 268 * @silent: Don't print message if mount fails
 269 *
 270 */
 271
 272static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
 273{
 274	u32 hash_blocks, ind_blocks, leaf_blocks;
 275	u32 tmp_blocks;
 276	unsigned int x;
 277	int error;
 278
 279	error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
 280	if (error) {
 281		if (!silent)
 282			fs_err(sdp, "can't read superblock\n");
 283		return error;
 284	}
 285
 286	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
 287			       GFS2_BASIC_BLOCK_SHIFT;
 288	sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
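	/*
	 * Worked example: with a 4096-byte filesystem block, sb_bsize_shift
	 * is 12, so sd_fsb2bb_shift is 12 - 9 = 3 and sd_fsb2bb is 8, i.e.
	 * eight 512-byte basic blocks per filesystem block.
	 */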
 289	sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
 290			  sizeof(struct gfs2_dinode)) / sizeof(u64);
 291	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
 292			  sizeof(struct gfs2_meta_header)) / sizeof(u64);
 293	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
 294	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
 295	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
 296	sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
 297	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
 298				sizeof(struct gfs2_meta_header)) /
 299			        sizeof(struct gfs2_quota_change);
 300	sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
 301				     sizeof(struct gfs2_meta_header))
 302		* GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
 303
  304	/* Compute maximum reservation required to add an entry to a directory */
 305
 306	hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
 307			     sdp->sd_jbsize);
 308
 309	ind_blocks = 0;
 310	for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
 311		tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
 312		ind_blocks += tmp_blocks;
 313	}
 314
 315	leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;
 316
 317	sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;
 318
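	/*
	 * Editor's note: sd_heightsize[h] is the largest file size (in bytes)
	 * addressable by a metadata tree of height h; the loop below stops at
	 * the height whose size no longer fits in 64 bits (detected by
	 * dividing back and comparing).
	 */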
 319	sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
 320				sizeof(struct gfs2_dinode);
 321	sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
 322	for (x = 2;; x++) {
 323		u64 space, d;
 324		u32 m;
 325
 326		space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
 327		d = space;
 328		m = do_div(d, sdp->sd_inptrs);
 329
 330		if (d != sdp->sd_heightsize[x - 1] || m)
 331			break;
 332		sdp->sd_heightsize[x] = space;
 333	}
 334	sdp->sd_max_height = x;
 335	sdp->sd_heightsize[x] = ~0;
 336	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
 337
 338	sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
 339				 sizeof(struct gfs2_dinode);
 340	sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
 341	for (x = 2;; x++) {
 342		u64 space, d;
 343		u32 m;
 344
 345		space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
 346		d = space;
 347		m = do_div(d, sdp->sd_inptrs);
 348
 349		if (d != sdp->sd_jheightsize[x - 1] || m)
 350			break;
 351		sdp->sd_jheightsize[x] = space;
 352	}
 353	sdp->sd_max_jheight = x;
 354	sdp->sd_jheightsize[x] = ~0;
 355	gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);
 356
 357	sdp->sd_max_dents_per_leaf = (sdp->sd_sb.sb_bsize -
 358				      sizeof(struct gfs2_leaf)) /
 359				     GFS2_MIN_DIRENT_SIZE;
 360	return 0;
 361}
 362
 363static int init_names(struct gfs2_sbd *sdp, int silent)
 364{
 365	char *proto, *table;
 366	int error = 0;
 367
 368	proto = sdp->sd_args.ar_lockproto;
 369	table = sdp->sd_args.ar_locktable;
 370
 371	/*  Try to autodetect  */
 372
 373	if (!proto[0] || !table[0]) {
 374		error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
 375		if (error)
 376			return error;
 377
 378		if (!proto[0])
 379			proto = sdp->sd_sb.sb_lockproto;
 380		if (!table[0])
 381			table = sdp->sd_sb.sb_locktable;
 382	}
 383
 384	if (!table[0])
 385		table = sdp->sd_vfs->s_id;
 386
 387	strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
 388	strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
 389
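	/*
	 * Editor's note: sd_table_name feeds into sd_fsname and the sysfs
	 * object name, where '/' is not allowed, hence the substitution below.
	 */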
 390	table = sdp->sd_table_name;
 391	while ((table = strchr(table, '/')))
 392		*table = '_';
 393
 394	return error;
 395}
 396
 397static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
 398			int undo)
 399{
 400	int error = 0;
 401
 402	if (undo)
 403		goto fail_trans;
 404
 405	error = gfs2_glock_nq_num(sdp,
 406				  GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
 407				  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
 408				  mount_gh);
 409	if (error) {
 410		fs_err(sdp, "can't acquire mount glock: %d\n", error);
 411		goto fail;
 412	}
 413
 414	error = gfs2_glock_nq_num(sdp,
 415				  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
 416				  LM_ST_SHARED,
 417				  LM_FLAG_NOEXP | GL_EXACT,
 418				  &sdp->sd_live_gh);
 419	if (error) {
 420		fs_err(sdp, "can't acquire live glock: %d\n", error);
 421		goto fail_mount;
 422	}
 423
 424	error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
 425			       CREATE, &sdp->sd_rename_gl);
 426	if (error) {
 427		fs_err(sdp, "can't create rename glock: %d\n", error);
 428		goto fail_live;
 429	}
 430
 431	error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
 432			       CREATE, &sdp->sd_freeze_gl);
 433	if (error) {
  434		fs_err(sdp, "can't create freeze glock: %d\n", error);
 435		goto fail_rename;
 436	}
 437
 438	return 0;
 439
 440fail_trans:
 441	gfs2_glock_put(sdp->sd_freeze_gl);
 442fail_rename:
 443	gfs2_glock_put(sdp->sd_rename_gl);
 444fail_live:
 445	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
 446fail_mount:
 447	gfs2_glock_dq_uninit(mount_gh);
 448fail:
 449	return error;
 450}
 451
 452static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
 453			    u64 no_addr, const char *name)
 454{
 455	struct gfs2_sbd *sdp = sb->s_fs_info;
 456	struct dentry *dentry;
 457	struct inode *inode;
 458
 459	inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
 460				  GFS2_BLKST_FREE /* ignore */);
 461	if (IS_ERR(inode)) {
 462		fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
 463		return PTR_ERR(inode);
 464	}
 465	dentry = d_make_root(inode);
 466	if (!dentry) {
 467		fs_err(sdp, "can't alloc %s dentry\n", name);
 468		return -ENOMEM;
 469	}
 470	*dptr = dentry;
 471	return 0;
 472}
 473
 474static int init_sb(struct gfs2_sbd *sdp, int silent)
 475{
 476	struct super_block *sb = sdp->sd_vfs;
 477	struct gfs2_holder sb_gh;
 478	u64 no_addr;
 479	int ret;
 480
 481	ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
 482				LM_ST_SHARED, 0, &sb_gh);
 483	if (ret) {
 484		fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
 485		return ret;
 486	}
 487
 488	ret = gfs2_read_sb(sdp, silent);
 489	if (ret) {
 490		fs_err(sdp, "can't read superblock: %d\n", ret);
 491		goto out;
 492	}
 493
 494	/* Set up the buffer cache and SB for real */
 495	if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
 496		ret = -EINVAL;
 497		fs_err(sdp, "FS block size (%u) is too small for device "
 498		       "block size (%u)\n",
 499		       sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
 500		goto out;
 501	}
 502	if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
 503		ret = -EINVAL;
 504		fs_err(sdp, "FS block size (%u) is too big for machine "
 505		       "page size (%u)\n",
 506		       sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
 507		goto out;
 508	}
 509	sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);
 510
 511	/* Get the root inode */
 512	no_addr = sdp->sd_sb.sb_root_dir.no_addr;
 513	ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
 514	if (ret)
 515		goto out;
 516
 517	/* Get the master inode */
 518	no_addr = sdp->sd_sb.sb_master_dir.no_addr;
 519	ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
 520	if (ret) {
 521		dput(sdp->sd_root_dir);
 522		goto out;
 523	}
 524	sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);
 525out:
 526	gfs2_glock_dq_uninit(&sb_gh);
 527	return ret;
 528}
 529
 530static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
 531{
 532	char *message = "FIRSTMOUNT=Done";
 533	char *envp[] = { message, NULL };
 534
 535	fs_info(sdp, "first mount done, others may mount\n");
 536
 537	if (sdp->sd_lockstruct.ls_ops->lm_first_done)
 538		sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);
 539
 540	kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
 541}
 542
 543/**
 544 * gfs2_jindex_hold - Grab a lock on the jindex
 545 * @sdp: The GFS2 superblock
 546 * @ji_gh: the holder for the jindex glock
 547 *
 548 * Returns: errno
 549 */
 550
 551static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
 552{
 553	struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
 554	struct qstr name;
 555	char buf[20];
 556	struct gfs2_jdesc *jd;
 557	int error;
 558
 559	name.name = buf;
 560
 561	mutex_lock(&sdp->sd_jindex_mutex);
 562
 563	for (;;) {
 564		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
 565		if (error)
 566			break;
 567
 568		name.len = sprintf(buf, "journal%u", sdp->sd_journals);
 569		name.hash = gfs2_disk_hash(name.name, name.len);
 570
 571		error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
 572		if (error == -ENOENT) {
 573			error = 0;
 574			break;
 575		}
 576
 577		gfs2_glock_dq_uninit(ji_gh);
 578
 579		if (error)
 580			break;
 581
 582		error = -ENOMEM;
 583		jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
 584		if (!jd)
 585			break;
 586
 587		INIT_LIST_HEAD(&jd->extent_list);
 588		INIT_LIST_HEAD(&jd->jd_revoke_list);
 589
 590		INIT_WORK(&jd->jd_work, gfs2_recover_func);
 591		jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
 592		if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
 593			if (!jd->jd_inode)
 594				error = -ENOENT;
 595			else
 596				error = PTR_ERR(jd->jd_inode);
 597			kfree(jd);
 598			break;
 599		}
 600
 601		spin_lock(&sdp->sd_jindex_spin);
 602		jd->jd_jid = sdp->sd_journals++;
 603		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
 604		spin_unlock(&sdp->sd_jindex_spin);
 605	}
 606
 607	mutex_unlock(&sdp->sd_jindex_mutex);
 608
 609	return error;
 610}
 611
 612/**
 613 * check_journal_clean - Make sure a journal is clean for a spectator mount
 614 * @sdp: The GFS2 superblock
 615 * @jd: The journal descriptor
 616 *
 617 * Returns: 0 if the journal is clean or locked, else an error
 618 */
 619static int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
 620{
 621	int error;
 622	struct gfs2_holder j_gh;
 623	struct gfs2_log_header_host head;
 624	struct gfs2_inode *ip;
 625
 626	ip = GFS2_I(jd->jd_inode);
 627	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
 628				   GL_EXACT | GL_NOCACHE, &j_gh);
 629	if (error) {
 630		fs_err(sdp, "Error locking journal for spectator mount.\n");
 631		return -EPERM;
 632	}
 633	error = gfs2_jdesc_check(jd);
 634	if (error) {
 635		fs_err(sdp, "Error checking journal for spectator mount.\n");
 636		goto out_unlock;
 637	}
 638	error = gfs2_find_jhead(jd, &head);
 639	if (error) {
 640		fs_err(sdp, "Error parsing journal for spectator mount.\n");
 641		goto out_unlock;
 642	}
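	/*
	 * Editor's note: GFS2_LOG_HEAD_UNMOUNT in the last log header means
	 * the journal was shut down cleanly; a dirty journal needs recovery,
	 * which a read-only spectator mount cannot perform.
	 */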
 643	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
 644		error = -EPERM;
 645		fs_err(sdp, "jid=%u: Journal is dirty, so the first mounter "
 646		       "must not be a spectator.\n", jd->jd_jid);
 647	}
 648
 649out_unlock:
 650	gfs2_glock_dq_uninit(&j_gh);
 651	return error;
 652}
 653
 654static int init_journal(struct gfs2_sbd *sdp, int undo)
 655{
 656	struct inode *master = d_inode(sdp->sd_master_dir);
 657	struct gfs2_holder ji_gh;
 658	struct gfs2_inode *ip;
 659	int jindex = 1;
 660	int error = 0;
 661
 662	if (undo) {
 663		jindex = 0;
 664		goto fail_jinode_gh;
 665	}
 666
 667	sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
 668	if (IS_ERR(sdp->sd_jindex)) {
 669		fs_err(sdp, "can't lookup journal index: %d\n", error);
 670		return PTR_ERR(sdp->sd_jindex);
 671	}
 672
 673	/* Load in the journal index special file */
 674
 675	error = gfs2_jindex_hold(sdp, &ji_gh);
 676	if (error) {
 677		fs_err(sdp, "can't read journal index: %d\n", error);
 678		goto fail;
 679	}
 680
 681	error = -EUSERS;
 682	if (!gfs2_jindex_size(sdp)) {
 683		fs_err(sdp, "no journals!\n");
 684		goto fail_jindex;
 685	}
 686
 687	atomic_set(&sdp->sd_log_blks_needed, 0);
 688	if (sdp->sd_args.ar_spectator) {
 689		sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
 690		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
 691		atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
 692		atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
 693	} else {
 694		if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
 695			fs_err(sdp, "can't mount journal #%u\n",
 696			       sdp->sd_lockstruct.ls_jid);
 697			fs_err(sdp, "there are only %u journals (0 - %u)\n",
 698			       gfs2_jindex_size(sdp),
 699			       gfs2_jindex_size(sdp) - 1);
 700			goto fail_jindex;
 701		}
 702		sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);
 703
 704		error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
 705					  &gfs2_journal_glops,
 706					  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
 707					  &sdp->sd_journal_gh);
 708		if (error) {
 709			fs_err(sdp, "can't acquire journal glock: %d\n", error);
 710			goto fail_jindex;
 711		}
 712
 713		ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 714		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
 715					   LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
 716					   &sdp->sd_jinode_gh);
 717		if (error) {
 718			fs_err(sdp, "can't acquire journal inode glock: %d\n",
 719			       error);
 720			goto fail_journal_gh;
 721		}
 722
 723		error = gfs2_jdesc_check(sdp->sd_jdesc);
 724		if (error) {
 725			fs_err(sdp, "my journal (%u) is bad: %d\n",
 726			       sdp->sd_jdesc->jd_jid, error);
 727			goto fail_jinode_gh;
 728		}
 729		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
 730		atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
 731		atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
 732
 733		/* Map the extents for this journal's blocks */
 734		gfs2_map_journal_extents(sdp, sdp->sd_jdesc);
 735	}
 736	trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));
 737
 738	if (sdp->sd_lockstruct.ls_first) {
 739		unsigned int x;
 740		for (x = 0; x < sdp->sd_journals; x++) {
 741			struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);
 742
 743			if (sdp->sd_args.ar_spectator) {
 744				error = check_journal_clean(sdp, jd);
 745				if (error)
 746					goto fail_jinode_gh;
 747				continue;
 748			}
 749			error = gfs2_recover_journal(jd, true);
 750			if (error) {
 751				fs_err(sdp, "error recovering journal %u: %d\n",
 752				       x, error);
 753				goto fail_jinode_gh;
 754			}
 755		}
 756
 757		gfs2_others_may_mount(sdp);
 758	} else if (!sdp->sd_args.ar_spectator) {
 759		error = gfs2_recover_journal(sdp->sd_jdesc, true);
 760		if (error) {
 761			fs_err(sdp, "error recovering my journal: %d\n", error);
 762			goto fail_jinode_gh;
 763		}
 764	}
 765
 766	sdp->sd_log_idle = 1;
 767	set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
 768	gfs2_glock_dq_uninit(&ji_gh);
 769	jindex = 0;
 770	INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
 771	return 0;
 772
 773fail_jinode_gh:
 774	if (!sdp->sd_args.ar_spectator)
 775		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
 776fail_journal_gh:
 777	if (!sdp->sd_args.ar_spectator)
 778		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
 779fail_jindex:
 780	gfs2_jindex_free(sdp);
 781	if (jindex)
 782		gfs2_glock_dq_uninit(&ji_gh);
 783fail:
 784	iput(sdp->sd_jindex);
 785	return error;
 786}
 787
 788static struct lock_class_key gfs2_quota_imutex_key;
 789
 790static int init_inodes(struct gfs2_sbd *sdp, int undo)
 791{
 792	int error = 0;
 793	struct inode *master = d_inode(sdp->sd_master_dir);
 794
 795	if (undo)
 796		goto fail_qinode;
 797
 798	error = init_journal(sdp, undo);
 799	complete_all(&sdp->sd_journal_ready);
 800	if (error)
 801		goto fail;
 802
 803	/* Read in the master statfs inode */
 804	sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
 805	if (IS_ERR(sdp->sd_statfs_inode)) {
 806		error = PTR_ERR(sdp->sd_statfs_inode);
 807		fs_err(sdp, "can't read in statfs inode: %d\n", error);
 808		goto fail_journal;
 809	}
 810
 811	/* Read in the resource index inode */
 812	sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
 813	if (IS_ERR(sdp->sd_rindex)) {
 814		error = PTR_ERR(sdp->sd_rindex);
 815		fs_err(sdp, "can't get resource index inode: %d\n", error);
 816		goto fail_statfs;
 817	}
 818	sdp->sd_rindex_uptodate = 0;
 819
 820	/* Read in the quota inode */
 821	sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
 822	if (IS_ERR(sdp->sd_quota_inode)) {
 823		error = PTR_ERR(sdp->sd_quota_inode);
 824		fs_err(sdp, "can't get quota file inode: %d\n", error);
 825		goto fail_rindex;
 826	}
 827	/*
 828	 * i_rwsem on quota files is special. Since this inode is hidden system
 829	 * file, we are safe to define locking ourselves.
 830	 */
 831	lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
 832			  &gfs2_quota_imutex_key);
 833
 834	error = gfs2_rindex_update(sdp);
 835	if (error)
 836		goto fail_qinode;
 837
 838	return 0;
 839
 840fail_qinode:
 841	iput(sdp->sd_quota_inode);
 842fail_rindex:
 843	gfs2_clear_rgrpd(sdp);
 844	iput(sdp->sd_rindex);
 845fail_statfs:
 846	iput(sdp->sd_statfs_inode);
 847fail_journal:
 848	init_journal(sdp, UNDO);
 849fail:
 850	return error;
 851}
 852
 853static int init_per_node(struct gfs2_sbd *sdp, int undo)
 854{
 855	struct inode *pn = NULL;
 856	char buf[30];
 857	int error = 0;
 858	struct gfs2_inode *ip;
 859	struct inode *master = d_inode(sdp->sd_master_dir);
 860
 861	if (sdp->sd_args.ar_spectator)
 862		return 0;
 863
 864	if (undo)
 865		goto fail_qc_gh;
 866
 867	pn = gfs2_lookup_simple(master, "per_node");
 868	if (IS_ERR(pn)) {
 869		error = PTR_ERR(pn);
 870		fs_err(sdp, "can't find per_node directory: %d\n", error);
 871		return error;
 872	}
 873
 874	sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
 875	sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
 876	if (IS_ERR(sdp->sd_sc_inode)) {
 877		error = PTR_ERR(sdp->sd_sc_inode);
 878		fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
 879		goto fail;
 880	}
 881
 882	sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
 883	sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
 884	if (IS_ERR(sdp->sd_qc_inode)) {
 885		error = PTR_ERR(sdp->sd_qc_inode);
 886		fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
 887		goto fail_ut_i;
 888	}
 889
 890	iput(pn);
 891	pn = NULL;
 892
 893	ip = GFS2_I(sdp->sd_sc_inode);
 894	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
 895				   &sdp->sd_sc_gh);
 896	if (error) {
 897		fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
 898		goto fail_qc_i;
 899	}
 900
 901	ip = GFS2_I(sdp->sd_qc_inode);
 902	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
 903				   &sdp->sd_qc_gh);
 904	if (error) {
 905		fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
 906		goto fail_ut_gh;
 907	}
 908
 909	return 0;
 910
 911fail_qc_gh:
 912	gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
 913fail_ut_gh:
 914	gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
 915fail_qc_i:
 916	iput(sdp->sd_qc_inode);
 917fail_ut_i:
 918	iput(sdp->sd_sc_inode);
 919fail:
 920	iput(pn);
 921	return error;
 922}
 923
 924static const match_table_t nolock_tokens = {
 925	{ Opt_jid, "jid=%d\n", },
 926	{ Opt_err, NULL },
 927};
 928
 929static const struct lm_lockops nolock_ops = {
 930	.lm_proto_name = "lock_nolock",
 931	.lm_put_lock = gfs2_glock_free,
 932	.lm_tokens = &nolock_tokens,
 933};
 934
 935/**
 936 * gfs2_lm_mount - mount a locking protocol
 937 * @sdp: the filesystem
 938 * @args: mount arguments
 939 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 940 *
 941 * Returns: errno
 942 */
 943
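/*
 * Editor's note: a clustered mount typically looks something like
 *   mount -t gfs2 -o lockproto=lock_dlm,locktable=mycluster:myfs <dev> <dir>
 * although in practice both values usually come from the on-disk superblock
 * via init_names() rather than from the command line.
 */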
 944static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
 945{
 946	const struct lm_lockops *lm;
 947	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 948	struct gfs2_args *args = &sdp->sd_args;
 949	const char *proto = sdp->sd_proto_name;
 950	const char *table = sdp->sd_table_name;
 951	char *o, *options;
 952	int ret;
 953
 954	if (!strcmp("lock_nolock", proto)) {
 955		lm = &nolock_ops;
 956		sdp->sd_args.ar_localflocks = 1;
 957#ifdef CONFIG_GFS2_FS_LOCKING_DLM
 958	} else if (!strcmp("lock_dlm", proto)) {
 959		lm = &gfs2_dlm_ops;
 960#endif
 961	} else {
 962		pr_info("can't find protocol %s\n", proto);
 963		return -ENOENT;
 964	}
 965
 966	fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
 967
 968	ls->ls_ops = lm;
 969	ls->ls_first = 1;
 970
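	/*
	 * Editor's note: ar_hostdata is a colon-separated string such as
	 * "jid=0:first=1", normally supplied by the cluster mount helper
	 * rather than typed by the user.
	 */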
 971	for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
 972		substring_t tmp[MAX_OPT_ARGS];
 973		int token, option;
 974
 975		if (!o || !*o)
 976			continue;
 977
 978		token = match_token(o, *lm->lm_tokens, tmp);
 979		switch (token) {
 980		case Opt_jid:
 981			ret = match_int(&tmp[0], &option);
 982			if (ret || option < 0) 
 983				goto hostdata_error;
 984			if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
 985				ls->ls_jid = option;
 986			break;
 987		case Opt_id:
 988		case Opt_nodir:
 989			/* Obsolete, but left for backward compat purposes */
 990			break;
 991		case Opt_first:
 992			ret = match_int(&tmp[0], &option);
 993			if (ret || (option != 0 && option != 1))
 994				goto hostdata_error;
 995			ls->ls_first = option;
 996			break;
 997		case Opt_err:
 998		default:
 999hostdata_error:
1000			fs_info(sdp, "unknown hostdata (%s)\n", o);
1001			return -EINVAL;
1002		}
1003	}
1004
1005	if (lm->lm_mount == NULL) {
1006		fs_info(sdp, "Now mounting FS...\n");
1007		complete_all(&sdp->sd_locking_init);
1008		return 0;
1009	}
1010	ret = lm->lm_mount(sdp, table);
1011	if (ret == 0)
1012		fs_info(sdp, "Joined cluster. Now mounting FS...\n");
1013	complete_all(&sdp->sd_locking_init);
1014	return ret;
1015}
1016
1017void gfs2_lm_unmount(struct gfs2_sbd *sdp)
1018{
1019	const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
1020	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
1021	    lm->lm_unmount)
1022		lm->lm_unmount(sdp);
1023}
1024
1025static int wait_on_journal(struct gfs2_sbd *sdp)
1026{
1027	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
1028		return 0;
1029
1030	return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
1031		? -EINTR : 0;
1032}
1033
1034void gfs2_online_uevent(struct gfs2_sbd *sdp)
1035{
1036	struct super_block *sb = sdp->sd_vfs;
1037	char ro[20];
1038	char spectator[20];
1039	char *envp[] = { ro, spectator, NULL };
1040	sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
1041	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
1042	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
1043}
1044
1045/**
1046 * fill_super - Read in superblock
1047 * @sb: The VFS superblock
1048 * @data: Mount options
1049 * @silent: Don't complain if it's not a GFS2 filesystem
1050 *
1051 * Returns: errno
1052 */
1053
1054static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
1055{
1056	struct gfs2_sbd *sdp;
1057	struct gfs2_holder mount_gh;
1058	int error;
1059
1060	sdp = init_sbd(sb);
1061	if (!sdp) {
1062		pr_warn("can't alloc struct gfs2_sbd\n");
1063		return -ENOMEM;
1064	}
1065	sdp->sd_args = *args;
1066
1067	if (sdp->sd_args.ar_spectator) {
1068		sb->s_flags |= SB_RDONLY;
1069		set_bit(SDF_RORECOVERY, &sdp->sd_flags);
1070	}
1071	if (sdp->sd_args.ar_posix_acl)
1072		sb->s_flags |= SB_POSIXACL;
1073	if (sdp->sd_args.ar_nobarrier)
1074		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1075
1076	sb->s_flags |= SB_NOSEC;
1077	sb->s_magic = GFS2_MAGIC;
1078	sb->s_op = &gfs2_super_ops;
1079	sb->s_d_op = &gfs2_dops;
1080	sb->s_export_op = &gfs2_export_ops;
1081	sb->s_xattr = gfs2_xattr_handlers;
1082	sb->s_qcop = &gfs2_quotactl_ops;
1083	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
1084	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
1085	sb->s_time_gran = 1;
1086	sb->s_maxbytes = MAX_LFS_FILESIZE;
1087
1088	/* Set up the buffer cache and fill in some fake block size values
1089	   to allow us to read-in the on-disk superblock. */
1090	sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
1091	sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
1092	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
1093                               GFS2_BASIC_BLOCK_SHIFT;
1094	sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
1095
1096	sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
1097	sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
1098	if (sdp->sd_args.ar_statfs_quantum) {
1099		sdp->sd_tune.gt_statfs_slow = 0;
1100		sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
1101	} else {
1102		sdp->sd_tune.gt_statfs_slow = 1;
1103		sdp->sd_tune.gt_statfs_quantum = 30;
1104	}
1105
1106	error = init_names(sdp, silent);
1107	if (error) {
1108		/* In this case, we haven't initialized sysfs, so we have to
1109		   manually free the sdp. */
1110		free_percpu(sdp->sd_lkstats);
1111		kfree(sdp);
1112		sb->s_fs_info = NULL;
1113		return error;
1114	}
1115
1116	snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
1117
1118	error = gfs2_sys_fs_add(sdp);
1119	/*
1120	 * If we hit an error here, gfs2_sys_fs_add will have called function
1121	 * kobject_put which causes the sysfs usage count to go to zero, which
1122	 * causes sysfs to call function gfs2_sbd_release, which frees sdp.
1123	 * Subsequent error paths here will call gfs2_sys_fs_del, which also
1124	 * kobject_put to free sdp.
1125	 */
1126	if (error)
1127		return error;
1128
1129	gfs2_create_debugfs_file(sdp);
1130
1131	error = gfs2_lm_mount(sdp, silent);
1132	if (error)
1133		goto fail_debug;
1134
1135	error = init_locking(sdp, &mount_gh, DO);
1136	if (error)
1137		goto fail_lm;
1138
1139	error = init_sb(sdp, silent);
1140	if (error)
1141		goto fail_locking;
1142
1143	error = wait_on_journal(sdp);
1144	if (error)
1145		goto fail_sb;
1146
1147	/*
1148	 * If user space has failed to join the cluster or some similar
1149	 * failure has occurred, then the journal id will contain a
1150	 * negative (error) number. This will then be returned to the
1151	 * caller (of the mount syscall). We do this even for spectator
1152	 * mounts (which just write a jid of 0 to indicate "ok" even though
1153	 * the jid is unused in the spectator case)
1154	 */
1155	if (sdp->sd_lockstruct.ls_jid < 0) {
1156		error = sdp->sd_lockstruct.ls_jid;
1157		sdp->sd_lockstruct.ls_jid = 0;
1158		goto fail_sb;
1159	}
1160
1161	if (sdp->sd_args.ar_spectator)
1162		snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.s",
1163			 sdp->sd_table_name);
1164	else
1165		snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.%u",
1166			 sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);
1167
1168	error = init_inodes(sdp, DO);
1169	if (error)
1170		goto fail_sb;
1171
1172	error = init_per_node(sdp, DO);
1173	if (error)
1174		goto fail_inodes;
1175
1176	error = gfs2_statfs_init(sdp);
1177	if (error) {
1178		fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
1179		goto fail_per_node;
1180	}
1181
1182	if (!sb_rdonly(sb)) {
1183		error = gfs2_make_fs_rw(sdp);
1184		if (error) {
1185			fs_err(sdp, "can't make FS RW: %d\n", error);
1186			goto fail_per_node;
1187		}
1188	}
1189
1190	gfs2_glock_dq_uninit(&mount_gh);
1191	gfs2_online_uevent(sdp);
1192	return 0;
1193
1194fail_per_node:
1195	init_per_node(sdp, UNDO);
1196fail_inodes:
1197	init_inodes(sdp, UNDO);
1198fail_sb:
1199	if (sdp->sd_root_dir)
1200		dput(sdp->sd_root_dir);
1201	if (sdp->sd_master_dir)
1202		dput(sdp->sd_master_dir);
1203	if (sb->s_root)
1204		dput(sb->s_root);
1205	sb->s_root = NULL;
1206fail_locking:
1207	init_locking(sdp, &mount_gh, UNDO);
1208fail_lm:
1209	complete_all(&sdp->sd_journal_ready);
1210	gfs2_gl_hash_clear(sdp);
1211	gfs2_lm_unmount(sdp);
1212fail_debug:
1213	gfs2_delete_debugfs_file(sdp);
1214	free_percpu(sdp->sd_lkstats);
1215	/* gfs2_sys_fs_del must be the last thing we do, since it causes
1216	 * sysfs to call function gfs2_sbd_release, which frees sdp. */
1217	gfs2_sys_fs_del(sdp);
1218	sb->s_fs_info = NULL;
1219	return error;
1220}
1221
1222static int set_gfs2_super(struct super_block *s, void *data)
1223{
1224	s->s_bdev = data;
1225	s->s_dev = s->s_bdev->bd_dev;
1226	s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
1227	return 0;
1228}
1229
1230static int test_gfs2_super(struct super_block *s, void *ptr)
1231{
1232	struct block_device *bdev = ptr;
1233	return (bdev == s->s_bdev);
1234}
1235
1236/**
1237 * gfs2_mount - Get the GFS2 superblock
1238 * @fs_type: The GFS2 filesystem type
1239 * @flags: Mount flags
1240 * @dev_name: The name of the device
1241 * @data: The mount arguments
1242 *
1243 * Q. Why not use get_sb_bdev() ?
1244 * A. We need to select one of two root directories to mount, independent
1245 *    of whether this is the initial, or subsequent, mount of this sb
1246 *
1247 * Returns: 0 or -ve on error
1248 */
1249
1250static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
1251		       const char *dev_name, void *data)
1252{
1253	struct block_device *bdev;
1254	struct super_block *s;
1255	fmode_t mode = FMODE_READ | FMODE_EXCL;
1256	int error;
1257	struct gfs2_args args;
1258	struct gfs2_sbd *sdp;
1259
1260	if (!(flags & SB_RDONLY))
1261		mode |= FMODE_WRITE;
1262
1263	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
1264	if (IS_ERR(bdev))
1265		return ERR_CAST(bdev);
1266
1267	/*
1268	 * once the super is inserted into the list by sget, s_umount
1269	 * will protect the lockfs code from trying to start a snapshot
1270	 * while we are mounting
1271	 */
1272	mutex_lock(&bdev->bd_fsfreeze_mutex);
1273	if (bdev->bd_fsfreeze_count > 0) {
1274		mutex_unlock(&bdev->bd_fsfreeze_mutex);
1275		error = -EBUSY;
1276		goto error_bdev;
1277	}
1278	s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev);
1279	mutex_unlock(&bdev->bd_fsfreeze_mutex);
1280	error = PTR_ERR(s);
1281	if (IS_ERR(s))
1282		goto error_bdev;
1283
1284	if (s->s_root) {
1285		/*
1286		 * s_umount nests inside bd_mutex during
1287		 * __invalidate_device().  blkdev_put() acquires
1288		 * bd_mutex and can't be called under s_umount.  Drop
1289		 * s_umount temporarily.  This is safe as we're
1290		 * holding an active reference.
1291		 */
1292		up_write(&s->s_umount);
1293		blkdev_put(bdev, mode);
1294		down_write(&s->s_umount);
1295	} else {
1296		/* s_mode must be set before deactivate_locked_super calls */
1297		s->s_mode = mode;
1298	}
1299
1300	memset(&args, 0, sizeof(args));
1301	args.ar_quota = GFS2_QUOTA_DEFAULT;
1302	args.ar_data = GFS2_DATA_DEFAULT;
1303	args.ar_commit = 30;
1304	args.ar_statfs_quantum = 30;
1305	args.ar_quota_quantum = 60;
1306	args.ar_errors = GFS2_ERRORS_DEFAULT;
1307
1308	error = gfs2_mount_args(&args, data);
1309	if (error) {
1310		pr_warn("can't parse mount arguments\n");
1311		goto error_super;
1312	}
1313
1314	if (s->s_root) {
1315		error = -EBUSY;
1316		if ((flags ^ s->s_flags) & SB_RDONLY)
1317			goto error_super;
1318	} else {
1319		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1320		sb_set_blocksize(s, block_size(bdev));
1321		error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0);
1322		if (error)
1323			goto error_super;
1324		s->s_flags |= SB_ACTIVE;
1325		bdev->bd_super = s;
1326	}
1327
1328	sdp = s->s_fs_info;
1329	if (args.ar_meta)
1330		return dget(sdp->sd_master_dir);
1331	else
1332		return dget(sdp->sd_root_dir);
1333
1334error_super:
1335	deactivate_locked_super(s);
1336	return ERR_PTR(error);
1337error_bdev:
1338	blkdev_put(bdev, mode);
1339	return ERR_PTR(error);
1340}
1341
1342static int set_meta_super(struct super_block *s, void *ptr)
1343{
1344	return -EINVAL;
1345}
1346
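/*
 * Editor's note: for the "gfs2meta" type, dev_name is the path to an
 * already-mounted gfs2 filesystem rather than a block device; the lookup
 * below resolves that path and reuses the existing superblock.
 */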
1347static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
1348			int flags, const char *dev_name, void *data)
1349{
1350	struct super_block *s;
1351	struct gfs2_sbd *sdp;
1352	struct path path;
1353	int error;
1354
1355	error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
1356	if (error) {
1357		pr_warn("path_lookup on %s returned error %d\n",
1358			dev_name, error);
1359		return ERR_PTR(error);
1360	}
1361	s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
1362		 path.dentry->d_sb->s_bdev);
1363	path_put(&path);
1364	if (IS_ERR(s)) {
1365		pr_warn("gfs2 mount does not exist\n");
1366		return ERR_CAST(s);
1367	}
1368	if ((flags ^ s->s_flags) & SB_RDONLY) {
1369		deactivate_locked_super(s);
1370		return ERR_PTR(-EBUSY);
1371	}
1372	sdp = s->s_fs_info;
1373	return dget(sdp->sd_master_dir);
1374}
1375
1376static void gfs2_kill_sb(struct super_block *sb)
1377{
1378	struct gfs2_sbd *sdp = sb->s_fs_info;
1379
1380	if (sdp == NULL) {
1381		kill_block_super(sb);
1382		return;
1383	}
1384
1385	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SYNC | GFS2_LFC_KILL_SB);
1386	dput(sdp->sd_root_dir);
1387	dput(sdp->sd_master_dir);
1388	sdp->sd_root_dir = NULL;
1389	sdp->sd_master_dir = NULL;
1390	shrink_dcache_sb(sb);
1391	free_percpu(sdp->sd_lkstats);
1392	kill_block_super(sb);
1393}
1394
1395struct file_system_type gfs2_fs_type = {
1396	.name = "gfs2",
1397	.fs_flags = FS_REQUIRES_DEV,
1398	.mount = gfs2_mount,
1399	.kill_sb = gfs2_kill_sb,
1400	.owner = THIS_MODULE,
1401};
1402MODULE_ALIAS_FS("gfs2");
1403
1404struct file_system_type gfs2meta_fs_type = {
1405	.name = "gfs2meta",
1406	.fs_flags = FS_REQUIRES_DEV,
1407	.mount = gfs2_mount_meta,
1408	.owner = THIS_MODULE,
1409};
1410MODULE_ALIAS_FS("gfs2meta");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/sched.h>
  10#include <linux/slab.h>
  11#include <linux/spinlock.h>
  12#include <linux/completion.h>
  13#include <linux/buffer_head.h>
  14#include <linux/blkdev.h>
  15#include <linux/kthread.h>
  16#include <linux/export.h>
  17#include <linux/namei.h>
  18#include <linux/mount.h>
  19#include <linux/gfs2_ondisk.h>
  20#include <linux/quotaops.h>
  21#include <linux/lockdep.h>
  22#include <linux/module.h>
  23#include <linux/backing-dev.h>
  24#include <linux/fs_parser.h>
  25
  26#include "gfs2.h"
  27#include "incore.h"
  28#include "bmap.h"
  29#include "glock.h"
  30#include "glops.h"
  31#include "inode.h"
  32#include "recovery.h"
  33#include "rgrp.h"
  34#include "super.h"
  35#include "sys.h"
  36#include "util.h"
  37#include "log.h"
  38#include "quota.h"
  39#include "dir.h"
  40#include "meta_io.h"
  41#include "trace_gfs2.h"
  42#include "lops.h"
  43
  44#define DO 0
  45#define UNDO 1
  46
  47/**
  48 * gfs2_tune_init - Fill a gfs2_tune structure with default values
  49 * @gt: tune
  50 *
  51 */
  52
  53static void gfs2_tune_init(struct gfs2_tune *gt)
  54{
  55	spin_lock_init(&gt->gt_spin);
  56
  57	gt->gt_quota_warn_period = 10;
  58	gt->gt_quota_scale_num = 1;
  59	gt->gt_quota_scale_den = 1;
  60	gt->gt_new_files_jdata = 0;
  61	gt->gt_max_readahead = BIT(18);
  62	gt->gt_complain_secs = 10;
  63}
  64
  65void free_sbd(struct gfs2_sbd *sdp)
  66{
  67	if (sdp->sd_lkstats)
  68		free_percpu(sdp->sd_lkstats);
  69	kfree(sdp);
  70}
  71
  72static struct gfs2_sbd *init_sbd(struct super_block *sb)
  73{
  74	struct gfs2_sbd *sdp;
  75	struct address_space *mapping;
  76
  77	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
  78	if (!sdp)
  79		return NULL;
  80
  81	sdp->sd_vfs = sb;
  82	sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
  83	if (!sdp->sd_lkstats)
  84		goto fail;
  85	sb->s_fs_info = sdp;
  86
  87	set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
  88	gfs2_tune_init(&sdp->sd_tune);
  89
  90	init_waitqueue_head(&sdp->sd_kill_wait);
  91	init_waitqueue_head(&sdp->sd_async_glock_wait);
  92	atomic_set(&sdp->sd_glock_disposal, 0);
  93	init_completion(&sdp->sd_locking_init);
  94	init_completion(&sdp->sd_wdack);
  95	spin_lock_init(&sdp->sd_statfs_spin);
  96
  97	spin_lock_init(&sdp->sd_rindex_spin);
  98	sdp->sd_rindex_tree.rb_node = NULL;
  99
 100	INIT_LIST_HEAD(&sdp->sd_jindex_list);
 101	spin_lock_init(&sdp->sd_jindex_spin);
 102	mutex_init(&sdp->sd_jindex_mutex);
 103	init_completion(&sdp->sd_journal_ready);
 104
 105	INIT_LIST_HEAD(&sdp->sd_quota_list);
 106	mutex_init(&sdp->sd_quota_sync_mutex);
 107	init_waitqueue_head(&sdp->sd_quota_wait);
 108	spin_lock_init(&sdp->sd_bitmap_lock);
 109
 110	INIT_LIST_HEAD(&sdp->sd_sc_inodes_list);
 111
 112	mapping = &sdp->sd_aspace;
 113
 114	address_space_init_once(mapping);
 115	mapping->a_ops = &gfs2_rgrp_aops;
 116	mapping->host = sb->s_bdev->bd_mapping->host;
 117	mapping->flags = 0;
 118	mapping_set_gfp_mask(mapping, GFP_NOFS);
 119	mapping->i_private_data = NULL;
 120	mapping->writeback_index = 0;
 121
 122	spin_lock_init(&sdp->sd_log_lock);
 123	atomic_set(&sdp->sd_log_pinned, 0);
 124	INIT_LIST_HEAD(&sdp->sd_log_revokes);
 125	INIT_LIST_HEAD(&sdp->sd_log_ordered);
 126	spin_lock_init(&sdp->sd_ordered_lock);
 127
 128	init_waitqueue_head(&sdp->sd_log_waitq);
 129	init_waitqueue_head(&sdp->sd_logd_waitq);
 130	spin_lock_init(&sdp->sd_ail_lock);
 131	INIT_LIST_HEAD(&sdp->sd_ail1_list);
 132	INIT_LIST_HEAD(&sdp->sd_ail2_list);
 133
 134	init_rwsem(&sdp->sd_log_flush_lock);
 135	atomic_set(&sdp->sd_log_in_flight, 0);
 136	init_waitqueue_head(&sdp->sd_log_flush_wait);
 137	mutex_init(&sdp->sd_freeze_mutex);
 138	INIT_LIST_HEAD(&sdp->sd_dead_glocks);
 139
 140	return sdp;
 141
 142fail:
 143	free_sbd(sdp);
 144	return NULL;
 145}
 146
 147/**
 148 * gfs2_check_sb - Check superblock
 149 * @sdp: the filesystem
 150 * @silent: Don't print a message if the check fails
 151 *
 152 * Checks the version code of the FS is one that we understand how to
 153 * read and that the sizes of the various on-disk structures have not
 154 * changed.
 155 */
 156
 157static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
 158{
 159	struct gfs2_sb_host *sb = &sdp->sd_sb;
 160
 161	if (sb->sb_magic != GFS2_MAGIC ||
 162	    sb->sb_type != GFS2_METATYPE_SB) {
 163		if (!silent)
 164			pr_warn("not a GFS2 filesystem\n");
 165		return -EINVAL;
 166	}
 167
 168	if (sb->sb_fs_format < GFS2_FS_FORMAT_MIN ||
 169	    sb->sb_fs_format > GFS2_FS_FORMAT_MAX ||
 170	    sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
 171		fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
 172		return -EINVAL;
 173	}
 174
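	/*
	 * Editor's note: sb_bsize must be a power of two between 512 and
	 * PAGE_SIZE; (x & (x - 1)) == 0 exactly when x is a power of two,
	 * and ffs(x) - 1 recovers log2(x) for such values.
	 */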
 175	if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
 176	    (sb->sb_bsize & (sb->sb_bsize - 1))) {
 177		pr_warn("Invalid block size\n");
 178		return -EINVAL;
 179	}
 180	if (sb->sb_bsize_shift != ffs(sb->sb_bsize) - 1) {
 181		pr_warn("Invalid block size shift\n");
 182		return -EINVAL;
 183	}
 184	return 0;
 185}
 186
 187static void gfs2_sb_in(struct gfs2_sbd *sdp, const struct gfs2_sb *str)
 188{
 189	struct gfs2_sb_host *sb = &sdp->sd_sb;
 190	struct super_block *s = sdp->sd_vfs;
 191
 192	sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
 193	sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
 194	sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
 195	sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
 196	sb->sb_bsize = be32_to_cpu(str->sb_bsize);
 197	sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
 198	sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
 199	sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
 200	sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
 201	sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);
 202
 203	memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
 204	memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
 205	super_set_uuid(s, str->sb_uuid, 16);
 206}
 207
 208/**
 209 * gfs2_read_super - Read the gfs2 super block from disk
 210 * @sdp: The GFS2 super block
 211 * @sector: The location of the super block
 212 * @silent: Don't print a message if the check fails
 213 *
 214 * This uses the bio functions to read the super block from disk
 215 * because we want to be 100% sure that we never read cached data.
 216 * A super block is read twice only during each GFS2 mount and is
  217 * never written to by the filesystem. The first time it's read no
 218 * locks are held, and the only details which are looked at are those
 219 * relating to the locking protocol. Once locking is up and working,
 220 * the sb is read again under the lock to establish the location of
 221 * the master directory (contains pointers to journals etc) and the
 222 * root directory.
 223 *
 224 * Returns: 0 on success or error
 225 */
 226
 227static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 228{
 229	struct super_block *sb = sdp->sd_vfs;
 230	struct page *page;
 231	struct bio_vec bvec;
 232	struct bio bio;
 233	int err;
 234
 235	page = alloc_page(GFP_KERNEL);
 236	if (unlikely(!page))
 237		return -ENOMEM;
 238
 239	bio_init(&bio, sb->s_bdev, &bvec, 1, REQ_OP_READ | REQ_META);
 240	bio.bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
 241	__bio_add_page(&bio, page, PAGE_SIZE, 0);
 242
 243	err = submit_bio_wait(&bio);
 244	if (err) {
 245		pr_warn("error %d reading superblock\n", err);
 246		__free_page(page);
 247		return err;
 248	}
 249	gfs2_sb_in(sdp, page_address(page));
 250	__free_page(page);
 251	return gfs2_check_sb(sdp, silent);
 252}
 253
 254/**
 255 * gfs2_read_sb - Read super block
 256 * @sdp: The GFS2 superblock
 257 * @silent: Don't print message if mount fails
 258 *
 259 */
 260
 261static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
 262{
 263	u32 hash_blocks, ind_blocks, leaf_blocks;
 264	u32 tmp_blocks;
 265	unsigned int x;
 266	int error;
 267
 268	error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
 269	if (error) {
 270		if (!silent)
 271			fs_err(sdp, "can't read superblock\n");
 272		return error;
 273	}
 274
 275	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9;
 276	sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
 277	sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
 278			  sizeof(struct gfs2_dinode)) / sizeof(u64);
 279	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
 280			  sizeof(struct gfs2_meta_header)) / sizeof(u64);
 281	sdp->sd_ldptrs = (sdp->sd_sb.sb_bsize -
 282			  sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
 283	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
 284	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
 285	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
 286	sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
 287	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
 288				sizeof(struct gfs2_meta_header)) /
 289			        sizeof(struct gfs2_quota_change);
 290	sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
 291				     sizeof(struct gfs2_meta_header))
 292		* GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
 293
 294	/*
 295	 * We always keep at least one block reserved for revokes in
 296	 * transactions.  This greatly simplifies allocating additional
 297	 * revoke blocks.
 298	 */
 299	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
 300
  301	/* Compute maximum reservation required to add an entry to a directory */
 302
 303	hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
 304			     sdp->sd_jbsize);
 305
 306	ind_blocks = 0;
 307	for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
 308		tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
 309		ind_blocks += tmp_blocks;
 310	}
 311
 312	leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;
 313
 314	sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;
 315
 316	sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
 317				sizeof(struct gfs2_dinode);
 318	sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
 319	for (x = 2;; x++) {
 320		u64 space, d;
 321		u32 m;
 322
 323		space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
 324		d = space;
 325		m = do_div(d, sdp->sd_inptrs);
 326
 327		if (d != sdp->sd_heightsize[x - 1] || m)
 328			break;
 329		sdp->sd_heightsize[x] = space;
 330	}
 331	sdp->sd_max_height = x;
 332	sdp->sd_heightsize[x] = ~0;
 333	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
 334
 335	sdp->sd_max_dents_per_leaf = (sdp->sd_sb.sb_bsize -
 336				      sizeof(struct gfs2_leaf)) /
 337				     GFS2_MIN_DIRENT_SIZE;
 338	return 0;
 339}
 340
 341static int init_names(struct gfs2_sbd *sdp, int silent)
 342{
 343	char *proto, *table;
 344	int error = 0;
 345
 346	proto = sdp->sd_args.ar_lockproto;
 347	table = sdp->sd_args.ar_locktable;
 348
 349	/*  Try to autodetect  */
 350
 351	if (!proto[0] || !table[0]) {
 352		error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
 353		if (error)
 354			return error;
 355
 356		if (!proto[0])
 357			proto = sdp->sd_sb.sb_lockproto;
 358		if (!table[0])
 359			table = sdp->sd_sb.sb_locktable;
 360	}
 361
 362	if (!table[0])
 363		table = sdp->sd_vfs->s_id;
 364
 365	BUILD_BUG_ON(GFS2_LOCKNAME_LEN > GFS2_FSNAME_LEN);
 366
 367	strscpy(sdp->sd_proto_name, proto, GFS2_LOCKNAME_LEN);
 368	strscpy(sdp->sd_table_name, table, GFS2_LOCKNAME_LEN);
 369
 370	table = sdp->sd_table_name;
 371	while ((table = strchr(table, '/')))
 372		*table = '_';
 373
 374	return error;
 375}
 376
 377static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
 378			int undo)
 379{
 380	int error = 0;
 381
 382	if (undo)
 383		goto fail_trans;
 384
 385	error = gfs2_glock_nq_num(sdp,
 386				  GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
 387				  LM_ST_EXCLUSIVE,
 388				  LM_FLAG_NOEXP | GL_NOCACHE | GL_NOPID,
 389				  mount_gh);
 390	if (error) {
 391		fs_err(sdp, "can't acquire mount glock: %d\n", error);
 392		goto fail;
 393	}
 394
 395	error = gfs2_glock_nq_num(sdp,
 396				  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
 397				  LM_ST_SHARED,
 398				  LM_FLAG_NOEXP | GL_EXACT | GL_NOPID,
 399				  &sdp->sd_live_gh);
 400	if (error) {
 401		fs_err(sdp, "can't acquire live glock: %d\n", error);
 402		goto fail_mount;
 403	}
 404
 405	error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
 406			       CREATE, &sdp->sd_rename_gl);
 407	if (error) {
 408		fs_err(sdp, "can't create rename glock: %d\n", error);
 409		goto fail_live;
 410	}
 411
 412	error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
 413			       CREATE, &sdp->sd_freeze_gl);
 414	if (error) {
 415		fs_err(sdp, "can't create freeze glock: %d\n", error);
 416		goto fail_rename;
 417	}
 418
 419	return 0;
 420
 421fail_trans:
 422	gfs2_glock_put(sdp->sd_freeze_gl);
 423fail_rename:
 424	gfs2_glock_put(sdp->sd_rename_gl);
 425fail_live:
 426	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
 427fail_mount:
 428	gfs2_glock_dq_uninit(mount_gh);
 429fail:
 430	return error;
 431}
 432
 433static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
 434			    u64 no_addr, const char *name)
 435{
 436	struct gfs2_sbd *sdp = sb->s_fs_info;
 437	struct dentry *dentry;
 438	struct inode *inode;
 439
 440	inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
 441				  GFS2_BLKST_FREE /* ignore */);
 442	if (IS_ERR(inode)) {
 443		fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
 444		return PTR_ERR(inode);
 445	}
 446	dentry = d_make_root(inode);
 447	if (!dentry) {
 448		fs_err(sdp, "can't alloc %s dentry\n", name);
 449		return -ENOMEM;
 450	}
 451	*dptr = dentry;
 452	return 0;
 453}
 454
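/**
 * init_sb - Read in the superblock and look up the root and master dirs
 * @sdp: The GFS2 superblock
 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 *
 * Validates the filesystem block size against the device and page sizes,
 * selects the xattr handlers for the on-disk format, and instantiates the
 * root and master directory dentries.
 *
 * Returns: errno
 */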
 455static int init_sb(struct gfs2_sbd *sdp, int silent)
 456{
 457	struct super_block *sb = sdp->sd_vfs;
 458	struct gfs2_holder sb_gh;
 459	u64 no_addr;
 460	int ret;
 461
 462	ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
 463				LM_ST_SHARED, 0, &sb_gh);
 464	if (ret) {
 465		fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
 466		return ret;
 467	}
 468
 469	ret = gfs2_read_sb(sdp, silent);
 470	if (ret) {
 471		fs_err(sdp, "can't read superblock: %d\n", ret);
 472		goto out;
 473	}
 474
  475	switch (sdp->sd_sb.sb_fs_format) {
 476	case GFS2_FS_FORMAT_MAX:
 477		sb->s_xattr = gfs2_xattr_handlers_max;
 478		break;
 479
 480	case GFS2_FS_FORMAT_MIN:
 481		sb->s_xattr = gfs2_xattr_handlers_min;
 482		break;
 483
 484	default:
 485		BUG();
 486	}
 487
 488	/* Set up the buffer cache and SB for real */
 489	if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
 490		ret = -EINVAL;
 491		fs_err(sdp, "FS block size (%u) is too small for device "
 492		       "block size (%u)\n",
 493		       sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
 494		goto out;
 495	}
 496	if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
 497		ret = -EINVAL;
 498		fs_err(sdp, "FS block size (%u) is too big for machine "
 499		       "page size (%u)\n",
 500		       sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
 501		goto out;
 502	}
 503	sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);
 504
 505	/* Get the root inode */
 506	no_addr = sdp->sd_sb.sb_root_dir.no_addr;
 507	ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
 508	if (ret)
 509		goto out;
 510
 511	/* Get the master inode */
 512	no_addr = sdp->sd_sb.sb_master_dir.no_addr;
 513	ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
 514	if (ret) {
 515		dput(sdp->sd_root_dir);
 516		goto out;
 517	}
 518	sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);
 519out:
 520	gfs2_glock_dq_uninit(&sb_gh);
 521	return ret;
 522}
 523
 524static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
 525{
 526	char *message = "FIRSTMOUNT=Done";
 527	char *envp[] = { message, NULL };
 528
 529	fs_info(sdp, "first mount done, others may mount\n");
 530
 531	if (sdp->sd_lockstruct.ls_ops->lm_first_done)
 532		sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);
 533
 534	kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
 535}
 536
 537/**
 538 * gfs2_jindex_hold - Grab a lock on the jindex
 539 * @sdp: The GFS2 superblock
 540 * @ji_gh: the holder for the jindex glock
 541 *
 542 * Returns: errno
 543 */
 544
 545static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
 546{
 547	struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
 548	struct qstr name;
 549	char buf[20];
 550	struct gfs2_jdesc *jd;
 551	int error;
 552
 553	name.name = buf;
 554
 555	mutex_lock(&sdp->sd_jindex_mutex);
 556
 557	for (;;) {
 558		struct gfs2_inode *jip;
 559
 560		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
 561		if (error)
 562			break;
 563
 564		name.len = sprintf(buf, "journal%u", sdp->sd_journals);
 565		name.hash = gfs2_disk_hash(name.name, name.len);
 566
 567		error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
 568		if (error == -ENOENT) {
 569			error = 0;
 570			break;
 571		}
 572
 573		gfs2_glock_dq_uninit(ji_gh);
 574
 575		if (error)
 576			break;
 577
 578		error = -ENOMEM;
 579		jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
 580		if (!jd)
 581			break;
 582
 583		INIT_LIST_HEAD(&jd->extent_list);
 584		INIT_LIST_HEAD(&jd->jd_revoke_list);
 585
 586		INIT_WORK(&jd->jd_work, gfs2_recover_func);
 587		jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
 588		if (IS_ERR_OR_NULL(jd->jd_inode)) {
 589			if (!jd->jd_inode)
 590				error = -ENOENT;
 591			else
 592				error = PTR_ERR(jd->jd_inode);
 593			kfree(jd);
 594			break;
 595		}
 596
 597		d_mark_dontcache(jd->jd_inode);
 598		spin_lock(&sdp->sd_jindex_spin);
 599		jd->jd_jid = sdp->sd_journals++;
 600		jip = GFS2_I(jd->jd_inode);
 601		jd->jd_no_addr = jip->i_no_addr;
 602		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
 603		spin_unlock(&sdp->sd_jindex_spin);
 604	}
 605
 606	mutex_unlock(&sdp->sd_jindex_mutex);
 607
 608	return error;
 609}
 610
 611/**
 612 * init_statfs - look up and initialize master and local (per node) statfs inodes
 613 * @sdp: The GFS2 superblock
 614 *
 615 * This should be called after the jindex is initialized in init_journal() and
 616 * before gfs2_journal_recovery() is called because we need to be able to write
 617 * to these inodes during recovery.
 618 *
 619 * Returns: errno
 620 */
 621static int init_statfs(struct gfs2_sbd *sdp)
 622{
 623	int error = 0;
 624	struct inode *master = d_inode(sdp->sd_master_dir);
 625	struct inode *pn = NULL;
 626	char buf[30];
 627	struct gfs2_jdesc *jd;
 628	struct gfs2_inode *ip;
 629
 630	sdp->sd_statfs_inode = gfs2_lookup_meta(master, "statfs");
 631	if (IS_ERR(sdp->sd_statfs_inode)) {
 632		error = PTR_ERR(sdp->sd_statfs_inode);
 633		fs_err(sdp, "can't read in statfs inode: %d\n", error);
 634		goto out;
 635	}
 636	if (sdp->sd_args.ar_spectator)
 637		goto out;
 638
 639	pn = gfs2_lookup_meta(master, "per_node");
 640	if (IS_ERR(pn)) {
 641		error = PTR_ERR(pn);
 642		fs_err(sdp, "can't find per_node directory: %d\n", error);
 643		goto put_statfs;
 644	}
 645
  646	/* For each jid, look up the corresponding local statfs inode in the
  647	 * per_node metafs directory and save it in sdp->sd_sc_inodes_list. */
 648	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 649		struct local_statfs_inode *lsi =
 650			kmalloc(sizeof(struct local_statfs_inode), GFP_NOFS);
 651		if (!lsi) {
 652			error = -ENOMEM;
 653			goto free_local;
 654		}
 655		sprintf(buf, "statfs_change%u", jd->jd_jid);
 656		lsi->si_sc_inode = gfs2_lookup_meta(pn, buf);
 657		if (IS_ERR(lsi->si_sc_inode)) {
 658			error = PTR_ERR(lsi->si_sc_inode);
 659			fs_err(sdp, "can't find local \"sc\" file#%u: %d\n",
 660			       jd->jd_jid, error);
 661			kfree(lsi);
 662			goto free_local;
 663		}
 664		lsi->si_jid = jd->jd_jid;
 665		if (jd->jd_jid == sdp->sd_jdesc->jd_jid)
 666			sdp->sd_sc_inode = lsi->si_sc_inode;
 667
 668		list_add_tail(&lsi->si_list, &sdp->sd_sc_inodes_list);
 669	}
 670
 671	iput(pn);
 672	pn = NULL;
 673	ip = GFS2_I(sdp->sd_sc_inode);
 674	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_NOPID,
 675				   &sdp->sd_sc_gh);
 676	if (error) {
 677		fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
 678		goto free_local;
 679	}
 680	/* read in the local statfs buffer - other nodes don't change it. */
 681	error = gfs2_meta_inode_buffer(ip, &sdp->sd_sc_bh);
 682	if (error) {
 683		fs_err(sdp, "Cannot read in local statfs: %d\n", error);
 684		goto unlock_sd_gh;
 685	}
 686	return 0;
 687
 688unlock_sd_gh:
 689	gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
 690free_local:
 691	free_local_statfs_inodes(sdp);
 692	iput(pn);
 693put_statfs:
 694	iput(sdp->sd_statfs_inode);
 695out:
 696	return error;
 697}
 698
 699/* Uninitialize and free up memory used by the list of statfs inodes */
 700static void uninit_statfs(struct gfs2_sbd *sdp)
 701{
 702	if (!sdp->sd_args.ar_spectator) {
 703		brelse(sdp->sd_sc_bh);
 704		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
 705		free_local_statfs_inodes(sdp);
 706	}
 707	iput(sdp->sd_statfs_inode);
 708}
 709
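/**
 * init_journal - Look up the journal index and prepare this node's journal
 * @sdp: The GFS2 superblock
 * @undo: if 1, undo a previous successful call instead of setting up
 *
 * Reads in the jindex, picks the journal matching our journal id (journal 0
 * for spectator mounts), acquires the journal glocks, and, if this node is
 * the first mounter, recovers (or, for spectators, merely checks) every
 * journal before telling the lock module that others may mount.
 *
 * Returns: errno
 */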
 710static int init_journal(struct gfs2_sbd *sdp, int undo)
 711{
 712	struct inode *master = d_inode(sdp->sd_master_dir);
 713	struct gfs2_holder ji_gh;
 714	struct gfs2_inode *ip;
 715	int error = 0;
 716
 717	gfs2_holder_mark_uninitialized(&ji_gh);
 718	if (undo)
 719		goto fail_statfs;
 720
 721	sdp->sd_jindex = gfs2_lookup_meta(master, "jindex");
 722	if (IS_ERR(sdp->sd_jindex)) {
  723		fs_err(sdp, "can't lookup journal index: %ld\n", PTR_ERR(sdp->sd_jindex));
 724		return PTR_ERR(sdp->sd_jindex);
 725	}
 726
 727	/* Load in the journal index special file */
 728
 729	error = gfs2_jindex_hold(sdp, &ji_gh);
 730	if (error) {
 731		fs_err(sdp, "can't read journal index: %d\n", error);
 732		goto fail;
 733	}
 734
 735	error = -EUSERS;
 736	if (!gfs2_jindex_size(sdp)) {
 737		fs_err(sdp, "no journals!\n");
 738		goto fail_jindex;
 739	}
 740
 741	atomic_set(&sdp->sd_log_blks_needed, 0);
 742	if (sdp->sd_args.ar_spectator) {
 743		sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
 744		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
 745		atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
 746		atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
 747	} else {
 748		if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
 749			fs_err(sdp, "can't mount journal #%u\n",
 750			       sdp->sd_lockstruct.ls_jid);
 751			fs_err(sdp, "there are only %u journals (0 - %u)\n",
 752			       gfs2_jindex_size(sdp),
 753			       gfs2_jindex_size(sdp) - 1);
 754			goto fail_jindex;
 755		}
 756		sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);
 757
 758		error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
 759					  &gfs2_journal_glops,
 760					  LM_ST_EXCLUSIVE,
 761					  LM_FLAG_NOEXP | GL_NOCACHE | GL_NOPID,
 762					  &sdp->sd_journal_gh);
 763		if (error) {
 764			fs_err(sdp, "can't acquire journal glock: %d\n", error);
 765			goto fail_jindex;
 766		}
 767
 768		ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 769		sdp->sd_jinode_gl = ip->i_gl;
 770		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
 771					   LM_FLAG_NOEXP | GL_EXACT |
 772					   GL_NOCACHE | GL_NOPID,
 773					   &sdp->sd_jinode_gh);
 774		if (error) {
 775			fs_err(sdp, "can't acquire journal inode glock: %d\n",
 776			       error);
 777			goto fail_journal_gh;
 778		}
 779
 780		error = gfs2_jdesc_check(sdp->sd_jdesc);
 781		if (error) {
 782			fs_err(sdp, "my journal (%u) is bad: %d\n",
 783			       sdp->sd_jdesc->jd_jid, error);
 784			goto fail_jinode_gh;
 785		}
 786		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
 787		atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
 788		atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
 789
 790		/* Map the extents for this journal's blocks */
 791		gfs2_map_journal_extents(sdp, sdp->sd_jdesc);
 792	}
 793	trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));
 794
 795	/* Lookup statfs inodes here so journal recovery can use them. */
 796	error = init_statfs(sdp);
 797	if (error)
 798		goto fail_jinode_gh;
 799
 800	if (sdp->sd_lockstruct.ls_first) {
 801		unsigned int x;
 802		for (x = 0; x < sdp->sd_journals; x++) {
 803			struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);
 804
 805			if (sdp->sd_args.ar_spectator) {
 806				error = check_journal_clean(sdp, jd, true);
 807				if (error)
 808					goto fail_statfs;
 809				continue;
 810			}
 811			error = gfs2_recover_journal(jd, true);
 812			if (error) {
 813				fs_err(sdp, "error recovering journal %u: %d\n",
 814				       x, error);
 815				goto fail_statfs;
 816			}
 817		}
 818
 819		gfs2_others_may_mount(sdp);
 820	} else if (!sdp->sd_args.ar_spectator) {
 821		error = gfs2_recover_journal(sdp->sd_jdesc, true);
 822		if (error) {
 823			fs_err(sdp, "error recovering my journal: %d\n", error);
 824			goto fail_statfs;
 825		}
 826	}
 827
 828	sdp->sd_log_idle = 1;
 829	set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
 830	gfs2_glock_dq_uninit(&ji_gh);
 831	INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
 832	return 0;
 833
 834fail_statfs:
 835	uninit_statfs(sdp);
 836fail_jinode_gh:
 837	/* A withdraw may have done dq/uninit so now we need to check it */
 838	if (!sdp->sd_args.ar_spectator &&
 839	    gfs2_holder_initialized(&sdp->sd_jinode_gh))
 840		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
 841fail_journal_gh:
 842	if (!sdp->sd_args.ar_spectator &&
 843	    gfs2_holder_initialized(&sdp->sd_journal_gh))
 844		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
 845fail_jindex:
 846	gfs2_jindex_free(sdp);
 847	if (gfs2_holder_initialized(&ji_gh))
 848		gfs2_glock_dq_uninit(&ji_gh);
 849fail:
 850	iput(sdp->sd_jindex);
 851	return error;
 852}
 853
 854static struct lock_class_key gfs2_quota_imutex_key;
 855
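/**
 * init_inodes - Look up the system inodes needed for normal operation
 * @sdp: The GFS2 superblock
 * @undo: if 1, undo a previous successful call instead of setting up
 *
 * Initializes the journal, then reads in the resource group index and
 * quota inodes from the hidden master directory.
 *
 * Returns: errno
 */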
 856static int init_inodes(struct gfs2_sbd *sdp, int undo)
 857{
 858	int error = 0;
 859	struct inode *master = d_inode(sdp->sd_master_dir);
 860
 861	if (undo)
 862		goto fail_qinode;
 863
 864	error = init_journal(sdp, undo);
 865	complete_all(&sdp->sd_journal_ready);
 866	if (error)
 867		goto fail;
 868
 869	/* Read in the resource index inode */
 870	sdp->sd_rindex = gfs2_lookup_meta(master, "rindex");
 871	if (IS_ERR(sdp->sd_rindex)) {
 872		error = PTR_ERR(sdp->sd_rindex);
 873		fs_err(sdp, "can't get resource index inode: %d\n", error);
 874		goto fail_journal;
 875	}
 876	sdp->sd_rindex_uptodate = 0;
 877
 878	/* Read in the quota inode */
 879	sdp->sd_quota_inode = gfs2_lookup_meta(master, "quota");
 880	if (IS_ERR(sdp->sd_quota_inode)) {
 881		error = PTR_ERR(sdp->sd_quota_inode);
 882		fs_err(sdp, "can't get quota file inode: %d\n", error);
 883		goto fail_rindex;
 884	}
 885	/*
  886	 * i_rwsem on quota files is special. Since this inode is a hidden
  887	 * system file, we are safe to define the locking ourselves.
 888	 */
 889	lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
 890			  &gfs2_quota_imutex_key);
 891
 892	error = gfs2_rindex_update(sdp);
 893	if (error)
 894		goto fail_qinode;
 895
 896	return 0;
 897
 898fail_qinode:
 899	iput(sdp->sd_quota_inode);
 900fail_rindex:
 901	gfs2_clear_rgrpd(sdp);
 902	iput(sdp->sd_rindex);
 903fail_journal:
 904	init_journal(sdp, UNDO);
 905fail:
 906	return error;
 907}
 908
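/**
 * init_per_node - Look up this node's quota_change file in per_node
 * @sdp: The GFS2 superblock
 * @undo: if 1, undo a previous successful call instead of setting up
 *
 * Spectator mounts are read-only and have no journal of their own, so they
 * skip this step entirely.
 *
 * Returns: errno
 */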
 909static int init_per_node(struct gfs2_sbd *sdp, int undo)
 910{
 911	struct inode *pn = NULL;
 912	char buf[30];
 913	int error = 0;
 914	struct gfs2_inode *ip;
 915	struct inode *master = d_inode(sdp->sd_master_dir);
 916
 917	if (sdp->sd_args.ar_spectator)
 918		return 0;
 919
 920	if (undo)
 921		goto fail_qc_gh;
 922
 923	pn = gfs2_lookup_meta(master, "per_node");
 924	if (IS_ERR(pn)) {
 925		error = PTR_ERR(pn);
 926		fs_err(sdp, "can't find per_node directory: %d\n", error);
 927		return error;
 928	}
 929
 930	sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
 931	sdp->sd_qc_inode = gfs2_lookup_meta(pn, buf);
 932	if (IS_ERR(sdp->sd_qc_inode)) {
 933		error = PTR_ERR(sdp->sd_qc_inode);
 934		fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
 935		goto fail_ut_i;
 936	}
 937
 938	iput(pn);
 939	pn = NULL;
 940
 941	ip = GFS2_I(sdp->sd_qc_inode);
 942	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_NOPID,
 943				   &sdp->sd_qc_gh);
 944	if (error) {
 945		fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
 946		goto fail_qc_i;
 947	}
 948
 949	return 0;
 950
 951fail_qc_gh:
 952	gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
 953fail_qc_i:
 954	iput(sdp->sd_qc_inode);
 955fail_ut_i:
 956	iput(pn);
 957	return error;
 958}
 959
 960static const match_table_t nolock_tokens = {
 961	{ Opt_jid, "jid=%d", },
 962	{ Opt_err, NULL },
 963};
 964
 965static const struct lm_lockops nolock_ops = {
 966	.lm_proto_name = "lock_nolock",
 967	.lm_put_lock = gfs2_glock_free,
 968	.lm_tokens = &nolock_tokens,
 969};
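
/*
 * The hostdata mount option is normally generated by the userspace mount
 * helper rather than typed by hand; a plausible (purely illustrative)
 * example would be "jid=1:first=1".  gfs2_lm_mount() below splits the
 * string on ':' and matches each piece against the lock module's token
 * table.
 */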
 970
 971/**
 972 * gfs2_lm_mount - mount a locking protocol
 973 * @sdp: the filesystem
 974 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 975 *
 976 * Returns: errno
 977 */
 978
 979static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
 980{
 981	const struct lm_lockops *lm;
 982	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 983	struct gfs2_args *args = &sdp->sd_args;
 984	const char *proto = sdp->sd_proto_name;
 985	const char *table = sdp->sd_table_name;
 986	char *o, *options;
 987	int ret;
 988
 989	if (!strcmp("lock_nolock", proto)) {
 990		lm = &nolock_ops;
 991		sdp->sd_args.ar_localflocks = 1;
 992#ifdef CONFIG_GFS2_FS_LOCKING_DLM
 993	} else if (!strcmp("lock_dlm", proto)) {
 994		lm = &gfs2_dlm_ops;
 995#endif
 996	} else {
 997		pr_info("can't find protocol %s\n", proto);
 998		return -ENOENT;
 999	}
1000
1001	fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
1002
1003	ls->ls_ops = lm;
1004	ls->ls_first = 1;
1005
1006	for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
1007		substring_t tmp[MAX_OPT_ARGS];
1008		int token, option;
1009
1010		if (!o || !*o)
1011			continue;
1012
1013		token = match_token(o, *lm->lm_tokens, tmp);
1014		switch (token) {
1015		case Opt_jid:
1016			ret = match_int(&tmp[0], &option);
 1017			if (ret || option < 0)
1018				goto hostdata_error;
1019			if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
1020				ls->ls_jid = option;
1021			break;
1022		case Opt_id:
1023		case Opt_nodir:
1024			/* Obsolete, but left for backward compat purposes */
1025			break;
1026		case Opt_first:
1027			ret = match_int(&tmp[0], &option);
1028			if (ret || (option != 0 && option != 1))
1029				goto hostdata_error;
1030			ls->ls_first = option;
1031			break;
1032		case Opt_err:
1033		default:
1034hostdata_error:
1035			fs_info(sdp, "unknown hostdata (%s)\n", o);
1036			return -EINVAL;
1037		}
1038	}
1039
1040	if (lm->lm_mount == NULL) {
1041		fs_info(sdp, "Now mounting FS (format %u)...\n", sdp->sd_sb.sb_fs_format);
1042		complete_all(&sdp->sd_locking_init);
1043		return 0;
1044	}
1045	ret = lm->lm_mount(sdp, table);
1046	if (ret == 0)
1047		fs_info(sdp, "Joined cluster. Now mounting FS (format %u)...\n",
1048		        sdp->sd_sb.sb_fs_format);
1049	complete_all(&sdp->sd_locking_init);
1050	return ret;
1051}
1052
1053void gfs2_lm_unmount(struct gfs2_sbd *sdp)
1054{
1055	const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
1056	if (!gfs2_withdrawing_or_withdrawn(sdp) && lm->lm_unmount)
1057		lm->lm_unmount(sdp);
1058}
1059
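/**
 * wait_on_journal - Wait until a journal id has been assigned to this node
 * @sdp: The GFS2 superblock
 *
 * lock_nolock mounts need no journal id from userspace, so there is nothing
 * to wait for; otherwise, sleep until SDF_NOJOURNALID is cleared, typically
 * via the "jid=" hostdata option supplied by the lock module's mount helper.
 *
 * Returns: 0, or -EINTR if interrupted
 */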
1060static int wait_on_journal(struct gfs2_sbd *sdp)
1061{
1062	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
1063		return 0;
1064
1065	return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
1066		? -EINTR : 0;
1067}
1068
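/**
 * gfs2_online_uevent - Report that the filesystem is now online
 * @sdp: The GFS2 superblock
 *
 * Emits a KOBJ_ONLINE uevent carrying the read-only and spectator status
 * of the mount.
 */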
1069void gfs2_online_uevent(struct gfs2_sbd *sdp)
1070{
1071	struct super_block *sb = sdp->sd_vfs;
1072	char ro[20];
1073	char spectator[20];
1074	char *envp[] = { ro, spectator, NULL };
1075	sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
1076	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
1077	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
1078}
1079
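/**
 * init_threads - Start the logd and quotad kernel threads
 * @sdp: The GFS2 superblock
 *
 * Returns: 0, or errno if either thread could not be created
 */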
1080static int init_threads(struct gfs2_sbd *sdp)
1081{
1082	struct task_struct *p;
1083	int error = 0;
1084
1085	p = kthread_create(gfs2_logd, sdp, "gfs2_logd/%s", sdp->sd_fsname);
1086	if (IS_ERR(p)) {
1087		error = PTR_ERR(p);
1088		fs_err(sdp, "can't create logd thread: %d\n", error);
1089		return error;
1090	}
1091	get_task_struct(p);
1092	sdp->sd_logd_process = p;
1093
1094	p = kthread_create(gfs2_quotad, sdp, "gfs2_quotad/%s", sdp->sd_fsname);
1095	if (IS_ERR(p)) {
1096		error = PTR_ERR(p);
1097		fs_err(sdp, "can't create quotad thread: %d\n", error);
1098		goto fail;
1099	}
1100	get_task_struct(p);
1101	sdp->sd_quotad_process = p;
1102
1103	wake_up_process(sdp->sd_logd_process);
1104	wake_up_process(sdp->sd_quotad_process);
1105	return 0;
1106
1107fail:
1108	kthread_stop_put(sdp->sd_logd_process);
1109	sdp->sd_logd_process = NULL;
1110	return error;
1111}
1112
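/* Stop the logd and quotad threads started by init_threads(). */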
1113void gfs2_destroy_threads(struct gfs2_sbd *sdp)
1114{
1115	if (sdp->sd_logd_process) {
1116		kthread_stop_put(sdp->sd_logd_process);
1117		sdp->sd_logd_process = NULL;
1118	}
1119	if (sdp->sd_quotad_process) {
1120		kthread_stop_put(sdp->sd_quotad_process);
1121		sdp->sd_quotad_process = NULL;
1122	}
1123}
1124
1125/**
1126 * gfs2_fill_super - Read in superblock
1127 * @sb: The VFS superblock
1128 * @fc: Mount options and flags
1129 *
1130 * Returns: -errno
1131 */
1132static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
1133{
1134	struct gfs2_args *args = fc->fs_private;
1135	int silent = fc->sb_flags & SB_SILENT;
1136	struct gfs2_sbd *sdp;
1137	struct gfs2_holder mount_gh;
1138	int error;
1139
1140	sdp = init_sbd(sb);
1141	if (!sdp) {
1142		pr_warn("can't alloc struct gfs2_sbd\n");
1143		return -ENOMEM;
1144	}
1145	sdp->sd_args = *args;
1146
1147	if (sdp->sd_args.ar_spectator) {
 1148		sb->s_flags |= SB_RDONLY;
1149		set_bit(SDF_RORECOVERY, &sdp->sd_flags);
1150	}
1151	if (sdp->sd_args.ar_posix_acl)
1152		sb->s_flags |= SB_POSIXACL;
1153	if (sdp->sd_args.ar_nobarrier)
1154		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1155
1156	sb->s_flags |= SB_NOSEC;
1157	sb->s_magic = GFS2_MAGIC;
1158	sb->s_op = &gfs2_super_ops;
1159	sb->s_d_op = &gfs2_dops;
1160	sb->s_export_op = &gfs2_export_ops;
1161	sb->s_qcop = &gfs2_quotactl_ops;
1162	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
1163	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
1164	sb->s_time_gran = 1;
1165	sb->s_maxbytes = MAX_LFS_FILESIZE;
1166
1167	/* Set up the buffer cache and fill in some fake block size values
1168	   to allow us to read-in the on-disk superblock. */
1169	sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, 512);
1170	sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
1171	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9;
1172	sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
1173
1174	sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
1175	sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
1176	if (sdp->sd_args.ar_statfs_quantum) {
1177		sdp->sd_tune.gt_statfs_slow = 0;
1178		sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
1179	} else {
1180		sdp->sd_tune.gt_statfs_slow = 1;
1181		sdp->sd_tune.gt_statfs_quantum = 30;
1182	}
1183
1184	error = init_names(sdp, silent);
1185	if (error)
1186		goto fail_free;
1187
1188	snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
1189
1190	error = -ENOMEM;
1191	sdp->sd_glock_wq = alloc_workqueue("gfs2-glock/%s",
1192			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0,
1193			sdp->sd_fsname);
1194	if (!sdp->sd_glock_wq)
1195		goto fail_free;
1196
1197	sdp->sd_delete_wq = alloc_workqueue("gfs2-delete/%s",
1198			WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, sdp->sd_fsname);
1199	if (!sdp->sd_delete_wq)
1200		goto fail_glock_wq;
1201
1202	error = gfs2_sys_fs_add(sdp);
1203	if (error)
1204		goto fail_delete_wq;
1205
1206	gfs2_create_debugfs_file(sdp);
1207
1208	error = gfs2_lm_mount(sdp, silent);
1209	if (error)
1210		goto fail_debug;
1211
1212	error = init_locking(sdp, &mount_gh, DO);
1213	if (error)
1214		goto fail_lm;
1215
1216	error = init_sb(sdp, silent);
1217	if (error)
1218		goto fail_locking;
1219
1220	/* Turn rgrplvb on by default if fs format is recent enough */
1221	if (!sdp->sd_args.ar_got_rgrplvb && sdp->sd_sb.sb_fs_format > 1801)
1222		sdp->sd_args.ar_rgrplvb = 1;
1223
1224	error = wait_on_journal(sdp);
1225	if (error)
1226		goto fail_sb;
1227
1228	/*
1229	 * If user space has failed to join the cluster or some similar
1230	 * failure has occurred, then the journal id will contain a
1231	 * negative (error) number. This will then be returned to the
1232	 * caller (of the mount syscall). We do this even for spectator
1233	 * mounts (which just write a jid of 0 to indicate "ok" even though
1234	 * the jid is unused in the spectator case)
1235	 */
1236	if (sdp->sd_lockstruct.ls_jid < 0) {
1237		error = sdp->sd_lockstruct.ls_jid;
1238		sdp->sd_lockstruct.ls_jid = 0;
1239		goto fail_sb;
1240	}
1241
1242	if (sdp->sd_args.ar_spectator)
1243		snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.s",
1244			 sdp->sd_table_name);
1245	else
1246		snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.%u",
1247			 sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);
1248
1249	error = init_inodes(sdp, DO);
1250	if (error)
1251		goto fail_sb;
1252
1253	error = init_per_node(sdp, DO);
1254	if (error)
1255		goto fail_inodes;
1256
1257	error = gfs2_statfs_init(sdp);
1258	if (error) {
1259		fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
1260		goto fail_per_node;
1261	}
1262
1263	if (!sb_rdonly(sb)) {
1264		error = init_threads(sdp);
1265		if (error)
1266			goto fail_per_node;
1267	}
1268
1269	error = gfs2_freeze_lock_shared(sdp);
1270	if (error)
1271		goto fail_per_node;
1272
1273	if (!sb_rdonly(sb))
1274		error = gfs2_make_fs_rw(sdp);
1275
1276	if (error) {
1277		gfs2_freeze_unlock(sdp);
1278		gfs2_destroy_threads(sdp);
1279		fs_err(sdp, "can't make FS RW: %d\n", error);
1280		goto fail_per_node;
1281	}
1282	gfs2_glock_dq_uninit(&mount_gh);
1283	gfs2_online_uevent(sdp);
1284	return 0;
1285
1286fail_per_node:
1287	init_per_node(sdp, UNDO);
1288fail_inodes:
1289	init_inodes(sdp, UNDO);
1290fail_sb:
1291	if (sdp->sd_root_dir)
1292		dput(sdp->sd_root_dir);
1293	if (sdp->sd_master_dir)
1294		dput(sdp->sd_master_dir);
1295	if (sb->s_root)
1296		dput(sb->s_root);
1297	sb->s_root = NULL;
1298fail_locking:
1299	init_locking(sdp, &mount_gh, UNDO);
1300fail_lm:
1301	complete_all(&sdp->sd_journal_ready);
1302	gfs2_gl_hash_clear(sdp);
1303	gfs2_lm_unmount(sdp);
1304fail_debug:
1305	gfs2_delete_debugfs_file(sdp);
1306	gfs2_sys_fs_del(sdp);
1307fail_delete_wq:
1308	destroy_workqueue(sdp->sd_delete_wq);
1309fail_glock_wq:
1310	if (sdp->sd_glock_wq)
1311		destroy_workqueue(sdp->sd_glock_wq);
1312fail_free:
1313	free_sbd(sdp);
1314	sb->s_fs_info = NULL;
1315	return error;
1316}
1317
1318/**
1319 * gfs2_get_tree - Get the GFS2 superblock and root directory
1320 * @fc: The filesystem context
1321 *
1322 * Returns: 0 or -errno on error
1323 */
1324static int gfs2_get_tree(struct fs_context *fc)
1325{
1326	struct gfs2_args *args = fc->fs_private;
1327	struct gfs2_sbd *sdp;
1328	int error;
1329
1330	error = get_tree_bdev(fc, gfs2_fill_super);
1331	if (error)
1332		return error;
1333
1334	sdp = fc->root->d_sb->s_fs_info;
1335	dput(fc->root);
1336	if (args->ar_meta)
1337		fc->root = dget(sdp->sd_master_dir);
1338	else
1339		fc->root = dget(sdp->sd_root_dir);
1340	return 0;
1341}
1342
1343static void gfs2_fc_free(struct fs_context *fc)
1344{
1345	struct gfs2_args *args = fc->fs_private;
1346
1347	kfree(args);
1348}
1349
1350enum gfs2_param {
1351	Opt_lockproto,
1352	Opt_locktable,
1353	Opt_hostdata,
1354	Opt_spectator,
1355	Opt_ignore_local_fs,
1356	Opt_localflocks,
1357	Opt_localcaching,
1358	Opt_debug,
1359	Opt_upgrade,
1360	Opt_acl,
1361	Opt_quota,
1362	Opt_quota_flag,
1363	Opt_suiddir,
1364	Opt_data,
1365	Opt_meta,
1366	Opt_discard,
1367	Opt_commit,
1368	Opt_errors,
1369	Opt_statfs_quantum,
1370	Opt_statfs_percent,
1371	Opt_quota_quantum,
1372	Opt_barrier,
1373	Opt_rgrplvb,
1374	Opt_loccookie,
1375};
1376
1377static const struct constant_table gfs2_param_quota[] = {
1378	{"off",        GFS2_QUOTA_OFF},
1379	{"account",    GFS2_QUOTA_ACCOUNT},
1380	{"on",         GFS2_QUOTA_ON},
1381	{"quiet",      GFS2_QUOTA_QUIET},
1382	{}
1383};
1384
1385enum opt_data {
1386	Opt_data_writeback = GFS2_DATA_WRITEBACK,
1387	Opt_data_ordered   = GFS2_DATA_ORDERED,
1388};
1389
1390static const struct constant_table gfs2_param_data[] = {
1391	{"writeback",  Opt_data_writeback },
1392	{"ordered",    Opt_data_ordered },
1393	{}
1394};
1395
1396enum opt_errors {
1397	Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
1398	Opt_errors_panic    = GFS2_ERRORS_PANIC,
1399};
1400
1401static const struct constant_table gfs2_param_errors[] = {
1402	{"withdraw",   Opt_errors_withdraw },
1403	{"panic",      Opt_errors_panic },
1404	{}
1405};
1406
1407static const struct fs_parameter_spec gfs2_fs_parameters[] = {
1408	fsparam_string ("lockproto",          Opt_lockproto),
1409	fsparam_string ("locktable",          Opt_locktable),
1410	fsparam_string ("hostdata",           Opt_hostdata),
1411	fsparam_flag   ("spectator",          Opt_spectator),
1412	fsparam_flag   ("norecovery",         Opt_spectator),
1413	fsparam_flag   ("ignore_local_fs",    Opt_ignore_local_fs),
1414	fsparam_flag   ("localflocks",        Opt_localflocks),
1415	fsparam_flag   ("localcaching",       Opt_localcaching),
1416	fsparam_flag_no("debug",              Opt_debug),
1417	fsparam_flag   ("upgrade",            Opt_upgrade),
1418	fsparam_flag_no("acl",                Opt_acl),
1419	fsparam_flag_no("suiddir",            Opt_suiddir),
1420	fsparam_enum   ("data",               Opt_data, gfs2_param_data),
1421	fsparam_flag   ("meta",               Opt_meta),
1422	fsparam_flag_no("discard",            Opt_discard),
1423	fsparam_s32    ("commit",             Opt_commit),
1424	fsparam_enum   ("errors",             Opt_errors, gfs2_param_errors),
1425	fsparam_s32    ("statfs_quantum",     Opt_statfs_quantum),
1426	fsparam_s32    ("statfs_percent",     Opt_statfs_percent),
1427	fsparam_s32    ("quota_quantum",      Opt_quota_quantum),
1428	fsparam_flag_no("barrier",            Opt_barrier),
1429	fsparam_flag_no("rgrplvb",            Opt_rgrplvb),
1430	fsparam_flag_no("loccookie",          Opt_loccookie),
1431	/* quota can be a flag or an enum so it gets special treatment */
1432	fsparam_flag_no("quota",	      Opt_quota_flag),
1433	fsparam_enum("quota",		      Opt_quota, gfs2_param_quota),
1434	{}
1435};
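
/*
 * For example, a clustered mount using a handful of these options might be
 * invoked as (illustrative values only):
 *
 *   mount -t gfs2 -o lockproto=lock_dlm,locktable=mycluster:myfs,acl \
 *         /dev/vg/lv /mnt/gfs2
 *
 * The VFS hands each option to gfs2_parse_param() below, which looks it up
 * in this table via fs_parse().
 */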
1436
1437/* Parse a single mount parameter */
1438static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
1439{
1440	struct gfs2_args *args = fc->fs_private;
1441	struct fs_parse_result result;
1442	int o;
1443
1444	o = fs_parse(fc, gfs2_fs_parameters, param, &result);
1445	if (o < 0)
1446		return o;
1447
1448	switch (o) {
1449	case Opt_lockproto:
1450		strscpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
1451		break;
1452	case Opt_locktable:
1453		strscpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
1454		break;
1455	case Opt_hostdata:
1456		strscpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
1457		break;
1458	case Opt_spectator:
1459		args->ar_spectator = 1;
1460		break;
1461	case Opt_ignore_local_fs:
1462		/* Retained for backwards compat only */
1463		break;
1464	case Opt_localflocks:
1465		args->ar_localflocks = 1;
1466		break;
1467	case Opt_localcaching:
1468		/* Retained for backwards compat only */
1469		break;
1470	case Opt_debug:
1471		if (result.boolean && args->ar_errors == GFS2_ERRORS_PANIC)
1472			return invalfc(fc, "-o debug and -o errors=panic are mutually exclusive");
1473		args->ar_debug = result.boolean;
1474		break;
1475	case Opt_upgrade:
1476		/* Retained for backwards compat only */
1477		break;
1478	case Opt_acl:
1479		args->ar_posix_acl = result.boolean;
1480		break;
1481	case Opt_quota_flag:
1482		args->ar_quota = result.negated ? GFS2_QUOTA_OFF : GFS2_QUOTA_ON;
1483		break;
1484	case Opt_quota:
1485		args->ar_quota = result.int_32;
1486		break;
1487	case Opt_suiddir:
1488		args->ar_suiddir = result.boolean;
1489		break;
1490	case Opt_data:
1491		/* The uint_32 result maps directly to GFS2_DATA_* */
1492		args->ar_data = result.uint_32;
1493		break;
1494	case Opt_meta:
1495		args->ar_meta = 1;
1496		break;
1497	case Opt_discard:
1498		args->ar_discard = result.boolean;
1499		break;
1500	case Opt_commit:
1501		if (result.int_32 <= 0)
1502			return invalfc(fc, "commit mount option requires a positive numeric argument");
1503		args->ar_commit = result.int_32;
1504		break;
1505	case Opt_statfs_quantum:
1506		if (result.int_32 < 0)
1507			return invalfc(fc, "statfs_quantum mount option requires a non-negative numeric argument");
1508		args->ar_statfs_quantum = result.int_32;
1509		break;
1510	case Opt_quota_quantum:
1511		if (result.int_32 <= 0)
1512			return invalfc(fc, "quota_quantum mount option requires a positive numeric argument");
1513		args->ar_quota_quantum = result.int_32;
1514		break;
1515	case Opt_statfs_percent:
1516		if (result.int_32 < 0 || result.int_32 > 100)
1517			return invalfc(fc, "statfs_percent mount option requires a numeric argument between 0 and 100");
1518		args->ar_statfs_percent = result.int_32;
1519		break;
1520	case Opt_errors:
1521		if (args->ar_debug && result.uint_32 == GFS2_ERRORS_PANIC)
1522			return invalfc(fc, "-o debug and -o errors=panic are mutually exclusive");
1523		args->ar_errors = result.uint_32;
1524		break;
1525	case Opt_barrier:
1526		args->ar_nobarrier = result.boolean;
1527		break;
1528	case Opt_rgrplvb:
1529		args->ar_rgrplvb = result.boolean;
1530		args->ar_got_rgrplvb = 1;
1531		break;
1532	case Opt_loccookie:
1533		args->ar_loccookie = result.boolean;
1534		break;
1535	default:
1536		return invalfc(fc, "invalid mount option: %s", param->key);
1537	}
1538	return 0;
1539}
1540
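/**
 * gfs2_reconfigure - Apply a remount (reconfigure) request
 * @fc: The filesystem context
 *
 * The lock protocol, lock table, hostdata, spectator, localflocks and meta
 * options cannot change on a live filesystem; the remaining options,
 * together with the read-only/read-write state, are applied in place.
 *
 * Returns: errno
 */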
1541static int gfs2_reconfigure(struct fs_context *fc)
1542{
1543	struct super_block *sb = fc->root->d_sb;
1544	struct gfs2_sbd *sdp = sb->s_fs_info;
1545	struct gfs2_args *oldargs = &sdp->sd_args;
1546	struct gfs2_args *newargs = fc->fs_private;
1547	struct gfs2_tune *gt = &sdp->sd_tune;
1548	int error = 0;
1549
1550	sync_filesystem(sb);
1551
1552	spin_lock(&gt->gt_spin);
1553	oldargs->ar_commit = gt->gt_logd_secs;
1554	oldargs->ar_quota_quantum = gt->gt_quota_quantum;
1555	if (gt->gt_statfs_slow)
1556		oldargs->ar_statfs_quantum = 0;
1557	else
1558		oldargs->ar_statfs_quantum = gt->gt_statfs_quantum;
1559	spin_unlock(&gt->gt_spin);
1560
1561	if (strcmp(newargs->ar_lockproto, oldargs->ar_lockproto)) {
1562		errorfc(fc, "reconfiguration of locking protocol not allowed");
1563		return -EINVAL;
1564	}
1565	if (strcmp(newargs->ar_locktable, oldargs->ar_locktable)) {
1566		errorfc(fc, "reconfiguration of lock table not allowed");
1567		return -EINVAL;
1568	}
1569	if (strcmp(newargs->ar_hostdata, oldargs->ar_hostdata)) {
1570		errorfc(fc, "reconfiguration of host data not allowed");
1571		return -EINVAL;
1572	}
1573	if (newargs->ar_spectator != oldargs->ar_spectator) {
1574		errorfc(fc, "reconfiguration of spectator mode not allowed");
1575		return -EINVAL;
1576	}
1577	if (newargs->ar_localflocks != oldargs->ar_localflocks) {
1578		errorfc(fc, "reconfiguration of localflocks not allowed");
1579		return -EINVAL;
1580	}
1581	if (newargs->ar_meta != oldargs->ar_meta) {
1582		errorfc(fc, "switching between gfs2 and gfs2meta not allowed");
1583		return -EINVAL;
1584	}
1585	if (oldargs->ar_spectator)
1586		fc->sb_flags |= SB_RDONLY;
1587
1588	if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
1589		if (fc->sb_flags & SB_RDONLY) {
1590			gfs2_make_fs_ro(sdp);
1591		} else {
1592			error = gfs2_make_fs_rw(sdp);
1593			if (error)
1594				errorfc(fc, "unable to remount read-write");
1595		}
1596	}
1597	sdp->sd_args = *newargs;
1598
1599	if (sdp->sd_args.ar_posix_acl)
1600		sb->s_flags |= SB_POSIXACL;
1601	else
1602		sb->s_flags &= ~SB_POSIXACL;
1603	if (sdp->sd_args.ar_nobarrier)
1604		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1605	else
1606		clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1607	spin_lock(&gt->gt_spin);
1608	gt->gt_logd_secs = newargs->ar_commit;
1609	gt->gt_quota_quantum = newargs->ar_quota_quantum;
1610	if (newargs->ar_statfs_quantum) {
1611		gt->gt_statfs_slow = 0;
1612		gt->gt_statfs_quantum = newargs->ar_statfs_quantum;
1613	}
1614	else {
1615		gt->gt_statfs_slow = 1;
1616		gt->gt_statfs_quantum = 30;
1617	}
1618	spin_unlock(&gt->gt_spin);
1619
1620	gfs2_online_uevent(sdp);
1621	return error;
1622}
1623
1624static const struct fs_context_operations gfs2_context_ops = {
1625	.free        = gfs2_fc_free,
1626	.parse_param = gfs2_parse_param,
1627	.get_tree    = gfs2_get_tree,
1628	.reconfigure = gfs2_reconfigure,
1629};
1630
1631/* Set up the filesystem mount context */
1632static int gfs2_init_fs_context(struct fs_context *fc)
1633{
1634	struct gfs2_args *args;
1635
1636	args = kmalloc(sizeof(*args), GFP_KERNEL);
1637	if (args == NULL)
1638		return -ENOMEM;
1639
1640	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
1641		struct gfs2_sbd *sdp = fc->root->d_sb->s_fs_info;
1642
1643		*args = sdp->sd_args;
1644	} else {
1645		memset(args, 0, sizeof(*args));
1646		args->ar_quota = GFS2_QUOTA_DEFAULT;
1647		args->ar_data = GFS2_DATA_DEFAULT;
1648		args->ar_commit = 30;
1649		args->ar_statfs_quantum = 30;
1650		args->ar_quota_quantum = 60;
1651		args->ar_errors = GFS2_ERRORS_DEFAULT;
1652	}
1653	fc->fs_private = args;
1654	fc->ops = &gfs2_context_ops;
1655	return 0;
1656}
1657
1658static int set_meta_super(struct super_block *s, struct fs_context *fc)
1659{
1660	return -EINVAL;
1661}
1662
1663static int test_meta_super(struct super_block *s, struct fs_context *fc)
1664{
1665	return (fc->sget_key == s->s_bdev);
1666}
1667
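/*
 * gfs2_meta_get_tree - Find an already mounted gfs2 filesystem and use its
 * hidden master directory as the root of a gfs2meta mount.  A new superblock
 * is never created here (set_meta_super() returns -EINVAL), so the target
 * filesystem must already be mounted.
 */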
1668static int gfs2_meta_get_tree(struct fs_context *fc)
1669{
1670	struct super_block *s;
1671	struct gfs2_sbd *sdp;
1672	struct path path;
1673	int error;
1674
1675	if (!fc->source || !*fc->source)
1676		return -EINVAL;
1677
1678	error = kern_path(fc->source, LOOKUP_FOLLOW, &path);
1679	if (error) {
1680		pr_warn("path_lookup on %s returned error %d\n",
1681		        fc->source, error);
1682		return error;
1683	}
1684	fc->fs_type = &gfs2_fs_type;
1685	fc->sget_key = path.dentry->d_sb->s_bdev;
1686	s = sget_fc(fc, test_meta_super, set_meta_super);
1687	path_put(&path);
1688	if (IS_ERR(s)) {
1689		pr_warn("gfs2 mount does not exist\n");
1690		return PTR_ERR(s);
1691	}
1692	if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
1693		deactivate_locked_super(s);
1694		return -EBUSY;
1695	}
1696	sdp = s->s_fs_info;
1697	fc->root = dget(sdp->sd_master_dir);
1698	return 0;
1699}
1700
1701static const struct fs_context_operations gfs2_meta_context_ops = {
1702	.free        = gfs2_fc_free,
1703	.get_tree    = gfs2_meta_get_tree,
1704};
1705
1706static int gfs2_meta_init_fs_context(struct fs_context *fc)
1707{
1708	int ret = gfs2_init_fs_context(fc);
1709
1710	if (ret)
1711		return ret;
1712
1713	fc->ops = &gfs2_meta_context_ops;
1714	return 0;
1715}
1716
1717/**
1718 * gfs2_evict_inodes - evict inodes cooperatively
1719 * @sb: the superblock
1720 *
1721 * When evicting an inode with a zero link count, we are trying to upgrade the
1722 * inode's iopen glock from SH to EX mode in order to determine if we can
1723 * delete the inode.  The other nodes are supposed to evict the inode from
1724 * their caches if they can, and to poke the inode's inode glock if they cannot
1725 * do so.  Either behavior allows gfs2_upgrade_iopen_glock() to proceed
1726 * quickly, but if the other nodes are not cooperating, the lock upgrading
1727 * attempt will time out.  Since inodes are evicted sequentially, this can add
1728 * up quickly.
1729 *
 1730 * Function evict_inodes() tries to keep s_inode_list_lock held for a long
 1731 * time, which prevents other inodes from being evicted concurrently.
1732 * This precludes the cooperative behavior we are looking for.  This special
1733 * version of evict_inodes() avoids that.
1734 *
1735 * Modeled after drop_pagecache_sb().
1736 */
1737static void gfs2_evict_inodes(struct super_block *sb)
1738{
1739	struct inode *inode, *toput_inode = NULL;
1740	struct gfs2_sbd *sdp = sb->s_fs_info;
1741
1742	set_bit(SDF_EVICTING, &sdp->sd_flags);
1743
1744	spin_lock(&sb->s_inode_list_lock);
1745	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1746		spin_lock(&inode->i_lock);
1747		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) &&
1748		    !need_resched()) {
1749			spin_unlock(&inode->i_lock);
1750			continue;
1751		}
1752		atomic_inc(&inode->i_count);
1753		spin_unlock(&inode->i_lock);
1754		spin_unlock(&sb->s_inode_list_lock);
1755
1756		iput(toput_inode);
1757		toput_inode = inode;
1758
1759		cond_resched();
1760		spin_lock(&sb->s_inode_list_lock);
1761	}
1762	spin_unlock(&sb->s_inode_list_lock);
1763	iput(toput_inode);
1764}
1765
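/**
 * gfs2_kill_sb - Tear down the superblock on unmount
 * @sb: The VFS superblock
 *
 * Flushes the log, drops the root and master dentries, evicts inodes
 * cooperatively and drains the delete workqueue before handing the rest
 * of the teardown to kill_block_super().
 */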
1766static void gfs2_kill_sb(struct super_block *sb)
1767{
1768	struct gfs2_sbd *sdp = sb->s_fs_info;
1769
1770	if (sdp == NULL) {
1771		kill_block_super(sb);
1772		return;
1773	}
1774
1775	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SYNC | GFS2_LFC_KILL_SB);
1776	dput(sdp->sd_root_dir);
1777	dput(sdp->sd_master_dir);
1778	sdp->sd_root_dir = NULL;
1779	sdp->sd_master_dir = NULL;
1780	shrink_dcache_sb(sb);
1781
1782	gfs2_evict_inodes(sb);
1783
1784	/*
1785	 * Flush and then drain the delete workqueue here (via
1786	 * destroy_workqueue()) to ensure that any delete work that
1787	 * may be running will also see the SDF_KILL flag.
1788	 */
1789	set_bit(SDF_KILL, &sdp->sd_flags);
1790	gfs2_flush_delete_work(sdp);
1791	destroy_workqueue(sdp->sd_delete_wq);
1792
1793	kill_block_super(sb);
1794}
1795
1796struct file_system_type gfs2_fs_type = {
1797	.name = "gfs2",
1798	.fs_flags = FS_REQUIRES_DEV,
1799	.init_fs_context = gfs2_init_fs_context,
1800	.parameters = gfs2_fs_parameters,
1801	.kill_sb = gfs2_kill_sb,
1802	.owner = THIS_MODULE,
1803};
1804MODULE_ALIAS_FS("gfs2");
1805
1806struct file_system_type gfs2meta_fs_type = {
1807	.name = "gfs2meta",
1808	.fs_flags = FS_REQUIRES_DEV,
1809	.init_fs_context = gfs2_meta_init_fs_context,
1810	.owner = THIS_MODULE,
1811};
1812MODULE_ALIAS_FS("gfs2meta");