   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/fs-writeback.c
   4 *
   5 * Copyright (C) 2002, Linus Torvalds.
   6 *
   7 * Contains all the functions related to writing back and waiting
   8 * upon dirty inodes against superblocks, and writing back dirty
   9 * pages against inodes.  ie: data writeback.  Writeout of the
  10 * inode itself is not handled here.
  11 *
  12 * 10Apr2002	Andrew Morton
  13 *		Split out of fs/inode.c
  14 *		Additions for address_space-based writeback
  15 */
  16
  17#include <linux/kernel.h>
  18#include <linux/export.h>
  19#include <linux/spinlock.h>
  20#include <linux/slab.h>
  21#include <linux/sched.h>
  22#include <linux/fs.h>
  23#include <linux/mm.h>
  24#include <linux/pagemap.h>
  25#include <linux/kthread.h>
  26#include <linux/writeback.h>
  27#include <linux/blkdev.h>
  28#include <linux/backing-dev.h>
  29#include <linux/tracepoint.h>
  30#include <linux/device.h>
  31#include <linux/memcontrol.h>
  32#include "internal.h"
  33
  34/*
  35 * 4MB minimal write chunk size
  36 */
  37#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))
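/*
 * Worked example, assuming the common 4KiB page size (PAGE_SHIFT == 12):
 * 4096UL >> (12 - 10) == 1024 pages, and 1024 pages * 4KiB == 4MiB.
 * With 64KiB pages (PAGE_SHIFT == 16), 4096UL >> 6 == 64 pages == 4MiB.
 */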
  38
  39/*
  40 * Passed into wb_writeback(), essentially a subset of writeback_control
  41 */
  42struct wb_writeback_work {
  43	long nr_pages;
  44	struct super_block *sb;
  45	enum writeback_sync_modes sync_mode;
  46	unsigned int tagged_writepages:1;
  47	unsigned int for_kupdate:1;
  48	unsigned int range_cyclic:1;
  49	unsigned int for_background:1;
  50	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
  51	unsigned int auto_free:1;	/* free on completion */
  52	enum wb_reason reason;		/* why was writeback initiated? */
  53
  54	struct list_head list;		/* pending work list */
  55	struct wb_completion *done;	/* set if the caller waits */
  56};
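/*
 * A minimal sketch of how a work item is typically built and queued,
 * mirroring the pattern used by cgroup_writeback_by_id() below (nr and
 * reason are supplied by the caller, error handling elided):
 *
 *	struct wb_writeback_work *work;
 *
 *	work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
 *	if (work) {
 *		work->nr_pages = nr;
 *		work->sync_mode = WB_SYNC_NONE;
 *		work->reason = reason;
 *		work->auto_free = 1;	(freed by finish_writeback_work())
 *		wb_queue_work(wb, work);
 *	}
 */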
  57
  58/*
  59 * If an inode is constantly having its pages dirtied, but then the
  60 * updates stop dirtytime_expire_interval seconds in the past, it's
  61 * possible for the worst case time between when an inode has its
  62 * timestamps updated and when they finally get written out to be two
  63 * dirtytime_expire_intervals.  We set the default to 12 hours (in
  64 * seconds), which means most of the time inodes will have their
  65 * timestamps written to disk after 12 hours, but in the worst case a
  66 * few inodes might not have their timestamps written out for 24 hours.
  67 */
  68unsigned int dirtytime_expire_interval = 12 * 60 * 60;
  69
  70static inline struct inode *wb_inode(struct list_head *head)
  71{
  72	return list_entry(head, struct inode, i_io_list);
  73}
  74
  75/*
  76 * Include the creation of the trace points after defining the
  77 * wb_writeback_work structure and inline functions so that the definition
  78 * remains local to this file.
  79 */
  80#define CREATE_TRACE_POINTS
  81#include <trace/events/writeback.h>
  82
  83EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
  84
  85static bool wb_io_lists_populated(struct bdi_writeback *wb)
  86{
  87	if (wb_has_dirty_io(wb)) {
  88		return false;
  89	} else {
  90		set_bit(WB_has_dirty_io, &wb->state);
  91		WARN_ON_ONCE(!wb->avg_write_bandwidth);
  92		atomic_long_add(wb->avg_write_bandwidth,
  93				&wb->bdi->tot_write_bandwidth);
  94		return true;
  95	}
  96}
  97
  98static void wb_io_lists_depopulated(struct bdi_writeback *wb)
  99{
 100	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
 101	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
 102		clear_bit(WB_has_dirty_io, &wb->state);
 103		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
 104					&wb->bdi->tot_write_bandwidth) < 0);
 105	}
 106}
 107
 108/**
 109 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
 110 * @inode: inode to be moved
 111 * @wb: target bdi_writeback
 112 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
 113 *
 114 * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
 115 * Returns %true if @inode is the first occupant of the !dirty_time IO
 116 * lists; otherwise, %false.
 117 */
 118static bool inode_io_list_move_locked(struct inode *inode,
 119				      struct bdi_writeback *wb,
 120				      struct list_head *head)
 121{
 122	assert_spin_locked(&wb->list_lock);
 123
 124	list_move(&inode->i_io_list, head);
 125
 126	/* dirty_time doesn't count as dirty_io until expiration */
 127	if (head != &wb->b_dirty_time)
 128		return wb_io_lists_populated(wb);
 129
 130	wb_io_lists_depopulated(wb);
 131	return false;
 132}
 133
 134/**
 135 * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
 136 * @inode: inode to be removed
 137 * @wb: bdi_writeback @inode is being removed from
 138 *
 139 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
 140 * clear %WB_has_dirty_io if all are empty afterwards.
 141 */
 142static void inode_io_list_del_locked(struct inode *inode,
 143				     struct bdi_writeback *wb)
 144{
 145	assert_spin_locked(&wb->list_lock);
 146	assert_spin_locked(&inode->i_lock);
 147
 148	inode->i_state &= ~I_SYNC_QUEUED;
 149	list_del_init(&inode->i_io_list);
 150	wb_io_lists_depopulated(wb);
 151}
 152
 153static void wb_wakeup(struct bdi_writeback *wb)
 154{
 155	spin_lock_bh(&wb->work_lock);
 156	if (test_bit(WB_registered, &wb->state))
 157		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 158	spin_unlock_bh(&wb->work_lock);
 159}
 160
 161static void finish_writeback_work(struct bdi_writeback *wb,
 162				  struct wb_writeback_work *work)
 163{
 164	struct wb_completion *done = work->done;
 165
 166	if (work->auto_free)
 167		kfree(work);
 168	if (done) {
 169		wait_queue_head_t *waitq = done->waitq;
 170
 171		/* @done can't be accessed after the following dec */
 172		if (atomic_dec_and_test(&done->cnt))
 173			wake_up_all(waitq);
 174	}
 175}
 176
 177static void wb_queue_work(struct bdi_writeback *wb,
 178			  struct wb_writeback_work *work)
 179{
 180	trace_writeback_queue(wb, work);
 181
 182	if (work->done)
 183		atomic_inc(&work->done->cnt);
 184
 185	spin_lock_bh(&wb->work_lock);
 186
 187	if (test_bit(WB_registered, &wb->state)) {
 188		list_add_tail(&work->list, &wb->work_list);
 189		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 190	} else
 191		finish_writeback_work(wb, work);
 192
 193	spin_unlock_bh(&wb->work_lock);
 194}
 195
 196/**
 197 * wb_wait_for_completion - wait for completion of bdi_writeback_works
 198 * @done: target wb_completion
 199 *
 200 * Wait for one or more work items issued to @bdi with their ->done field
 201 * set to @done, which should have been initialized with
 202 * DEFINE_WB_COMPLETION().  This function returns after all such work items
 203 * are completed.  Work items which are waited upon aren't freed
 204 * automatically on completion.
 205 */
 206void wb_wait_for_completion(struct wb_completion *done)
 207{
 208	atomic_dec(&done->cnt);		/* put down the initial count */
 209	wait_event(*done->waitq, !atomic_read(&done->cnt));
 210}
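/*
 * A minimal usage sketch, taken from the on-stack fallback pattern in
 * bdi_split_work_to_wbs() below (auto_free must stay 0 for on-stack
 * work items):
 *
 *	DEFINE_WB_COMPLETION(fallback_work_done, bdi);
 *	struct wb_writeback_work fallback_work;
 *
 *	fallback_work = *base_work;
 *	fallback_work.auto_free = 0;
 *	fallback_work.done = &fallback_work_done;
 *	wb_queue_work(wb, &fallback_work);
 *	wb_wait_for_completion(&fallback_work_done);
 */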
 211
 212#ifdef CONFIG_CGROUP_WRITEBACK
 213
 214/*
 215 * Parameters for foreign inode detection, see wbc_detach_inode() to see
 216 * how they're used.
 217 *
 218 * These parameters are inherently heuristic as the detection target
 219 * itself is fuzzy.  All we want to do is detach an inode from its
 220 * current owner if it's being written to too much by other cgroups.
 221 *
 222 * The current cgroup writeback is built on the assumption that multiple
 223 * cgroups writing to the same inode concurrently is very rare and a mode
 224 * of operation which isn't well supported.  As such, the goal is to
 225 * not take too long when a different cgroup takes over an inode, while
 226 * avoiding overly aggressive flip-flops from occasional foreign writes.
 227 *
 228 * We record, very roughly, 2s worth of IO time history and if more than
 229 * half of that is foreign, trigger the switch.  The recording is quantized
 230 * to 16 slots.  To avoid tiny writes from swinging the decision too much,
 231 * writes smaller than 1/8 of avg size are ignored.
 232 */
 233#define WB_FRN_TIME_SHIFT	13	/* 1s = 2^13, up to 8 secs w/ 16bit */
 234#define WB_FRN_TIME_AVG_SHIFT	3	/* avg = avg * 7/8 + new * 1/8 */
 235#define WB_FRN_TIME_CUT_DIV	8	/* ignore rounds < avg / 8 */
 236#define WB_FRN_TIME_PERIOD	(2 * (1 << WB_FRN_TIME_SHIFT))	/* 2s */
 237
 238#define WB_FRN_HIST_SLOTS	16	/* inode->i_wb_frn_history is 16bit */
 239#define WB_FRN_HIST_UNIT	(WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
 240					/* each slot's duration is 2s / 16 */
 241#define WB_FRN_HIST_THR_SLOTS	(WB_FRN_HIST_SLOTS / 2)
 242					/* if foreign slots > 8, switch */
 243#define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
 244					/* one round can affect up to 5 slots */
 245#define WB_FRN_MAX_IN_FLIGHT	1024	/* don't queue too many concurrently */
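/*
 * Plugging the numbers in: WB_FRN_TIME_PERIOD == 2 * 8192 == 16384,
 * i.e. 2s in the 2^13-per-second fixed-point scale; WB_FRN_HIST_UNIT ==
 * 16384 / 16 == 1024, i.e. 125ms per history slot; the switch threshold
 * is 8 of 16 slots; and one round shifts in at most 8 / 2 + 1 == 5 slots.
 */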
 246
 247static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
 248static struct workqueue_struct *isw_wq;
 249
 250void __inode_attach_wb(struct inode *inode, struct page *page)
 251{
 252	struct backing_dev_info *bdi = inode_to_bdi(inode);
 253	struct bdi_writeback *wb = NULL;
 254
 255	if (inode_cgwb_enabled(inode)) {
 256		struct cgroup_subsys_state *memcg_css;
 257
 258		if (page) {
 259			memcg_css = mem_cgroup_css_from_page(page);
 260			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 261		} else {
 262			/* must pin memcg_css, see wb_get_create() */
 263			memcg_css = task_get_css(current, memory_cgrp_id);
 264			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 265			css_put(memcg_css);
 266		}
 267	}
 268
 269	if (!wb)
 270		wb = &bdi->wb;
 271
 272	/*
 273	 * There may be multiple instances of this function racing to
 274	 * update the same inode.  Use cmpxchg() to tell the winner.
 275	 */
 276	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
 277		wb_put(wb);
 278}
 279EXPORT_SYMBOL_GPL(__inode_attach_wb);
 280
 281/**
 282 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
 283 * @inode: inode of interest with i_lock held
 284 *
 285 * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
 286 * held on entry and is released on return.  The returned wb is guaranteed
 287 * to stay @inode's associated wb until its list_lock is released.
 288 */
 289static struct bdi_writeback *
 290locked_inode_to_wb_and_lock_list(struct inode *inode)
 291	__releases(&inode->i_lock)
 292	__acquires(&wb->list_lock)
 293{
 294	while (true) {
 295		struct bdi_writeback *wb = inode_to_wb(inode);
 296
 297		/*
 298		 * inode_to_wb() association is protected by both
 299		 * @inode->i_lock and @wb->list_lock but list_lock nests
 300		 * outside i_lock.  Drop i_lock and verify that the
 301		 * association hasn't changed after acquiring list_lock.
 302		 */
 303		wb_get(wb);
 304		spin_unlock(&inode->i_lock);
 305		spin_lock(&wb->list_lock);
 306
 307		/* i_wb may have changed in between, can't use inode_to_wb() */
 308		if (likely(wb == inode->i_wb)) {
 309			wb_put(wb);	/* @inode already has ref */
 310			return wb;
 311		}
 312
 313		spin_unlock(&wb->list_lock);
 314		wb_put(wb);
 315		cpu_relax();
 316		spin_lock(&inode->i_lock);
 317	}
 318}
 319
 320/**
 321 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
 322 * @inode: inode of interest
 323 *
 324 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
 325 * on entry.
 326 */
 327static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
 328	__acquires(&wb->list_lock)
 329{
 330	spin_lock(&inode->i_lock);
 331	return locked_inode_to_wb_and_lock_list(inode);
 332}
 333
 334struct inode_switch_wbs_context {
 335	struct inode		*inode;
 336	struct bdi_writeback	*new_wb;
 337
 338	struct rcu_head		rcu_head;
 339	struct work_struct	work;
 340};
 341
 342static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 343{
 344	down_write(&bdi->wb_switch_rwsem);
 345}
 346
 347static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 348{
 349	up_write(&bdi->wb_switch_rwsem);
 350}
 351
 352static void inode_switch_wbs_work_fn(struct work_struct *work)
 353{
 354	struct inode_switch_wbs_context *isw =
 355		container_of(work, struct inode_switch_wbs_context, work);
 356	struct inode *inode = isw->inode;
 357	struct backing_dev_info *bdi = inode_to_bdi(inode);
 358	struct address_space *mapping = inode->i_mapping;
 359	struct bdi_writeback *old_wb = inode->i_wb;
 360	struct bdi_writeback *new_wb = isw->new_wb;
 361	XA_STATE(xas, &mapping->i_pages, 0);
 362	struct page *page;
 363	bool switched = false;
 364
 365	/*
 366	 * If @inode switches cgwb membership while sync_inodes_sb() is
 367	 * being issued, sync_inodes_sb() might miss it.  Synchronize.
 368	 */
 369	down_read(&bdi->wb_switch_rwsem);
 370
 371	/*
 372	 * By the time control reaches here, RCU grace period has passed
 373	 * since I_WB_SWITCH assertion and all wb stat update transactions
 374	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
 375	 * synchronizing against the i_pages lock.
 376	 *
 377	 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
 378	 * gives us exclusion against all wb related operations on @inode
 379	 * including IO list manipulations and stat updates.
 380	 */
 381	if (old_wb < new_wb) {
 382		spin_lock(&old_wb->list_lock);
 383		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
 384	} else {
 385		spin_lock(&new_wb->list_lock);
 386		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 387	}
 388	spin_lock(&inode->i_lock);
 389	xa_lock_irq(&mapping->i_pages);
 390
 391	/*
 392	 * Once I_FREEING is visible under i_lock, the eviction path owns
 393	 * the inode and we shouldn't modify ->i_io_list.
 394	 */
 395	if (unlikely(inode->i_state & I_FREEING))
 396		goto skip_switch;
 397
 398	trace_inode_switch_wbs(inode, old_wb, new_wb);
 399
 400	/*
 401	 * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
 402	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
 403	 * pages actually under writeback.
 404	 */
 405	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
 406		if (PageDirty(page)) {
 407			dec_wb_stat(old_wb, WB_RECLAIMABLE);
 408			inc_wb_stat(new_wb, WB_RECLAIMABLE);
 409		}
 410	}
 411
 412	xas_set(&xas, 0);
 413	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
 414		WARN_ON_ONCE(!PageWriteback(page));
 415		dec_wb_stat(old_wb, WB_WRITEBACK);
 416		inc_wb_stat(new_wb, WB_WRITEBACK);
 417	}
 418
 419	wb_get(new_wb);
 420
 421	/*
 422	 * Transfer to @new_wb's IO list if necessary.  The specific list
 423	 * @inode was on is ignored and the inode is put on ->b_dirty which
 424	 * is always correct including from ->b_dirty_time.  The transfer
 425	 * preserves @inode->dirtied_when ordering.
 426	 */
 427	if (!list_empty(&inode->i_io_list)) {
 428		struct inode *pos;
 429
 430		inode_io_list_del_locked(inode, old_wb);
 431		inode->i_wb = new_wb;
 432		list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
 433			if (time_after_eq(inode->dirtied_when,
 434					  pos->dirtied_when))
 435				break;
 436		inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
 437	} else {
 438		inode->i_wb = new_wb;
 439	}
 440
 441	/* ->i_wb_frn updates may race wbc_detach_inode() but it doesn't matter */
 442	inode->i_wb_frn_winner = 0;
 443	inode->i_wb_frn_avg_time = 0;
 444	inode->i_wb_frn_history = 0;
 445	switched = true;
 446skip_switch:
 447	/*
 448	 * Paired with the load-acquire in unlocked_inode_to_wb_begin();
 449	 * this ensures the new wb is visible if they see !I_WB_SWITCH.
 450	 */
 451	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
 452
 453	xa_unlock_irq(&mapping->i_pages);
 454	spin_unlock(&inode->i_lock);
 455	spin_unlock(&new_wb->list_lock);
 456	spin_unlock(&old_wb->list_lock);
 457
 458	up_read(&bdi->wb_switch_rwsem);
 459
 460	if (switched) {
 461		wb_wakeup(new_wb);
 462		wb_put(old_wb);
 463	}
 464	wb_put(new_wb);
 465
 466	iput(inode);
 467	kfree(isw);
 468
 469	atomic_dec(&isw_nr_in_flight);
 470}
 471
 472static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
 473{
 474	struct inode_switch_wbs_context *isw = container_of(rcu_head,
 475				struct inode_switch_wbs_context, rcu_head);
 476
 477	/* needs to grab bh-unsafe locks, bounce to work item */
 478	INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
 479	queue_work(isw_wq, &isw->work);
 480}
 481
 482/**
 483 * inode_switch_wbs - change the wb association of an inode
 484 * @inode: target inode
 485 * @new_wb_id: ID of the new wb
 486 *
 487 * Switch @inode's wb association to the wb identified by @new_wb_id.  The
 488 * switching is performed asynchronously and may fail silently.
 489 */
 490static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 491{
 492	struct backing_dev_info *bdi = inode_to_bdi(inode);
 493	struct cgroup_subsys_state *memcg_css;
 494	struct inode_switch_wbs_context *isw;
 495
 496	/* noop if a switch already seems to be in progress */
 497	if (inode->i_state & I_WB_SWITCH)
 498		return;
 499
 500	/* avoid queueing a new switch if too many are already in flight */
 501	if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
 502		return;
 503
 504	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
 505	if (!isw)
 506		return;
 507
 508	/* find and pin the new wb */
 509	rcu_read_lock();
 510	memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
 511	if (memcg_css)
 512		isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 513	rcu_read_unlock();
 514	if (!isw->new_wb)
 515		goto out_free;
 516
 517	/* while holding I_WB_SWITCH, no one else can update the association */
 518	spin_lock(&inode->i_lock);
 519	if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
 520	    inode->i_state & (I_WB_SWITCH | I_FREEING) ||
 521	    inode_to_wb(inode) == isw->new_wb) {
 522		spin_unlock(&inode->i_lock);
 523		goto out_free;
 524	}
 525	inode->i_state |= I_WB_SWITCH;
 526	__iget(inode);
 527	spin_unlock(&inode->i_lock);
 528
 529	isw->inode = inode;
 530
 531	/*
 532	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
 533	 * the RCU protected stat update paths to grab the i_pages
 534	 * lock so that stat transfer can synchronize against them.
 535	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 536	 */
 537	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
 538
 539	atomic_inc(&isw_nr_in_flight);
 540	return;
 541
 542out_free:
 543	if (isw->new_wb)
 544		wb_put(isw->new_wb);
 545	kfree(isw);
 546}
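/*
 * The switch pipeline at a glance, as implemented above:
 *
 *	inode_switch_wbs()
 *	    sets I_WB_SWITCH, grabs an inode reference
 *	    call_rcu() --> inode_switch_wbs_rcu_fn()
 *	        queue_work() on isw_wq --> inode_switch_wbs_work_fn()
 *	            transfers stats and IO list membership to the new wb,
 *	            clears I_WB_SWITCH, drops the inode reference
 */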
 547
 548/**
 549 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
 550 * @wbc: writeback_control of interest
 551 * @inode: target inode
 552 *
 553 * @inode is locked and about to be written back under the control of @wbc.
 554 * Record @inode's writeback context into @wbc and unlock the i_lock.  On
 555 * writeback completion, wbc_detach_inode() should be called.  This is used
 556 * to track the cgroup writeback context.
 557 */
 558void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 559				 struct inode *inode)
 560{
 561	if (!inode_cgwb_enabled(inode)) {
 562		spin_unlock(&inode->i_lock);
 563		return;
 564	}
 565
 566	wbc->wb = inode_to_wb(inode);
 567	wbc->inode = inode;
 568
 569	wbc->wb_id = wbc->wb->memcg_css->id;
 570	wbc->wb_lcand_id = inode->i_wb_frn_winner;
 571	wbc->wb_tcand_id = 0;
 572	wbc->wb_bytes = 0;
 573	wbc->wb_lcand_bytes = 0;
 574	wbc->wb_tcand_bytes = 0;
 575
 576	wb_get(wbc->wb);
 577	spin_unlock(&inode->i_lock);
 578
 579	/*
 580	 * A dying wb indicates that either the blkcg associated with the
 581	 * memcg changed or the associated memcg is dying.  In the first
 582	 * case, a replacement wb should already be available and we should
 583	 * refresh the wb immediately.  In the second case, trying to
 584	 * refresh will keep failing.
 585	 */
 586	if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
 587		inode_switch_wbs(inode, wbc->wb_id);
 588}
 589EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
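/*
 * A minimal sketch of the expected calling protocol on the writeback
 * side (the surrounding writepages loop is elided):
 *
 *	spin_lock(&inode->i_lock);
 *	wbc_attach_and_unlock_inode(wbc, inode);	(drops i_lock)
 *	... write pages, calling wbc_account_cgroup_owner() per page ...
 *	wbc_detach_inode(wbc);
 */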
 590
 591/**
 592 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 593 * @wbc: writeback_control of the just finished writeback
 594 *
 595 * To be called after a writeback attempt of an inode finishes and undoes
 596 * wbc_attach_and_unlock_inode().  Can be called under any context.
 597 *
 598 * As concurrent write sharing of an inode is expected to be very rare and
 599 * memcg only tracks page ownership on a first-use basis, severely confining
 600 * the usefulness of such sharing, cgroup writeback tracks ownership
 601 * per-inode.  While the support for concurrent write sharing of an inode
 602 * is deemed unnecessary, an inode being written to by different cgroups at
 603 * different points in time is a lot more common, and, more importantly,
 604 * charging only by first-use can too readily lead to grossly incorrect
 605 * behaviors (single foreign page can lead to gigabytes of writeback to be
 606 * incorrectly attributed).
 607 *
 608 * To resolve this issue, cgroup writeback detects the majority dirtier of
 609 * an inode and transfers the ownership to it.  To avoid unnecessary
 610 * oscillation, the detection mechanism keeps track of history and gives
 611 * out the switch verdict only if the foreign usage pattern is stable over
 612 * a certain amount of time and/or writeback attempts.
 613 *
 614 * On each writeback attempt, @wbc tries to detect the majority writer
 615 * using Boyer-Moore majority vote algorithm.  In addition to the byte
 616 * count from the majority voting, it also counts the bytes written for the
 617 * current wb and the last round's winner wb (max of last round's current
 618 * wb, the winner from two rounds ago, and the last round's majority
 619 * candidate).  Keeping track of the historical winner helps the algorithm
 620 * to semi-reliably detect the most active writer even when it's not the
 621 * absolute majority.
 622 *
 623 * Once the winner of the round is determined, whether the winner is
 624 * foreign or not and how much IO time the round consumed is recorded in
 625 * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
 626 * over a certain threshold, the switch verdict is given.
 627 */
 628void wbc_detach_inode(struct writeback_control *wbc)
 629{
 630	struct bdi_writeback *wb = wbc->wb;
 631	struct inode *inode = wbc->inode;
 632	unsigned long avg_time, max_bytes, max_time;
 633	u16 history;
 634	int max_id;
 635
 636	if (!wb)
 637		return;
 638
 639	history = inode->i_wb_frn_history;
 640	avg_time = inode->i_wb_frn_avg_time;
 641
 642	/* pick the winner of this round */
 643	if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
 644	    wbc->wb_bytes >= wbc->wb_tcand_bytes) {
 645		max_id = wbc->wb_id;
 646		max_bytes = wbc->wb_bytes;
 647	} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
 648		max_id = wbc->wb_lcand_id;
 649		max_bytes = wbc->wb_lcand_bytes;
 650	} else {
 651		max_id = wbc->wb_tcand_id;
 652		max_bytes = wbc->wb_tcand_bytes;
 653	}
 654
 655	/*
 656	 * Calculate the amount of IO time the winner consumed and fold it
 657	 * into the running average kept per inode.  If the consumed IO
 658	 * time is lower than avg / WB_FRN_TIME_CUT_DIV, ignore it for
 659	 * deciding whether to switch or not.  This is to prevent one-off
 660	 * small dirtiers from skewing the verdict.
 661	 */
 662	max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
 663				wb->avg_write_bandwidth);
 664	if (avg_time)
 665		avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
 666			    (avg_time >> WB_FRN_TIME_AVG_SHIFT);
 667	else
 668		avg_time = max_time;	/* immediate catch up on first run */
 669
 670	if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
 671		int slots;
 672
 673		/*
 674		 * The switch verdict is reached if foreign wb's consume
 675		 * more than a certain proportion of IO time in a
 676		 * WB_FRN_TIME_PERIOD.  This is loosely tracked by 16 slot
 677		 * history mask where each bit represents one sixteenth of
 678		 * the period.  Determine the number of slots to shift into
 679		 * history from @max_time.
 680		 */
 681		slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
 682			    (unsigned long)WB_FRN_HIST_MAX_SLOTS);
 683		history <<= slots;
 684		if (wbc->wb_id != max_id)
 685			history |= (1U << slots) - 1;
 686
 687		if (history)
 688			trace_inode_foreign_history(inode, wbc, history);
 689
 690		/*
 691		 * Switch if the current wb isn't the consistent winner.
 692		 * If there are multiple closely competing dirtiers, the
 693		 * inode may switch across them repeatedly over time, which
 694		 * is okay.  The main goal is avoiding keeping an inode on
 695		 * the wrong wb for an extended period of time.
 696		 */
 697		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
 698			inode_switch_wbs(inode, max_id);
 699	}
 700
 701	/*
 702	 * Multiple instances of this function may race to update the
 703	 * following fields but we don't mind occasional inaccuracies.
 704	 */
 705	inode->i_wb_frn_winner = max_id;
 706	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
 707	inode->i_wb_frn_history = history;
 708
 709	wb_put(wbc->wb);
 710	wbc->wb = NULL;
 711}
 712EXPORT_SYMBOL_GPL(wbc_detach_inode);
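/*
 * Worked example of the history bookkeeping above: if a foreign wb wins
 * a round whose IO time spans three slots, history is shifted left by 3
 * and the three new low bits are set.  Once more than 8 of the 16 bits
 * are set (hweight32(history) > WB_FRN_HIST_THR_SLOTS), the inode is
 * handed over via inode_switch_wbs().
 */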
 713
 714/**
 715 * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
 716 * @wbc: writeback_control of the writeback in progress
 717 * @page: page being written out
 718 * @bytes: number of bytes being written out
 719 *
 720 * @bytes from @page are about to be written out during the writeback
 721 * controlled by @wbc.  Keep the book for foreign inode detection.  See
 722 * wbc_detach_inode().
 723 */
 724void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
 725			      size_t bytes)
 726{
 727	struct cgroup_subsys_state *css;
 728	int id;
 729
 730	/*
 731	 * pageout() path doesn't attach @wbc to the inode being written
 732	 * out.  This is intentional as we don't want the function to block
 733	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
 734	 * regular writeback instead of writing things out itself.
 735	 */
 736	if (!wbc->wb || wbc->no_cgroup_owner)
 737		return;
 738
 739	css = mem_cgroup_css_from_page(page);
 740	/* dead cgroups shouldn't contribute to inode ownership arbitration */
 741	if (!(css->flags & CSS_ONLINE))
 742		return;
 743
 744	id = css->id;
 745
 746	if (id == wbc->wb_id) {
 747		wbc->wb_bytes += bytes;
 748		return;
 749	}
 750
 751	if (id == wbc->wb_lcand_id)
 752		wbc->wb_lcand_bytes += bytes;
 753
 754	/* Boyer-Moore majority vote algorithm */
 755	if (!wbc->wb_tcand_bytes)
 756		wbc->wb_tcand_id = id;
 757	if (id == wbc->wb_tcand_id)
 758		wbc->wb_tcand_bytes += bytes;
 759	else
 760		wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
 761}
 762EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
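/*
 * Worked example of the majority vote above, assuming none of the IDs
 * match wb_id or wb_lcand_id: writes of A:4k, B:4k, A:8k first make A
 * the candidate with 4k, then B's 4k cancels it down to 0, then A
 * re-takes the slot, ending with wb_tcand_id == A, wb_tcand_bytes == 8k.
 */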
 763
 764/**
 765 * inode_congested - test whether an inode is congested
 766 * @inode: inode to test for congestion (may be NULL)
 767 * @cong_bits: mask of WB_[a]sync_congested bits to test
 768 *
 769 * Tests whether @inode is congested.  @cong_bits is the mask of congestion
 770 * bits to test and the return value is the mask of set bits.
 771 *
 772 * If cgroup writeback is enabled for @inode, the congestion state is
 773 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 774 * associated with @inode is congested; otherwise, the root wb's congestion
 775 * state is used.
 776 *
 777 * @inode is allowed to be NULL as this function is often called on
 778 * mapping->host which is NULL for the swapper space.
 779 */
 780int inode_congested(struct inode *inode, int cong_bits)
 781{
 782	/*
 783	 * Once set, ->i_wb never becomes NULL while the inode is alive.
 784	 * Start transaction iff ->i_wb is visible.
 785	 */
 786	if (inode && inode_to_wb_is_valid(inode)) {
 787		struct bdi_writeback *wb;
 788		struct wb_lock_cookie lock_cookie = {};
 789		bool congested;
 790
 791		wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
 792		congested = wb_congested(wb, cong_bits);
 793		unlocked_inode_to_wb_end(inode, &lock_cookie);
 794		return congested;
 795	}
 796
 797	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
 798}
 799EXPORT_SYMBOL_GPL(inode_congested);
 800
 801/**
 802 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 803 * @wb: target bdi_writeback to split @nr_pages to
 804 * @nr_pages: number of pages to write for the whole bdi
 805 *
 806 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 807 * relation to the total write bandwidth of all wb's w/ dirty inodes on
 808 * @wb->bdi.
 809 */
 810static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
 811{
 812	unsigned long this_bw = wb->avg_write_bandwidth;
 813	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
 814
 815	if (nr_pages == LONG_MAX)
 816		return LONG_MAX;
 817
 818	/*
 819	 * This may be called on clean wb's and proportional distribution
 820	 * may not make sense, just use the original @nr_pages in those
 821	 * cases.  In general, we want to err on the side of writing more.
 822	 */
 823	if (!tot_bw || this_bw >= tot_bw)
 824		return nr_pages;
 825	else
 826		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
 827}
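/*
 * E.g. with nr_pages == 1024, this_bw == 100 and tot_bw == 400 (the
 * bandwidth units cancel out), this wb is asked to write
 * DIV_ROUND_UP(1024 * 100, 400) == 256 pages.
 */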
 828
 829/**
 830 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
 831 * @bdi: target backing_dev_info
 832 * @base_work: wb_writeback_work to issue
 833 * @skip_if_busy: skip wb's which already have writeback in progress
 834 *
 835 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
 836 * have dirty inodes.  If @base_work->nr_pages isn't %LONG_MAX, it's
 837 * distributed to the busy wbs according to each wb's proportion in the
 838 * total active write bandwidth of @bdi.
 839 */
 840static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 841				  struct wb_writeback_work *base_work,
 842				  bool skip_if_busy)
 843{
 844	struct bdi_writeback *last_wb = NULL;
 845	struct bdi_writeback *wb = list_entry(&bdi->wb_list,
 846					      struct bdi_writeback, bdi_node);
 847
 848	might_sleep();
 849restart:
 850	rcu_read_lock();
 851	list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
 852		DEFINE_WB_COMPLETION(fallback_work_done, bdi);
 853		struct wb_writeback_work fallback_work;
 854		struct wb_writeback_work *work;
 855		long nr_pages;
 856
 857		if (last_wb) {
 858			wb_put(last_wb);
 859			last_wb = NULL;
 860		}
 861
 862		/* SYNC_ALL writes out I_DIRTY_TIME too */
 863		if (!wb_has_dirty_io(wb) &&
 864		    (base_work->sync_mode == WB_SYNC_NONE ||
 865		     list_empty(&wb->b_dirty_time)))
 866			continue;
 867		if (skip_if_busy && writeback_in_progress(wb))
 868			continue;
 869
 870		nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
 871
 872		work = kmalloc(sizeof(*work), GFP_ATOMIC);
 873		if (work) {
 874			*work = *base_work;
 875			work->nr_pages = nr_pages;
 876			work->auto_free = 1;
 877			wb_queue_work(wb, work);
 878			continue;
 879		}
 880
 881		/* alloc failed, execute synchronously using on-stack fallback */
 882		work = &fallback_work;
 883		*work = *base_work;
 884		work->nr_pages = nr_pages;
 885		work->auto_free = 0;
 886		work->done = &fallback_work_done;
 887
 888		wb_queue_work(wb, work);
 889
 890		/*
 891		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
 892		 * continuing iteration from @wb after dropping and
 893		 * regrabbing rcu read lock.
 894		 */
 895		wb_get(wb);
 896		last_wb = wb;
 897
 898		rcu_read_unlock();
 899		wb_wait_for_completion(&fallback_work_done);
 900		goto restart;
 901	}
 902	rcu_read_unlock();
 903
 904	if (last_wb)
 905		wb_put(last_wb);
 906}
 907
 908/**
 909 * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
 910 * @bdi_id: target bdi id
 911 * @memcg_id: target memcg css id
 912 * @nr: number of pages to write, 0 for best-effort dirty flushing
 913 * @reason: reason why some writeback work was initiated
 914 * @done: target wb_completion
 915 *
 916 * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
 917 * with the specified parameters.
 918 */
 919int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr,
 920			   enum wb_reason reason, struct wb_completion *done)
 921{
 922	struct backing_dev_info *bdi;
 923	struct cgroup_subsys_state *memcg_css;
 924	struct bdi_writeback *wb;
 925	struct wb_writeback_work *work;
 926	int ret;
 927
 928	/* lookup bdi and memcg */
 929	bdi = bdi_get_by_id(bdi_id);
 930	if (!bdi)
 931		return -ENOENT;
 932
 933	rcu_read_lock();
 934	memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
 935	if (memcg_css && !css_tryget(memcg_css))
 936		memcg_css = NULL;
 937	rcu_read_unlock();
 938	if (!memcg_css) {
 939		ret = -ENOENT;
 940		goto out_bdi_put;
 941	}
 942
 943	/*
 944	 * And find the associated wb.  If the wb isn't there already,
 945	 * there's nothing to flush; don't create one.
 946	 */
 947	wb = wb_get_lookup(bdi, memcg_css);
 948	if (!wb) {
 949		ret = -ENOENT;
 950		goto out_css_put;
 951	}
 952
 953	/*
 954	 * If @nr is zero, the caller is attempting to write out most of
 955	 * the currently dirty pages.  Let's take the current dirty page
 956	 * count and inflate it by 25% which should be large enough to
 957	 * flush out most dirty pages while avoiding getting livelocked by
 958	 * concurrent dirtiers.
 959	 */
 960	if (!nr) {
 961		unsigned long filepages, headroom, dirty, writeback;
 962
 963		mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty,
 964				      &writeback);
 965		nr = dirty * 10 / 8;
 966	}
 967
 968	/* issue the writeback work */
 969	work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
 970	if (work) {
 971		work->nr_pages = nr;
 972		work->sync_mode = WB_SYNC_NONE;
 973		work->range_cyclic = 1;
 974		work->reason = reason;
 975		work->done = done;
 976		work->auto_free = 1;
 977		wb_queue_work(wb, work);
 978		ret = 0;
 979	} else {
 980		ret = -ENOMEM;
 981	}
 982
 983	wb_put(wb);
 984out_css_put:
 985	css_put(memcg_css);
 986out_bdi_put:
 987	bdi_put(bdi);
 988	return ret;
 989}
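/*
 * A minimal caller sketch (modelled on the foreign-flush path in
 * mm/memcontrol.c; setup of the IDs and of the done completion elided):
 *
 *	cgroup_writeback_by_id(bdi_id, memcg_id, 0,
 *			       WB_REASON_FOREIGN_FLUSH, &done);
 */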
 990
 991/**
 992 * cgroup_writeback_umount - flush inode wb switches for umount
 993 *
 994 * This function is called when a super_block is about to be destroyed and
 995 * flushes in-flight inode wb switches.  An inode wb switch goes through
 996 * RCU and then workqueue, so the two need to be flushed in order to ensure
 997 * that all previously scheduled switches are finished.  As wb switches are
 998 * rare occurrences and synchronize_rcu() can take a while, perform
 999 * flushing iff wb switches are in flight.
1000 */
1001void cgroup_writeback_umount(void)
1002{
1003	if (atomic_read(&isw_nr_in_flight)) {
1004		/*
1005		 * Use rcu_barrier() to wait for all pending callbacks to
1006		 * ensure that all in-flight wb switches are in the workqueue.
1007		 */
1008		rcu_barrier();
1009		flush_workqueue(isw_wq);
1010	}
1011}
1012
1013static int __init cgroup_writeback_init(void)
1014{
1015	isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
1016	if (!isw_wq)
1017		return -ENOMEM;
1018	return 0;
1019}
1020fs_initcall(cgroup_writeback_init);
1021
1022#else	/* CONFIG_CGROUP_WRITEBACK */
1023
1024static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1025static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1026
1027static struct bdi_writeback *
1028locked_inode_to_wb_and_lock_list(struct inode *inode)
1029	__releases(&inode->i_lock)
1030	__acquires(&wb->list_lock)
1031{
1032	struct bdi_writeback *wb = inode_to_wb(inode);
1033
1034	spin_unlock(&inode->i_lock);
1035	spin_lock(&wb->list_lock);
1036	return wb;
1037}
1038
1039static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
1040	__acquires(&wb->list_lock)
1041{
1042	struct bdi_writeback *wb = inode_to_wb(inode);
1043
1044	spin_lock(&wb->list_lock);
1045	return wb;
1046}
1047
1048static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
1049{
1050	return nr_pages;
1051}
1052
1053static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
1054				  struct wb_writeback_work *base_work,
1055				  bool skip_if_busy)
1056{
1057	might_sleep();
1058
1059	if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1060		base_work->auto_free = 0;
1061		wb_queue_work(&bdi->wb, base_work);
1062	}
1063}
1064
1065#endif	/* CONFIG_CGROUP_WRITEBACK */
1066
1067/*
1068 * Add in the number of potentially dirty inodes, because each inode
1069 * write can dirty pagecache in the underlying blockdev.
1070 */
1071static unsigned long get_nr_dirty_pages(void)
1072{
1073	return global_node_page_state(NR_FILE_DIRTY) +
1074		get_nr_dirty_inodes();
1075}
1076
1077static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
1078{
1079	if (!wb_has_dirty_io(wb))
1080		return;
1081
1082	/*
1083	 * All callers of this function want to start writeback of all
1084	 * dirty pages. Places like vmscan can call this at a very
1085	 * high frequency, causing pointless allocations of tons of
1086	 * work items and keeping the flusher threads busy retrieving
1087	 * that work. Ensure that we only allow one of them pending and
1088	 * inflight at the time.
1089	 */
1090	if (test_bit(WB_start_all, &wb->state) ||
1091	    test_and_set_bit(WB_start_all, &wb->state))
1092		return;
1093
1094	wb->start_all_reason = reason;
1095	wb_wakeup(wb);
1096}
1097
1098/**
1099 * wb_start_background_writeback - start background writeback
1100 * @wb: bdi_writback to write from
1101 *
1102 * Description:
1103 *   This makes sure WB_SYNC_NONE background writeback happens. When
1104 *   this function returns, it is only guaranteed that for given wb
1105 *   some IO is happening if we are over background dirty threshold.
1106 *   Caller need not hold sb s_umount semaphore.
1107 */
1108void wb_start_background_writeback(struct bdi_writeback *wb)
1109{
1110	/*
1111	 * We just wake up the flusher thread. It will perform background
1112	 * writeback as soon as there is no other work to do.
1113	 */
1114	trace_writeback_wake_background(wb);
1115	wb_wakeup(wb);
1116}
1117
1118/*
1119 * Remove the inode from the writeback list it is on.
1120 */
1121void inode_io_list_del(struct inode *inode)
1122{
1123	struct bdi_writeback *wb;
1124
1125	wb = inode_to_wb_and_lock_list(inode);
1126	spin_lock(&inode->i_lock);
1127	inode_io_list_del_locked(inode, wb);
1128	spin_unlock(&inode->i_lock);
1129	spin_unlock(&wb->list_lock);
1130}
1131EXPORT_SYMBOL(inode_io_list_del);
1132
1133/*
1134 * mark an inode as under writeback on the sb
1135 */
1136void sb_mark_inode_writeback(struct inode *inode)
1137{
1138	struct super_block *sb = inode->i_sb;
1139	unsigned long flags;
1140
1141	if (list_empty(&inode->i_wb_list)) {
1142		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1143		if (list_empty(&inode->i_wb_list)) {
1144			list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
1145			trace_sb_mark_inode_writeback(inode);
1146		}
1147		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1148	}
1149}
1150
1151/*
1152 * clear an inode as under writeback on the sb
1153 */
1154void sb_clear_inode_writeback(struct inode *inode)
1155{
1156	struct super_block *sb = inode->i_sb;
1157	unsigned long flags;
1158
1159	if (!list_empty(&inode->i_wb_list)) {
1160		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1161		if (!list_empty(&inode->i_wb_list)) {
1162			list_del_init(&inode->i_wb_list);
1163			trace_sb_clear_inode_writeback(inode);
1164		}
1165		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1166	}
1167}
1168
1169/*
1170 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
1171 * furthest end of its superblock's dirty-inode list.
1172 *
1173 * Before stamping the inode's ->dirtied_when, we check to see whether it is
1174 * already the most-recently-dirtied inode on the b_dirty list.  If that is
1175 * the case then the inode must have been redirtied while it was being written
1176 * out and we don't reset its dirtied_when.
1177 */
1178static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
1179{
1180	assert_spin_locked(&inode->i_lock);
1181
1182	if (!list_empty(&wb->b_dirty)) {
1183		struct inode *tail;
1184
1185		tail = wb_inode(wb->b_dirty.next);
1186		if (time_before(inode->dirtied_when, tail->dirtied_when))
1187			inode->dirtied_when = jiffies;
1188	}
1189	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
1190	inode->i_state &= ~I_SYNC_QUEUED;
1191}
1192
1193static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1194{
1195	spin_lock(&inode->i_lock);
1196	redirty_tail_locked(inode, wb);
1197	spin_unlock(&inode->i_lock);
1198}
1199
1200/*
1201 * requeue inode for re-scanning after bdi->b_io list is exhausted.
1202 */
1203static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1204{
1205	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
1206}
1207
1208static void inode_sync_complete(struct inode *inode)
1209{
1210	inode->i_state &= ~I_SYNC;
1211	/* If inode is clean and unused, put it into LRU now... */
1212	inode_add_lru(inode);
1213	/* Waiters must see I_SYNC cleared before being woken up */
1214	smp_mb();
1215	wake_up_bit(&inode->i_state, __I_SYNC);
1216}
1217
1218static bool inode_dirtied_after(struct inode *inode, unsigned long t)
1219{
1220	bool ret = time_after(inode->dirtied_when, t);
1221#ifndef CONFIG_64BIT
1222	/*
1223	 * For inodes being constantly redirtied, dirtied_when can get stuck.
1224	 * It _appears_ to be in the future, but is actually in distant past.
1225	 * This test is necessary to prevent such wrapped-around relative times
1226	 * from permanently stopping the whole bdi writeback.
1227	 */
1228	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
1229#endif
1230	return ret;
1231}
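/*
 * Back-of-the-envelope for the 32-bit case: at HZ == 1000, jiffies
 * wraps roughly every 2^32 / 1000 seconds, about 49.7 days, so a
 * long-stuck dirtied_when can compare as "after" the cutoff; the
 * time_before_eq(..., jiffies) check above filters those out.
 */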
1232
1233#define EXPIRE_DIRTY_ATIME 0x0001
1234
1235/*
1236 * Move expired (dirtied before dirtied_before) dirty inodes from
1237 * @delaying_queue to @dispatch_queue.
1238 */
1239static int move_expired_inodes(struct list_head *delaying_queue,
1240			       struct list_head *dispatch_queue,
1241			       unsigned long dirtied_before)
1242{
1243	LIST_HEAD(tmp);
1244	struct list_head *pos, *node;
1245	struct super_block *sb = NULL;
1246	struct inode *inode;
1247	int do_sb_sort = 0;
1248	int moved = 0;
1249
1250	while (!list_empty(delaying_queue)) {
1251		inode = wb_inode(delaying_queue->prev);
1252		if (inode_dirtied_after(inode, dirtied_before))
1253			break;
1254		list_move(&inode->i_io_list, &tmp);
1255		moved++;
1256		spin_lock(&inode->i_lock);
1257		inode->i_state |= I_SYNC_QUEUED;
1258		spin_unlock(&inode->i_lock);
1259		if (sb_is_blkdev_sb(inode->i_sb))
1260			continue;
1261		if (sb && sb != inode->i_sb)
1262			do_sb_sort = 1;
1263		sb = inode->i_sb;
1264	}
1265
1266	/* just one sb in list, splice to dispatch_queue and we're done */
1267	if (!do_sb_sort) {
1268		list_splice(&tmp, dispatch_queue);
1269		goto out;
1270	}
1271
1272	/* Move inodes from one superblock together */
1273	while (!list_empty(&tmp)) {
1274		sb = wb_inode(tmp.prev)->i_sb;
1275		list_for_each_prev_safe(pos, node, &tmp) {
1276			inode = wb_inode(pos);
1277			if (inode->i_sb == sb)
1278				list_move(&inode->i_io_list, dispatch_queue);
1279		}
1280	}
1281out:
1282	return moved;
1283}
1284
1285/*
1286 * Queue all expired dirty inodes for io, eldest first.
1287 * Before
1288 *         newly dirtied     b_dirty    b_io    b_more_io
1289 *         =============>    gf         edc     BA
1290 * After
1291 *         newly dirtied     b_dirty    b_io    b_more_io
1292 *         =============>    g          fBAedc
1293 *                                           |
1294 *                                           +--> dequeue for IO
1295 */
1296static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
1297		     unsigned long dirtied_before)
1298{
1299	int moved;
1300	unsigned long time_expire_jif = dirtied_before;
1301
1302	assert_spin_locked(&wb->list_lock);
1303	list_splice_init(&wb->b_more_io, &wb->b_io);
1304	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
1305	if (!work->for_sync)
1306		time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
1307	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
1308				     time_expire_jif);
1309	if (moved)
1310		wb_io_lists_populated(wb);
1311	trace_writeback_queue_io(wb, work, dirtied_before, moved);
1312}
1313
1314static int write_inode(struct inode *inode, struct writeback_control *wbc)
1315{
1316	int ret;
1317
1318	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
1319		trace_writeback_write_inode_start(inode, wbc);
1320		ret = inode->i_sb->s_op->write_inode(inode, wbc);
1321		trace_writeback_write_inode(inode, wbc);
1322		return ret;
1323	}
1324	return 0;
1325}
1326
1327/*
1328 * Wait for writeback on an inode to complete. Called with i_lock held.
1329 * Caller must make sure inode cannot go away when we drop i_lock.
1330 */
1331static void __inode_wait_for_writeback(struct inode *inode)
1332	__releases(inode->i_lock)
1333	__acquires(inode->i_lock)
1334{
1335	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
1336	wait_queue_head_t *wqh;
1337
1338	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1339	while (inode->i_state & I_SYNC) {
1340		spin_unlock(&inode->i_lock);
1341		__wait_on_bit(wqh, &wq, bit_wait,
1342			      TASK_UNINTERRUPTIBLE);
1343		spin_lock(&inode->i_lock);
1344	}
1345}
1346
1347/*
1348 * Wait for writeback on an inode to complete. Caller must have inode pinned.
1349 */
1350void inode_wait_for_writeback(struct inode *inode)
1351{
1352	spin_lock(&inode->i_lock);
1353	__inode_wait_for_writeback(inode);
1354	spin_unlock(&inode->i_lock);
1355}
1356
1357/*
1358 * Sleep until I_SYNC is cleared. This function must be called with i_lock
1359 * held and drops it. It is aimed for callers not holding any inode reference
1360 * so once i_lock is dropped, inode can go away.
1361 */
1362static void inode_sleep_on_writeback(struct inode *inode)
1363	__releases(inode->i_lock)
1364{
1365	DEFINE_WAIT(wait);
1366	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1367	int sleep;
1368
1369	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1370	sleep = inode->i_state & I_SYNC;
1371	spin_unlock(&inode->i_lock);
1372	if (sleep)
1373		schedule();
1374	finish_wait(wqh, &wait);
1375}
1376
1377/*
1378 * Find proper writeback list for the inode depending on its current state and
1379 * possibly also change of its state while we were doing writeback.  Here we
1380 * handle things such as livelock prevention or fairness of writeback among
1381 * inodes. This function can be called only by flusher thread - no one else
1382 * processes all inodes in writeback lists and requeueing inodes behind flusher
1383 * thread's back can have unexpected consequences.
1384 */
1385static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1386			  struct writeback_control *wbc)
1387{
1388	if (inode->i_state & I_FREEING)
1389		return;
1390
1391	/*
1392	 * Sync livelock prevention. Each inode is tagged and synced in one
1393	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
1394	 * the dirty time to prevent enqueue and sync it again.
1395	 */
1396	if ((inode->i_state & I_DIRTY) &&
1397	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
1398		inode->dirtied_when = jiffies;
1399
1400	if (wbc->pages_skipped) {
1401		/*
1402		 * writeback is not making progress due to locked
1403		 * buffers. Skip this inode for now.
1404		 */
1405		redirty_tail_locked(inode, wb);
1406		return;
1407	}
1408
1409	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
1410		/*
1411		 * We didn't write back all the pages.  nfs_writepages()
1412		 * sometimes bails out without doing anything.
1413		 */
1414		if (wbc->nr_to_write <= 0) {
1415			/* Slice used up. Queue for next turn. */
1416			requeue_io(inode, wb);
1417		} else {
1418			/*
1419			 * Writeback blocked by something other than
1420			 * congestion. Delay the inode for some time to
1421			 * avoid spinning on the CPU (100% iowait)
1422			 * retrying writeback of the dirty page/inode
1423			 * that cannot be performed immediately.
1424			 */
1425			redirty_tail_locked(inode, wb);
1426		}
1427	} else if (inode->i_state & I_DIRTY) {
1428		/*
1429		 * Filesystems can dirty the inode during writeback operations,
1430		 * such as delayed allocation during submission or metadata
1431		 * updates after data IO completion.
1432		 */
1433		redirty_tail_locked(inode, wb);
1434	} else if (inode->i_state & I_DIRTY_TIME) {
1435		inode->dirtied_when = jiffies;
1436		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1437		inode->i_state &= ~I_SYNC_QUEUED;
1438	} else {
1439		/* The inode is clean. Remove from writeback lists. */
1440		inode_io_list_del_locked(inode, wb);
1441	}
1442}
1443
1444/*
1445 * Write out an inode and its dirty pages. Do not update the writeback list
1446 * linkage. That is left to the caller. The caller is also responsible for
1447 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
1448 */
1449static int
1450__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1451{
1452	struct address_space *mapping = inode->i_mapping;
1453	long nr_to_write = wbc->nr_to_write;
1454	unsigned dirty;
1455	int ret;
1456
1457	WARN_ON(!(inode->i_state & I_SYNC));
1458
1459	trace_writeback_single_inode_start(inode, wbc, nr_to_write);
1460
1461	ret = do_writepages(mapping, wbc);
1462
1463	/*
1464	 * Make sure to wait on the data before writing out the metadata.
1465	 * This is important for filesystems that modify metadata on data
1466	 * I/O completion. We don't do it for sync(2) writeback because it has a
1467	 * separate, external IO completion path and ->sync_fs for guaranteeing
1468	 * inode metadata is written back correctly.
1469	 */
1470	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
1471		int err = filemap_fdatawait(mapping);
1472		if (ret == 0)
1473			ret = err;
1474	}
1475
1476	/*
1477	 * Some filesystems may redirty the inode during the writeback
1478	 * due to delalloc, clear dirty metadata flags right before
1479	 * write_inode()
1480	 */
1481	spin_lock(&inode->i_lock);
1482
1483	dirty = inode->i_state & I_DIRTY;
1484	if ((inode->i_state & I_DIRTY_TIME) &&
1485	    ((dirty & I_DIRTY_INODE) ||
1486	     wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
1487	     time_after(jiffies, inode->dirtied_time_when +
1488			dirtytime_expire_interval * HZ))) {
1489		dirty |= I_DIRTY_TIME;
1490		trace_writeback_lazytime(inode);
1491	}
1492	inode->i_state &= ~dirty;
1493
1494	/*
1495	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
1496	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
1497	 * either they see the I_DIRTY bits cleared or we see the dirtied
1498	 * inode.
1499	 *
1500	 * I_DIRTY_PAGES is always cleared together above even if @mapping
1501	 * still has dirty pages.  The flag is reinstated after smp_mb() if
1502	 * necessary.  This guarantees that either __mark_inode_dirty()
1503	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
1504	 */
1505	smp_mb();
1506
1507	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
1508		inode->i_state |= I_DIRTY_PAGES;
1509
1510	spin_unlock(&inode->i_lock);
1511
1512	if (dirty & I_DIRTY_TIME)
1513		mark_inode_dirty_sync(inode);
1514	/* Don't write the inode if only I_DIRTY_PAGES was set */
1515	if (dirty & ~I_DIRTY_PAGES) {
1516		int err = write_inode(inode, wbc);
1517		if (ret == 0)
1518			ret = err;
1519	}
1520	trace_writeback_single_inode(inode, wbc, nr_to_write);
1521	return ret;
1522}
1523
1524/*
1525 * Write out an inode's dirty pages. Either the caller has an active reference
1526 * on the inode or the inode has I_WILL_FREE set.
1527 *
1528 * This function is designed for writing back one inode at a time, e.g.
1529 * when called from a filesystem. The flusher thread uses __writeback_single_inode()
1530 * and does more profound writeback list handling in writeback_sb_inodes().
1531 */
1532static int writeback_single_inode(struct inode *inode,
1533				  struct writeback_control *wbc)
1534{
1535	struct bdi_writeback *wb;
1536	int ret = 0;
1537
1538	spin_lock(&inode->i_lock);
1539	if (!atomic_read(&inode->i_count))
1540		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
1541	else
1542		WARN_ON(inode->i_state & I_WILL_FREE);
1543
1544	if (inode->i_state & I_SYNC) {
1545		if (wbc->sync_mode != WB_SYNC_ALL)
1546			goto out;
1547		/*
1548		 * It's a data-integrity sync. We must wait. Since callers hold
1549		 * inode reference or inode has I_WILL_FREE set, it cannot go
1550		 * away under us.
1551		 */
1552		__inode_wait_for_writeback(inode);
1553	}
1554	WARN_ON(inode->i_state & I_SYNC);
1555	/*
1556	 * Skip inode if it is clean and we have no outstanding writeback in
1557	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
1558	 * function since flusher thread may be doing for example sync in
1559	 * parallel and if we move the inode, it could get skipped. So here we
1560	 * make sure inode is on some writeback list and leave it there unless
1561	 * we have completely cleaned the inode.
1562	 */
1563	if (!(inode->i_state & I_DIRTY_ALL) &&
1564	    (wbc->sync_mode != WB_SYNC_ALL ||
1565	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
1566		goto out;
1567	inode->i_state |= I_SYNC;
1568	wbc_attach_and_unlock_inode(wbc, inode);
1569
1570	ret = __writeback_single_inode(inode, wbc);
1571
1572	wbc_detach_inode(wbc);
1573
1574	wb = inode_to_wb_and_lock_list(inode);
1575	spin_lock(&inode->i_lock);
1576	/*
1577	 * If inode is clean, remove it from writeback lists. Otherwise don't
1578	 * touch it. See comment above for explanation.
1579	 */
1580	if (!(inode->i_state & I_DIRTY_ALL))
1581		inode_io_list_del_locked(inode, wb);
1582	spin_unlock(&wb->list_lock);
1583	inode_sync_complete(inode);
1584out:
1585	spin_unlock(&inode->i_lock);
1586	return ret;
1587}
1588
1589static long writeback_chunk_size(struct bdi_writeback *wb,
1590				 struct wb_writeback_work *work)
1591{
1592	long pages;
1593
1594	/*
1595	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
1596	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
1597	 * here avoids calling into writeback_inodes_wb() more than once.
1598	 *
1599	 * The intended call sequence for WB_SYNC_ALL writeback is:
1600	 *
1601	 *      wb_writeback()
1602	 *          writeback_sb_inodes()       <== called only once
1603	 *              write_cache_pages()     <== called once for each inode
1604	 *                   (quickly) tag currently dirty pages
1605	 *                   (maybe slowly) sync all tagged pages
1606	 */
1607	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
1608		pages = LONG_MAX;
1609	else {
1610		pages = min(wb->avg_write_bandwidth / 2,
1611			    global_wb_domain.dirty_limit / DIRTY_SCOPE);
1612		pages = min(pages, work->nr_pages);
1613		pages = round_down(pages + MIN_WRITEBACK_PAGES,
1614				   MIN_WRITEBACK_PAGES);
1615	}
1616
1617	return pages;
1618}
1619
1620/*
1621 * Write a portion of b_io inodes which belong to @sb.
1622 *
1623 * Return the number of pages and/or inodes written.
1624 *
1625 * NOTE! This is called with wb->list_lock held, and will
1626 * unlock and relock that for each inode it ends up doing
1627 * IO for.
1628 */
1629static long writeback_sb_inodes(struct super_block *sb,
1630				struct bdi_writeback *wb,
1631				struct wb_writeback_work *work)
1632{
1633	struct writeback_control wbc = {
1634		.sync_mode		= work->sync_mode,
1635		.tagged_writepages	= work->tagged_writepages,
1636		.for_kupdate		= work->for_kupdate,
1637		.for_background		= work->for_background,
1638		.for_sync		= work->for_sync,
1639		.range_cyclic		= work->range_cyclic,
1640		.range_start		= 0,
1641		.range_end		= LLONG_MAX,
1642	};
1643	unsigned long start_time = jiffies;
1644	long write_chunk;
1645	long wrote = 0;  /* count both pages and inodes */
1646
1647	while (!list_empty(&wb->b_io)) {
1648		struct inode *inode = wb_inode(wb->b_io.prev);
1649		struct bdi_writeback *tmp_wb;
1650
1651		if (inode->i_sb != sb) {
1652			if (work->sb) {
1653				/*
1654				 * We only want to write back data for this
1655				 * superblock, move all inodes not belonging
1656				 * to it back onto the dirty list.
1657				 */
1658				redirty_tail(inode, wb);
1659				continue;
1660			}
1661
1662			/*
1663			 * The inode belongs to a different superblock.
1664			 * Bounce back to the caller to unpin this and
1665			 * pin the next superblock.
1666			 */
1667			break;
1668		}
1669
1670		/*
1671		 * Don't bother with new inodes or inodes being freed: the first
1672		 * kind does not need periodic writeout yet, and for the latter,
1673		 * writeout is handled by the freer.
1674		 */
1675		spin_lock(&inode->i_lock);
1676		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
1677			redirty_tail_locked(inode, wb);
1678			spin_unlock(&inode->i_lock);
1679			continue;
1680		}
1681		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
1682			/*
1683			 * If this inode is locked for writeback and we are not
1684			 * doing writeback-for-data-integrity, move it to
1685			 * b_more_io so that writeback can proceed with the
1686			 * other inodes on s_io.
1687			 *
1688			 * We'll have another go at writing back this inode
1689			 * when we have completed a full scan of b_io.
1690			 */
1691			spin_unlock(&inode->i_lock);
1692			requeue_io(inode, wb);
1693			trace_writeback_sb_inodes_requeue(inode);
1694			continue;
1695		}
1696		spin_unlock(&wb->list_lock);
1697
1698		/*
1699		 * We already requeued the inode if it had I_SYNC set and we
1700		 * are doing WB_SYNC_NONE writeback. So this catches only the
1701		 * WB_SYNC_ALL case.
1702		 */
1703		if (inode->i_state & I_SYNC) {
1704			/* Wait for I_SYNC. This function drops i_lock... */
1705			inode_sleep_on_writeback(inode);
1706			/* Inode may be gone, start again */
1707			spin_lock(&wb->list_lock);
1708			continue;
1709		}
1710		inode->i_state |= I_SYNC;
1711		wbc_attach_and_unlock_inode(&wbc, inode);
1712
1713		write_chunk = writeback_chunk_size(wb, work);
1714		wbc.nr_to_write = write_chunk;
1715		wbc.pages_skipped = 0;
1716
1717		/*
1718		 * We use I_SYNC to pin the inode in memory. While it is set
1719		 * evict_inode() will wait so the inode cannot be freed.
1720		 */
1721		__writeback_single_inode(inode, &wbc);
1722
1723		wbc_detach_inode(&wbc);
1724		work->nr_pages -= write_chunk - wbc.nr_to_write;
1725		wrote += write_chunk - wbc.nr_to_write;
1726
1727		if (need_resched()) {
1728			/*
1729			 * We're trying to balance between building up a nice
1730			 * long list of IOs to improve our merge rate, and
1731			 * getting those IOs out quickly for anyone throttling
1732			 * in balance_dirty_pages().  cond_resched() doesn't
1733			 * unplug, so get our IOs out the door before we
1734			 * give up the CPU.
1735			 */
1736			blk_flush_plug(current);
1737			cond_resched();
1738		}
1739
1740		/*
1741		 * Requeue @inode if still dirty.  Be careful as @inode may
1742		 * have been switched to another wb in the meantime.
1743		 */
1744		tmp_wb = inode_to_wb_and_lock_list(inode);
1745		spin_lock(&inode->i_lock);
1746		if (!(inode->i_state & I_DIRTY_ALL))
1747			wrote++;
1748		requeue_inode(inode, tmp_wb, &wbc);
1749		inode_sync_complete(inode);
1750		spin_unlock(&inode->i_lock);
1751
1752		if (unlikely(tmp_wb != wb)) {
1753			spin_unlock(&tmp_wb->list_lock);
1754			spin_lock(&wb->list_lock);
1755		}
1756
1757		/*
1758		 * bail out to wb_writeback() often enough to check
1759		 * background threshold and other termination conditions.
1760		 */
1761		if (wrote) {
1762			if (time_is_before_jiffies(start_time + HZ / 10UL))
1763				break;
1764			if (work->nr_pages <= 0)
1765				break;
1766		}
1767	}
1768	return wrote;
1769}
1770
1771static long __writeback_inodes_wb(struct bdi_writeback *wb,
1772				  struct wb_writeback_work *work)
1773{
1774	unsigned long start_time = jiffies;
1775	long wrote = 0;
1776
1777	while (!list_empty(&wb->b_io)) {
1778		struct inode *inode = wb_inode(wb->b_io.prev);
1779		struct super_block *sb = inode->i_sb;
1780
1781		if (!trylock_super(sb)) {
1782			/*
1783			 * trylock_super() may fail consistently due to
1784			 * s_umount being grabbed by someone else. Don't use
1785			 * requeue_io() to avoid busy retrying the inode/sb.
1786			 */
1787			redirty_tail(inode, wb);
1788			continue;
1789		}
1790		wrote += writeback_sb_inodes(sb, wb, work);
1791		up_read(&sb->s_umount);
1792
1793		/* refer to the same tests at the end of writeback_sb_inodes */
1794		if (wrote) {
1795			if (time_is_before_jiffies(start_time + HZ / 10UL))
1796				break;
1797			if (work->nr_pages <= 0)
1798				break;
1799		}
1800	}
1801	/* Leave any unwritten inodes on b_io */
1802	return wrote;
1803}
1804
1805static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
1806				enum wb_reason reason)
1807{
1808	struct wb_writeback_work work = {
1809		.nr_pages	= nr_pages,
1810		.sync_mode	= WB_SYNC_NONE,
1811		.range_cyclic	= 1,
1812		.reason		= reason,
1813	};
1814	struct blk_plug plug;
1815
1816	blk_start_plug(&plug);
1817	spin_lock(&wb->list_lock);
1818	if (list_empty(&wb->b_io))
1819		queue_io(wb, &work, jiffies);
1820	__writeback_inodes_wb(wb, &work);
1821	spin_unlock(&wb->list_lock);
1822	blk_finish_plug(&plug);
1823
1824	return nr_pages - work.nr_pages;
1825}
1826
1827/*
1828 * Explicit flushing or periodic writeback of "old" data.
1829 *
1830 * Define "old": the first time one of an inode's pages is dirtied, we mark the
1831 * dirtying-time in the inode's address_space.  So this periodic writeback code
1832 * just walks the superblock inode list, writing back any inodes which are
1833 * older than a specific point in time.
1834 *
1835 * Try to run once per dirty_writeback_interval.  But if a writeback event
1836 * takes longer than one dirty_writeback_interval, then leave a
1837 * one-second gap.
1838 *
1839 * dirtied_before takes precedence over nr_to_write.  So we'll only write back
1840 * all dirty pages if they are all attached to "old" mappings.
1841 */
1842static long wb_writeback(struct bdi_writeback *wb,
1843			 struct wb_writeback_work *work)
1844{
1845	unsigned long wb_start = jiffies;
1846	long nr_pages = work->nr_pages;
1847	unsigned long dirtied_before = jiffies;
1848	struct inode *inode;
1849	long progress;
1850	struct blk_plug plug;
1851
1852	blk_start_plug(&plug);
1853	spin_lock(&wb->list_lock);
1854	for (;;) {
1855		/*
1856		 * Stop writeback when nr_pages has been consumed
1857		 */
1858		if (work->nr_pages <= 0)
1859			break;
1860
1861		/*
1862		 * Background writeout and kupdate-style writeback may
1863		 * run forever. Stop them if there is other work to do
1864		 * so that e.g. sync can proceed. They'll be restarted
1865		 * after the other works are all done.
1866		 */
1867		if ((work->for_background || work->for_kupdate) &&
1868		    !list_empty(&wb->work_list))
1869			break;
1870
1871		/*
1872		 * For background writeout, stop when we are below the
1873		 * background dirty threshold
1874		 */
1875		if (work->for_background && !wb_over_bg_thresh(wb))
1876			break;
1877
1878		/*
1879		 * Kupdate and background works are special and we want to
1880		 * include all inodes that need writing. Livelock avoidance is
1881		 * handled by these works yielding to any other work so we are
1882		 * safe.
1883		 */
1884		if (work->for_kupdate) {
1885			dirtied_before = jiffies -
1886				msecs_to_jiffies(dirty_expire_interval * 10);
1887		} else if (work->for_background)
1888			dirtied_before = jiffies;
1889
1890		trace_writeback_start(wb, work);
1891		if (list_empty(&wb->b_io))
1892			queue_io(wb, work, dirtied_before);
1893		if (work->sb)
1894			progress = writeback_sb_inodes(work->sb, wb, work);
1895		else
1896			progress = __writeback_inodes_wb(wb, work);
1897		trace_writeback_written(wb, work);
1898
1899		wb_update_bandwidth(wb, wb_start);
1900
1901		/*
1902		 * Did we write something? Try for more
1903		 *
1904		 * Dirty inodes are moved to b_io for writeback in batches.
1905		 * The completion of the current batch does not necessarily
1906		 * mean the overall work is done. So we keep looping as long
1907		 * as we made some progress on cleaning pages or inodes.
1908		 */
1909		if (progress)
1910			continue;
1911		/*
1912		 * No more inodes for IO, bail
1913		 */
1914		if (list_empty(&wb->b_more_io))
1915			break;
1916		/*
1917		 * Nothing written. Wait for some inode to
1918		 * become available for writeback. Otherwise
1919		 * we'll just busyloop.
1920		 */
1921		trace_writeback_wait(wb, work);
1922		inode = wb_inode(wb->b_more_io.prev);
1923		spin_lock(&inode->i_lock);
1924		spin_unlock(&wb->list_lock);
1925		/* This function drops i_lock... */
1926		inode_sleep_on_writeback(inode);
1927		spin_lock(&wb->list_lock);
1928	}
1929	spin_unlock(&wb->list_lock);
1930	blk_finish_plug(&plug);
1931
1932	return nr_pages - work->nr_pages;
1933}
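/*
 * Illustrative numbers for the kupdate branch above, assuming the
 * default sysctl values: dirty_expire_interval defaults to 3000
 * centiseconds (/proc/sys/vm/dirty_expire_centisecs), so
 *
 *	dirtied_before = jiffies - msecs_to_jiffies(3000 * 10);
 *
 * i.e. only inodes dirtied more than ~30 seconds ago are queued for
 * periodic writeback.
 */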
1934
1935/*
1936 * Return the next wb_writeback_work struct that hasn't been processed yet.
1937 */
1938static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
1939{
1940	struct wb_writeback_work *work = NULL;
1941
1942	spin_lock_bh(&wb->work_lock);
1943	if (!list_empty(&wb->work_list)) {
1944		work = list_entry(wb->work_list.next,
1945				  struct wb_writeback_work, list);
1946		list_del_init(&work->list);
1947	}
1948	spin_unlock_bh(&wb->work_lock);
1949	return work;
1950}
1951
1952static long wb_check_background_flush(struct bdi_writeback *wb)
1953{
1954	if (wb_over_bg_thresh(wb)) {
1955
1956		struct wb_writeback_work work = {
1957			.nr_pages	= LONG_MAX,
1958			.sync_mode	= WB_SYNC_NONE,
1959			.for_background	= 1,
1960			.range_cyclic	= 1,
1961			.reason		= WB_REASON_BACKGROUND,
1962		};
1963
1964		return wb_writeback(wb, &work);
1965	}
1966
1967	return 0;
1968}
1969
1970static long wb_check_old_data_flush(struct bdi_writeback *wb)
1971{
1972	unsigned long expired;
1973	long nr_pages;
1974
1975	/*
1976	 * When set to zero, disable periodic writeback
1977	 */
1978	if (!dirty_writeback_interval)
1979		return 0;
1980
1981	expired = wb->last_old_flush +
1982			msecs_to_jiffies(dirty_writeback_interval * 10);
1983	if (time_before(jiffies, expired))
1984		return 0;
1985
1986	wb->last_old_flush = jiffies;
1987	nr_pages = get_nr_dirty_pages();
1988
1989	if (nr_pages) {
1990		struct wb_writeback_work work = {
1991			.nr_pages	= nr_pages,
1992			.sync_mode	= WB_SYNC_NONE,
1993			.for_kupdate	= 1,
1994			.range_cyclic	= 1,
1995			.reason		= WB_REASON_PERIODIC,
1996		};
1997
1998		return wb_writeback(wb, &work);
1999	}
2000
2001	return 0;
2002}
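/*
 * Units note: dirty_writeback_interval is kept in centiseconds (it
 * backs /proc/sys/vm/dirty_writeback_centisecs), hence the "* 10"
 * above converting to milliseconds for msecs_to_jiffies().  With the
 * default value of 500, periodic writeback runs at most once every
 * ~5 seconds.
 */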
2003
2004static long wb_check_start_all(struct bdi_writeback *wb)
2005{
2006	long nr_pages;
2007
2008	if (!test_bit(WB_start_all, &wb->state))
2009		return 0;
2010
2011	nr_pages = get_nr_dirty_pages();
2012	if (nr_pages) {
2013		struct wb_writeback_work work = {
2014			.nr_pages	= wb_split_bdi_pages(wb, nr_pages),
2015			.sync_mode	= WB_SYNC_NONE,
2016			.range_cyclic	= 1,
2017			.reason		= wb->start_all_reason,
2018		};
2019
2020		nr_pages = wb_writeback(wb, &work);
2021	}
2022
2023	clear_bit(WB_start_all, &wb->state);
2024	return nr_pages;
2025}
2026
2027
2028/*
2029 * Retrieve work items and do the writeback they describe
2030 */
2031static long wb_do_writeback(struct bdi_writeback *wb)
2032{
2033	struct wb_writeback_work *work;
2034	long wrote = 0;
2035
2036	set_bit(WB_writeback_running, &wb->state);
2037	while ((work = get_next_work_item(wb)) != NULL) {
2038		trace_writeback_exec(wb, work);
2039		wrote += wb_writeback(wb, work);
2040		finish_writeback_work(wb, work);
2041	}
2042
2043	/*
2044	 * Check for a flush-everything request
2045	 */
2046	wrote += wb_check_start_all(wb);
2047
2048	/*
2049	 * Check for periodic writeback, kupdated() style
2050	 */
2051	wrote += wb_check_old_data_flush(wb);
2052	wrote += wb_check_background_flush(wb);
2053	clear_bit(WB_writeback_running, &wb->state);
2054
2055	return wrote;
2056}
2057
2058/*
2059 * Handle writeback of dirty data for the device backed by this bdi. Also
2060 * reschedules periodically and does kupdated style flushing.
2061 */
2062void wb_workfn(struct work_struct *work)
2063{
2064	struct bdi_writeback *wb = container_of(to_delayed_work(work),
2065						struct bdi_writeback, dwork);
2066	long pages_written;
2067
2068	set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
2069	current->flags |= PF_SWAPWRITE;
2070
2071	if (likely(!current_is_workqueue_rescuer() ||
2072		   !test_bit(WB_registered, &wb->state))) {
2073		/*
2074		 * The normal path.  Keep writing back @wb until its
2075		 * work_list is empty.  Note that this path is also taken
2076		 * if @wb is shutting down even when we're running off the
2077		 * rescuer as work_list needs to be drained.
2078		 */
2079		do {
2080			pages_written = wb_do_writeback(wb);
2081			trace_writeback_pages_written(pages_written);
2082		} while (!list_empty(&wb->work_list));
2083	} else {
2084		/*
2085		 * bdi_wq can't get enough workers and we're running off
2086		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
2087		 * enough for efficient IO.
2088		 */
2089		pages_written = writeback_inodes_wb(wb, 1024,
2090						    WB_REASON_FORKER_THREAD);
2091		trace_writeback_pages_written(pages_written);
2092	}
2093
2094	if (!list_empty(&wb->work_list))
2095		wb_wakeup(wb);
2096	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2097		wb_wakeup_delayed(wb);
2098
2099	current->flags &= ~PF_SWAPWRITE;
2100}
2101
2102/*
2103 * Kick off writeback for this bdi: wake up each of its wbs so they
2104 * write back their dirty pages.
2105 */
2106static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2107					 enum wb_reason reason)
2108{
2109	struct bdi_writeback *wb;
2110
2111	if (!bdi_has_dirty_io(bdi))
2112		return;
2113
2114	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2115		wb_start_writeback(wb, reason);
2116}
2117
2118void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2119				enum wb_reason reason)
2120{
2121	rcu_read_lock();
2122	__wakeup_flusher_threads_bdi(bdi, reason);
2123	rcu_read_unlock();
2124}
2125
2126/*
2127 * Wakeup the flusher threads to start writeback of all currently dirty pages
2128 */
2129void wakeup_flusher_threads(enum wb_reason reason)
2130{
2131	struct backing_dev_info *bdi;
2132
2133	/*
2134	 * If we are expecting writeback progress we must submit plugged IO.
2135	 */
2136	if (blk_needs_flush_plug(current))
2137		blk_schedule_flush_plug(current);
2138
2139	rcu_read_lock();
2140	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2141		__wakeup_flusher_threads_bdi(bdi, reason);
2142	rcu_read_unlock();
2143}
2144
2145/*
2146 * Wake up bdi's periodically to make sure dirtytime inodes get
2147 * written back periodically.  We deliberately do *not* check the
2148 * b_dirtytime list in wb_has_dirty_io(), since this would cause the
2149 * kernel to be constantly waking up once there are any dirtytime
2150 * inodes on the system.  So instead we define a separate delayed work
2151 * function which gets called much more rarely.  (By default, only
2152 * once every 12 hours.)
2153 *
2154 * If there is any other write activity going on in the file system,
2155 * this function won't be necessary.  But if the only thing that has
2156 * happened on the file system is a dirtytime inode caused by an atime
2157 * update, we need this infrastructure below to make sure that inode
2158 * eventually gets pushed out to disk.
2159 */
2160static void wakeup_dirtytime_writeback(struct work_struct *w);
2161static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
2162
2163static void wakeup_dirtytime_writeback(struct work_struct *w)
2164{
2165	struct backing_dev_info *bdi;
2166
2167	rcu_read_lock();
2168	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
2169		struct bdi_writeback *wb;
2170
2171		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2172			if (!list_empty(&wb->b_dirty_time))
2173				wb_wakeup(wb);
2174	}
2175	rcu_read_unlock();
2176	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2177}
2178
2179static int __init start_dirtytime_writeback(void)
2180{
2181	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2182	return 0;
2183}
2184__initcall(start_dirtytime_writeback);
2185
2186int dirtytime_interval_handler(struct ctl_table *table, int write,
2187			       void *buffer, size_t *lenp, loff_t *ppos)
2188{
2189	int ret;
2190
2191	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2192	if (ret == 0 && write)
2193		mod_delayed_work(system_wq, &dirtytime_work, 0);
2194	return ret;
2195}
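/*
 * Note: this handler backs the vm.dirtytime_expire_seconds sysctl (the
 * ctl_table entry lives outside this file, in kernel/sysctl.c).
 * Writing a new interval reschedules dirtytime_work immediately via
 * mod_delayed_work(), so the change takes effect without waiting out
 * the previous (up to 12 hour) delay.
 */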
2196
2197static noinline void block_dump___mark_inode_dirty(struct inode *inode)
2198{
2199	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
2200		struct dentry *dentry;
2201		const char *name = "?";
2202
2203		dentry = d_find_alias(inode);
2204		if (dentry) {
2205			spin_lock(&dentry->d_lock);
2206			name = (const char *) dentry->d_name.name;
2207		}
2208		printk(KERN_DEBUG
2209		       "%s(%d): dirtied inode %lu (%s) on %s\n",
2210		       current->comm, task_pid_nr(current), inode->i_ino,
2211		       name, inode->i_sb->s_id);
2212		if (dentry) {
2213			spin_unlock(&dentry->d_lock);
2214			dput(dentry);
2215		}
2216	}
2217}
2218
2219/**
2220 * __mark_inode_dirty -	internal function
2221 *
2222 * @inode: inode to mark
2223 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
2224 *
2225 * Mark an inode as dirty. Callers should use mark_inode_dirty or
2226 * mark_inode_dirty_sync.
2227 *
2228 * Put the inode on the super block's dirty list.
2229 *
2230 * CAREFUL! We mark it dirty unconditionally, but move it onto the
2231 * dirty list only if it is hashed or if it refers to a blockdev.
2232 * If it was not hashed, it will never be added to the dirty list
2233 * even if it is later hashed, as it will have been marked dirty already.
2234 *
2235 * In short, make sure you hash any inodes _before_ you start marking
2236 * them dirty.
2237 *
2238 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
2239 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
2240 * the kernel-internal blockdev inode represents the dirtying time of the
2241 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
2242 * page->mapping->host, so the page-dirtying time is recorded in the internal
2243 * blockdev inode.
2244 */
2245void __mark_inode_dirty(struct inode *inode, int flags)
2246{
2247	struct super_block *sb = inode->i_sb;
2248	int dirtytime;
2249
2250	trace_writeback_mark_inode_dirty(inode, flags);
2251
2252	/*
2253	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
2254	 * dirty the inode itself
2255	 */
2256	if (flags & (I_DIRTY_INODE | I_DIRTY_TIME)) {
2257		trace_writeback_dirty_inode_start(inode, flags);
2258
2259		if (sb->s_op->dirty_inode)
2260			sb->s_op->dirty_inode(inode, flags);
2261
2262		trace_writeback_dirty_inode(inode, flags);
2263	}
2264	if (flags & I_DIRTY_INODE)
2265		flags &= ~I_DIRTY_TIME;
2266	dirtytime = flags & I_DIRTY_TIME;
2267
2268	/*
2269	 * Paired with smp_mb() in __writeback_single_inode() for the
2270	 * following lockless i_state test.  See there for details.
2271	 */
2272	smp_mb();
2273
2274	if (((inode->i_state & flags) == flags) ||
2275	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
2276		return;
2277
2278	if (unlikely(block_dump))
2279		block_dump___mark_inode_dirty(inode);
2280
2281	spin_lock(&inode->i_lock);
2282	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
2283		goto out_unlock_inode;
2284	if ((inode->i_state & flags) != flags) {
2285		const int was_dirty = inode->i_state & I_DIRTY;
2286
2287		inode_attach_wb(inode, NULL);
2288
2289		if (flags & I_DIRTY_INODE)
2290			inode->i_state &= ~I_DIRTY_TIME;
2291		inode->i_state |= flags;
2292
2293		/*
2294		 * If the inode is queued for writeback by flush worker, just
2295		 * update its dirty state. Once the flush worker is done with
2296		 * the inode it will place it on the appropriate superblock
2297		 * list, based upon its state.
2298		 */
2299		if (inode->i_state & I_SYNC_QUEUED)
2300			goto out_unlock_inode;
2301
2302		/*
2303		 * Only add valid (hashed) inodes to the superblock's
2304		 * dirty list.  Add blockdev inodes as well.
2305		 */
2306		if (!S_ISBLK(inode->i_mode)) {
2307			if (inode_unhashed(inode))
2308				goto out_unlock_inode;
2309		}
2310		if (inode->i_state & I_FREEING)
2311			goto out_unlock_inode;
2312
2313		/*
2314		 * If the inode was already on b_dirty/b_io/b_more_io, don't
2315		 * reposition it (that would break b_dirty time-ordering).
2316		 */
2317		if (!was_dirty) {
2318			struct bdi_writeback *wb;
2319			struct list_head *dirty_list;
2320			bool wakeup_bdi = false;
2321
2322			wb = locked_inode_to_wb_and_lock_list(inode);
2323
2324			WARN(bdi_cap_writeback_dirty(wb->bdi) &&
2325			     !test_bit(WB_registered, &wb->state),
2326			     "bdi-%s not registered\n", bdi_dev_name(wb->bdi));
2327
2328			inode->dirtied_when = jiffies;
2329			if (dirtytime)
2330				inode->dirtied_time_when = jiffies;
2331
2332			if (inode->i_state & I_DIRTY)
2333				dirty_list = &wb->b_dirty;
2334			else
2335				dirty_list = &wb->b_dirty_time;
2336
2337			wakeup_bdi = inode_io_list_move_locked(inode, wb,
2338							       dirty_list);
2339
2340			spin_unlock(&wb->list_lock);
2341			trace_writeback_dirty_inode_enqueue(inode);
2342
2343			/*
2344			 * If this is the first dirty inode for this bdi,
2345			 * we have to wake-up the corresponding bdi thread
2346			 * to make sure background write-back happens
2347			 * later.
2348			 */
2349			if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
2350				wb_wakeup_delayed(wb);
2351			return;
2352		}
2353	}
2354out_unlock_inode:
2355	spin_unlock(&inode->i_lock);
2356}
2357EXPORT_SYMBOL(__mark_inode_dirty);
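/*
 * For reference, the common entry points (static inlines defined in
 * include/linux/fs.h) reduce to:
 *
 *	mark_inode_dirty(inode)      => __mark_inode_dirty(inode, I_DIRTY);
 *	mark_inode_dirty_sync(inode) => __mark_inode_dirty(inode, I_DIRTY_SYNC);
 *
 * while lazytime timestamp updates pass I_DIRTY_TIME, which lands the
 * inode on wb->b_dirty_time rather than wb->b_dirty.
 */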
2358
2359/*
2360 * The @s_sync_lock is used to serialise concurrent sync operations
2361 * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
2362 * Concurrent callers will block on the s_sync_lock rather than doing contending
2363 * walks. The queueing maintains sync(2) required behaviour as all the IO that
2364 * has been issued up to the time this function is entered is guaranteed to be
2365 * completed by the time we have gained the lock and waited for all IO that is
2366 * in progress regardless of the order callers are granted the lock.
2367 */
2368static void wait_sb_inodes(struct super_block *sb)
2369{
2370	LIST_HEAD(sync_list);
2371
2372	/*
2373	 * We need to be protected against the filesystem going from
2374	 * r/o to r/w or vice versa.
2375	 */
2376	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2377
2378	mutex_lock(&sb->s_sync_lock);
2379
2380	/*
2381	 * Splice the writeback list onto a temporary list to avoid waiting on
2382	 * inodes that have started writeback after this point.
2383	 *
2384	 * Use rcu_read_lock() to keep the inodes around until we have a
2385	 * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
2386	 * the local list because inodes can be dropped from either by writeback
2387	 * completion.
2388	 */
2389	rcu_read_lock();
2390	spin_lock_irq(&sb->s_inode_wblist_lock);
2391	list_splice_init(&sb->s_inodes_wb, &sync_list);
2392
2393	/*
2394	 * Data integrity sync. Must wait for all pages under writeback, because
2395	 * there may have been pages dirtied before our sync call, but which had
2396	 * writeout started before we write it out.  In which case, the inode
2397	 * may not be on the dirty list, but we still have to wait for that
2398	 * writeout.
2399	 */
2400	while (!list_empty(&sync_list)) {
2401		struct inode *inode = list_first_entry(&sync_list, struct inode,
2402						       i_wb_list);
2403		struct address_space *mapping = inode->i_mapping;
2404
2405		/*
2406		 * Move each inode back to the wb list before we drop the lock
2407		 * to preserve consistency between i_wb_list and the mapping
2408		 * writeback tag. Writeback completion is responsible for removing
2409		 * the inode from either list once the writeback tag is cleared.
2410		 */
2411		list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2412
2413		/*
2414		 * The mapping can appear untagged while still on-list since we
2415		 * do not have the mapping lock. Skip it here, wb completion
2416		 * will remove it.
2417		 */
2418		if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2419			continue;
2420
2421		spin_unlock_irq(&sb->s_inode_wblist_lock);
2422
2423		spin_lock(&inode->i_lock);
2424		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2425			spin_unlock(&inode->i_lock);
2426
2427			spin_lock_irq(&sb->s_inode_wblist_lock);
2428			continue;
2429		}
2430		__iget(inode);
2431		spin_unlock(&inode->i_lock);
2432		rcu_read_unlock();
2433
2434		/*
2435		 * We keep the error status of individual mapping so that
2436		 * applications can catch the writeback error using fsync(2).
2437		 * See filemap_fdatawait_keep_errors() for details.
2438		 */
2439		filemap_fdatawait_keep_errors(mapping);
2440
2441		cond_resched();
2442
2443		iput(inode);
2444
2445		rcu_read_lock();
2446		spin_lock_irq(&sb->s_inode_wblist_lock);
2447	}
2448	spin_unlock_irq(&sb->s_inode_wblist_lock);
2449	rcu_read_unlock();
2450	mutex_unlock(&sb->s_sync_lock);
2451}
2452
2453static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
2454				     enum wb_reason reason, bool skip_if_busy)
2455{
2456	struct backing_dev_info *bdi = sb->s_bdi;
2457	DEFINE_WB_COMPLETION(done, bdi);
2458	struct wb_writeback_work work = {
2459		.sb			= sb,
2460		.sync_mode		= WB_SYNC_NONE,
2461		.tagged_writepages	= 1,
2462		.done			= &done,
2463		.nr_pages		= nr,
2464		.reason			= reason,
2465	};
2466
2467	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
2468		return;
2469	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2470
2471	bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
2472	wb_wait_for_completion(&done);
2473}
2474
2475/**
2476 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
2477 * @sb: the superblock
2478 * @nr: the number of pages to write
2479 * @reason: reason why some writeback work was initiated
2480 *
2481 * Start writeback on some inodes on this super_block. No guarantees are made
2482 * on how many (if any) will be written, and this function does not wait
2483 * for IO completion of submitted IO.
2484 */
2485void writeback_inodes_sb_nr(struct super_block *sb,
2486			    unsigned long nr,
2487			    enum wb_reason reason)
2488{
2489	__writeback_inodes_sb_nr(sb, nr, reason, false);
2490}
2491EXPORT_SYMBOL(writeback_inodes_sb_nr);
2492
2493/**
2494 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
2495 * @sb: the superblock
2496 * @reason: reason why some writeback work was initiated
2497 *
2498 * Start writeback on some inodes on this super_block. No guarantees are made
2499 * on how many (if any) will be written, and this function does not wait
2500 * for IO completion of submitted IO.
2501 */
2502void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2503{
2504	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
2505}
2506EXPORT_SYMBOL(writeback_inodes_sb);
2507
2508/**
2509 * try_to_writeback_inodes_sb - try to start writeback if none underway
2510 * @sb: the superblock
2511 * @reason: reason why some writeback work was initiated
2512 *
2513 * Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
2514 */
2515void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2516{
2517	if (!down_read_trylock(&sb->s_umount))
2518		return;
2519
2520	__writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
2521	up_read(&sb->s_umount);
2522}
2523EXPORT_SYMBOL(try_to_writeback_inodes_sb);
2524
2525/**
2526 * sync_inodes_sb	-	sync sb inode pages
2527 * @sb: the superblock
2528 *
2529 * This function writes and waits on any dirty inode belonging to this
2530 * super_block.
2531 */
2532void sync_inodes_sb(struct super_block *sb)
2533{
2534	struct backing_dev_info *bdi = sb->s_bdi;
2535	DEFINE_WB_COMPLETION(done, bdi);
2536	struct wb_writeback_work work = {
2537		.sb		= sb,
2538		.sync_mode	= WB_SYNC_ALL,
2539		.nr_pages	= LONG_MAX,
2540		.range_cyclic	= 0,
2541		.done		= &done,
2542		.reason		= WB_REASON_SYNC,
2543		.for_sync	= 1,
2544	};
2545
2546	/*
2547	 * Can't skip on !bdi_has_dirty() because clean (!dirty) inodes still
2548	 * under writeback must be waited for, and I_DIRTY_TIME inodes ignored
2549	 * by bdi_has_dirty() need to be written out too.
2550	 */
2551	if (bdi == &noop_backing_dev_info)
2552		return;
2553	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2554
2555	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
2556	bdi_down_write_wb_switch_rwsem(bdi);
2557	bdi_split_work_to_wbs(bdi, &work, false);
2558	wb_wait_for_completion(&done);
2559	bdi_up_write_wb_switch_rwsem(bdi);
2560
2561	wait_sb_inodes(sb);
2562}
2563EXPORT_SYMBOL(sync_inodes_sb);
2564
2565/**
2566 * write_inode_now	-	write an inode to disk
2567 * @inode: inode to write to disk
2568 * @sync: whether the write should be synchronous or not
2569 *
2570 * This function commits an inode to disk immediately if it is dirty. This is
2571 * primarily needed by knfsd.
2572 *
2573 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
2574 */
2575int write_inode_now(struct inode *inode, int sync)
2576{
2577	struct writeback_control wbc = {
2578		.nr_to_write = LONG_MAX,
2579		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
2580		.range_start = 0,
2581		.range_end = LLONG_MAX,
2582	};
2583
2584	if (!mapping_cap_writeback_dirty(inode->i_mapping))
2585		wbc.nr_to_write = 0;
2586
2587	might_sleep();
2588	return writeback_single_inode(inode, &wbc);
2589}
2590EXPORT_SYMBOL(write_inode_now);
2591
2592/**
2593 * sync_inode - write an inode and its pages to disk.
2594 * @inode: the inode to sync
2595 * @wbc: controls the writeback mode
2596 *
2597 * sync_inode() will write an inode and its pages to disk.  It will also
2598 * correctly update the inode on its superblock's dirty inode lists and will
2599 * update inode->i_state.
2600 *
2601 * The caller must have a ref on the inode.
2602 */
2603int sync_inode(struct inode *inode, struct writeback_control *wbc)
2604{
2605	return writeback_single_inode(inode, wbc);
2606}
2607EXPORT_SYMBOL(sync_inode);
2608
2609/**
2610 * sync_inode_metadata - write an inode to disk
2611 * @inode: the inode to sync
2612 * @wait: wait for I/O to complete.
2613 *
2614 * Write an inode to disk and adjust its dirty state after completion.
2615 *
2616 * Note: only writes the actual inode, no associated data or other metadata.
2617 */
2618int sync_inode_metadata(struct inode *inode, int wait)
2619{
2620	struct writeback_control wbc = {
2621		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
2622		.nr_to_write = 0, /* metadata-only */
2623	};
2624
2625	return sync_inode(inode, &wbc);
2626}
2627EXPORT_SYMBOL(sync_inode_metadata);
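/*
 * Illustrative (hypothetical) caller: a simple filesystem's ->fsync()
 * could flush the data range and then push just the inode with
 * sync_inode_metadata(), along these lines ("toyfs_fsync" is made up
 * for illustration; file_inode() and file_write_and_wait_range() are
 * real kernel APIs):
 *
 *	int toyfs_fsync(struct file *file, loff_t start, loff_t end,
 *			int datasync)
 *	{
 *		struct inode *inode = file_inode(file);
 *		int err = file_write_and_wait_range(file, start, end);
 *
 *		if (err)
 *			return err;
 *		if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 *			return 0;
 *		return sync_inode_metadata(inode, 1);
 *	}
 */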
v5.14.15
  98static void wb_io_lists_depopulated(struct bdi_writeback *wb)
  99{
 100	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
 101	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
 102		clear_bit(WB_has_dirty_io, &wb->state);
 103		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
 104					&wb->bdi->tot_write_bandwidth) < 0);
 105	}
 106}
 107
 108/**
 109 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
 110 * @inode: inode to be moved
 111 * @wb: target bdi_writeback
 112 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
 113 *
 114 * Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io.
 115 * Returns %true if @inode is the first occupant of the !dirty_time IO
 116 * lists; otherwise, %false.
 117 */
 118static bool inode_io_list_move_locked(struct inode *inode,
 119				      struct bdi_writeback *wb,
 120				      struct list_head *head)
 121{
 122	assert_spin_locked(&wb->list_lock);
 123
 124	list_move(&inode->i_io_list, head);
 125
 126	/* dirty_time doesn't count as dirty_io until expiration */
 127	if (head != &wb->b_dirty_time)
 128		return wb_io_lists_populated(wb);
 129
 130	wb_io_lists_depopulated(wb);
 131	return false;
 132}
 133
 134static void wb_wakeup(struct bdi_writeback *wb)
 135{
 136	spin_lock_bh(&wb->work_lock);
 137	if (test_bit(WB_registered, &wb->state))
 138		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 139	spin_unlock_bh(&wb->work_lock);
 140}
 141
 142static void finish_writeback_work(struct bdi_writeback *wb,
 143				  struct wb_writeback_work *work)
 144{
 145	struct wb_completion *done = work->done;
 146
 147	if (work->auto_free)
 148		kfree(work);
 149	if (done) {
 150		wait_queue_head_t *waitq = done->waitq;
 151
 152		/* @done can't be accessed after the following dec */
 153		if (atomic_dec_and_test(&done->cnt))
 154			wake_up_all(waitq);
 155	}
 156}
 157
 158static void wb_queue_work(struct bdi_writeback *wb,
 159			  struct wb_writeback_work *work)
 160{
 161	trace_writeback_queue(wb, work);
 162
 163	if (work->done)
 164		atomic_inc(&work->done->cnt);
 165
 166	spin_lock_bh(&wb->work_lock);
 167
 168	if (test_bit(WB_registered, &wb->state)) {
 169		list_add_tail(&work->list, &wb->work_list);
 170		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 171	} else
 172		finish_writeback_work(wb, work);
 173
 174	spin_unlock_bh(&wb->work_lock);
 175}
 176
 177/**
 178 * wb_wait_for_completion - wait for completion of bdi_writeback_works
 179 * @done: target wb_completion
 180 *
 181 * Wait for one or more work items issued to @bdi with their ->done field
 182 * set to @done, which should have been initialized with
 183 * DEFINE_WB_COMPLETION().  This function returns after all such work items
 184 * are completed.  Work items which are waited upon aren't freed
 185 * automatically on completion.
 186 */
 187void wb_wait_for_completion(struct wb_completion *done)
 188{
 189	atomic_dec(&done->cnt);		/* put down the initial count */
 190	wait_event(*done->waitq, !atomic_read(&done->cnt));
 191}
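/*
 * Typical usage, as seen in callers later in this file (e.g.
 * sync_inodes_sb()):
 *
 *	DEFINE_WB_COMPLETION(done, bdi);
 *	struct wb_writeback_work work = { ..., .done = &done, };
 *
 *	...queue one or more works with wb_queue_work()...
 *	wb_wait_for_completion(&done);
 */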
 192
 193#ifdef CONFIG_CGROUP_WRITEBACK
 194
 195/*
 196 * Parameters for foreign inode detection, see wbc_detach_inode() to see
 197 * how they're used.
 198 *
  199 * These parameters are inherently heuristic, as the detection target
  200 * itself is fuzzy.  All we want to do is detach an inode from its
  201 * current owner if it's being written to too much by some other cgroup.
  202 *
  203 * Current cgroup writeback is built on the assumption that multiple
  204 * cgroups writing to the same inode concurrently is very rare and a mode
  205 * of operation which isn't well supported.  As such, the goal is to hand
  206 * an inode over to a different cgroup without too much delay, while
  207 * avoiding overly aggressive flip-flops from occasional foreign writes.
 208 *
 209 * We record, very roughly, 2s worth of IO time history and if more than
 210 * half of that is foreign, trigger the switch.  The recording is quantized
 211 * to 16 slots.  To avoid tiny writes from swinging the decision too much,
 212 * writes smaller than 1/8 of avg size are ignored.
 213 */
  214#define WB_FRN_TIME_SHIFT	13	/* 1s = 2^13, up to 8 secs w/ 16bit */
 215#define WB_FRN_TIME_AVG_SHIFT	3	/* avg = avg * 7/8 + new * 1/8 */
 216#define WB_FRN_TIME_CUT_DIV	8	/* ignore rounds < avg / 8 */
 217#define WB_FRN_TIME_PERIOD	(2 * (1 << WB_FRN_TIME_SHIFT))	/* 2s */
 218
 219#define WB_FRN_HIST_SLOTS	16	/* inode->i_wb_frn_history is 16bit */
 220#define WB_FRN_HIST_UNIT	(WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
 221					/* each slot's duration is 2s / 16 */
 222#define WB_FRN_HIST_THR_SLOTS	(WB_FRN_HIST_SLOTS / 2)
 223					/* if foreign slots >= 8, switch */
 224#define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
  225					/* one round can affect up to 5 slots */
 226#define WB_FRN_MAX_IN_FLIGHT	1024	/* don't queue too many concurrently */
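/*
 * Plugging the definitions above into concrete numbers (a worked
 * example, not new policy): 1s is 1 << 13 == 8192 time units, so
 * WB_FRN_TIME_PERIOD is 16384 units (~2s) and WB_FRN_HIST_UNIT is
 * 16384 / 16 == 1024 units (~125ms per history slot).  A switch
 * verdict therefore requires more than WB_FRN_HIST_THR_SLOTS == 8
 * foreign slots, i.e. roughly one second's worth of foreign IO time
 * within the ~2s window.
 */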
 227
 228/*
 229 * Maximum inodes per isw.  A specific value has been chosen to make
  230 * struct inode_switch_wbs_context fit into a 1024-byte kmalloc.
 231 */
 232#define WB_MAX_INODES_PER_ISW  ((1024UL - sizeof(struct inode_switch_wbs_context)) \
 233                                / sizeof(struct inode *))
 234
 235static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
 236static struct workqueue_struct *isw_wq;
 237
 238void __inode_attach_wb(struct inode *inode, struct page *page)
 239{
 240	struct backing_dev_info *bdi = inode_to_bdi(inode);
 241	struct bdi_writeback *wb = NULL;
 242
 243	if (inode_cgwb_enabled(inode)) {
 244		struct cgroup_subsys_state *memcg_css;
 245
 246		if (page) {
 247			memcg_css = mem_cgroup_css_from_page(page);
 248			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 249		} else {
 250			/* must pin memcg_css, see wb_get_create() */
 251			memcg_css = task_get_css(current, memory_cgrp_id);
 252			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 253			css_put(memcg_css);
 254		}
 255	}
 256
 257	if (!wb)
 258		wb = &bdi->wb;
 259
 260	/*
 261	 * There may be multiple instances of this function racing to
 262	 * update the same inode.  Use cmpxchg() to tell the winner.
 263	 */
 264	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
 265		wb_put(wb);
 266}
 267EXPORT_SYMBOL_GPL(__inode_attach_wb);
 268
 269/**
 270 * inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
 271 * @inode: inode of interest with i_lock held
 272 * @wb: target bdi_writeback
 273 *
  274 * Remove the inode from wb's io lists and, if necessary, put it onto the
  275 * b_attached list.  Only inodes attached to cgwb's are kept on this list.
 276 */
 277static void inode_cgwb_move_to_attached(struct inode *inode,
 278					struct bdi_writeback *wb)
 279{
 280	assert_spin_locked(&wb->list_lock);
 281	assert_spin_locked(&inode->i_lock);
 282
 283	inode->i_state &= ~I_SYNC_QUEUED;
 284	if (wb != &wb->bdi->wb)
 285		list_move(&inode->i_io_list, &wb->b_attached);
 286	else
 287		list_del_init(&inode->i_io_list);
 288	wb_io_lists_depopulated(wb);
 289}
 290
 291/**
 292 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
 293 * @inode: inode of interest with i_lock held
 294 *
 295 * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
 296 * held on entry and is released on return.  The returned wb is guaranteed
 297 * to stay @inode's associated wb until its list_lock is released.
 298 */
 299static struct bdi_writeback *
 300locked_inode_to_wb_and_lock_list(struct inode *inode)
 301	__releases(&inode->i_lock)
 302	__acquires(&wb->list_lock)
 303{
 304	while (true) {
 305		struct bdi_writeback *wb = inode_to_wb(inode);
 306
 307		/*
 308		 * inode_to_wb() association is protected by both
 309		 * @inode->i_lock and @wb->list_lock but list_lock nests
 310		 * outside i_lock.  Drop i_lock and verify that the
 311		 * association hasn't changed after acquiring list_lock.
 312		 */
 313		wb_get(wb);
 314		spin_unlock(&inode->i_lock);
 315		spin_lock(&wb->list_lock);
 316
  317		/* i_wb may have changed in between, can't use inode_to_wb() */
 318		if (likely(wb == inode->i_wb)) {
 319			wb_put(wb);	/* @inode already has ref */
 320			return wb;
 321		}
 322
 323		spin_unlock(&wb->list_lock);
 324		wb_put(wb);
 325		cpu_relax();
 326		spin_lock(&inode->i_lock);
 327	}
 328}
 329
 330/**
 331 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
 332 * @inode: inode of interest
 333 *
 334 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
 335 * on entry.
 336 */
 337static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
 338	__acquires(&wb->list_lock)
 339{
 340	spin_lock(&inode->i_lock);
 341	return locked_inode_to_wb_and_lock_list(inode);
 342}
 343
 344struct inode_switch_wbs_context {
 345	struct rcu_work		work;
 346
 347	/*
 348	 * Multiple inodes can be switched at once.  The switching procedure
  349	 * consists of two parts, separated by an RCU grace period.  To make
  350	 * sure that the second part is executed for each inode gone through
  351	 * the first part, all inode pointers are placed into a NULL-terminated
  352	 * array embedded into struct inode_switch_wbs_context.  Otherwise
  353	 * an inode could be left in an inconsistent state.
 354	 */
 355	struct bdi_writeback	*new_wb;
 356	struct inode		*inodes[];
 357};
 358
 359static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 360{
 361	down_write(&bdi->wb_switch_rwsem);
 362}
 363
 364static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 365{
 366	up_write(&bdi->wb_switch_rwsem);
 367}
 368
 369static bool inode_do_switch_wbs(struct inode *inode,
 370				struct bdi_writeback *old_wb,
 371				struct bdi_writeback *new_wb)
 372{
 373	struct address_space *mapping = inode->i_mapping;
 374	XA_STATE(xas, &mapping->i_pages, 0);
 375	struct page *page;
 376	bool switched = false;
 377
 378	spin_lock(&inode->i_lock);
 379	xa_lock_irq(&mapping->i_pages);
 380
 381	/*
 382	 * Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
 383	 * path owns the inode and we shouldn't modify ->i_io_list.
 384	 */
 385	if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
 386		goto skip_switch;
 387
 388	trace_inode_switch_wbs(inode, old_wb, new_wb);
 389
 390	/*
 391	 * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
 392	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
 393	 * pages actually under writeback.
 394	 */
 395	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
 396		if (PageDirty(page)) {
 397			dec_wb_stat(old_wb, WB_RECLAIMABLE);
 398			inc_wb_stat(new_wb, WB_RECLAIMABLE);
 399		}
 400	}
 401
 402	xas_set(&xas, 0);
 403	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
 404		WARN_ON_ONCE(!PageWriteback(page));
 405		dec_wb_stat(old_wb, WB_WRITEBACK);
 406		inc_wb_stat(new_wb, WB_WRITEBACK);
 407	}
 408
 409	wb_get(new_wb);
 410
 411	/*
 412	 * Transfer to @new_wb's IO list if necessary.  If the @inode is dirty,
 413	 * the specific list @inode was on is ignored and the @inode is put on
 414	 * ->b_dirty which is always correct including from ->b_dirty_time.
 415	 * The transfer preserves @inode->dirtied_when ordering.  If the @inode
 416	 * was clean, it means it was on the b_attached list, so move it onto
 417	 * the b_attached list of @new_wb.
 418	 */
 419	if (!list_empty(&inode->i_io_list)) {
 420		inode->i_wb = new_wb;
 421
 422		if (inode->i_state & I_DIRTY_ALL) {
 423			struct inode *pos;
 424
 425			list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
 426				if (time_after_eq(inode->dirtied_when,
 427						  pos->dirtied_when))
 428					break;
 429			inode_io_list_move_locked(inode, new_wb,
 430						  pos->i_io_list.prev);
 431		} else {
 432			inode_cgwb_move_to_attached(inode, new_wb);
 433		}
 434	} else {
 435		inode->i_wb = new_wb;
 436	}
 437
 438	/* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */
 439	inode->i_wb_frn_winner = 0;
 440	inode->i_wb_frn_avg_time = 0;
 441	inode->i_wb_frn_history = 0;
 442	switched = true;
 443skip_switch:
 444	/*
 445	 * Paired with load_acquire in unlocked_inode_to_wb_begin() and
 446	 * ensures that the new wb is visible if they see !I_WB_SWITCH.
 447	 */
 448	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
 449
 450	xa_unlock_irq(&mapping->i_pages);
 451	spin_unlock(&inode->i_lock);
 452
 453	return switched;
 454}
 455
 456static void inode_switch_wbs_work_fn(struct work_struct *work)
 457{
 458	struct inode_switch_wbs_context *isw =
 459		container_of(to_rcu_work(work), struct inode_switch_wbs_context, work);
 460	struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]);
 461	struct bdi_writeback *old_wb = isw->inodes[0]->i_wb;
 462	struct bdi_writeback *new_wb = isw->new_wb;
 463	unsigned long nr_switched = 0;
 464	struct inode **inodep;
 465
 466	/*
 467	 * If @inode switches cgwb membership while sync_inodes_sb() is
 468	 * being issued, sync_inodes_sb() might miss it.  Synchronize.
 469	 */
 470	down_read(&bdi->wb_switch_rwsem);
 471
 472	/*
 473	 * By the time control reaches here, RCU grace period has passed
 474	 * since I_WB_SWITCH assertion and all wb stat update transactions
 475	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
 476	 * synchronizing against the i_pages lock.
 477	 *
 478	 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
 479	 * gives us exclusion against all wb related operations on @inode
 480	 * including IO list manipulations and stat updates.
 481	 */
 482	if (old_wb < new_wb) {
 483		spin_lock(&old_wb->list_lock);
 484		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
 485	} else {
 486		spin_lock(&new_wb->list_lock);
 487		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 488	}
 489
 490	for (inodep = isw->inodes; *inodep; inodep++) {
 491		WARN_ON_ONCE((*inodep)->i_wb != old_wb);
 492		if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
 493			nr_switched++;
 494	}
 495
 496	spin_unlock(&new_wb->list_lock);
 497	spin_unlock(&old_wb->list_lock);
 498
 499	up_read(&bdi->wb_switch_rwsem);
 500
 501	if (nr_switched) {
 502		wb_wakeup(new_wb);
 503		wb_put_many(old_wb, nr_switched);
 504	}
 505
 506	for (inodep = isw->inodes; *inodep; inodep++)
 507		iput(*inodep);
 508	wb_put(new_wb);
 509	kfree(isw);
 510	atomic_dec(&isw_nr_in_flight);
 511}
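/*
 * A note on the locking scheme above: taking the two list_locks in
 * pointer (address) order is the standard ABBA-deadlock avoidance
 * technique: every switcher acquires them in the same global order,
 * so no two switchers can each hold one lock while waiting for the
 * other.
 */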
 512
 513static bool inode_prepare_wbs_switch(struct inode *inode,
 514				     struct bdi_writeback *new_wb)
 515{
 516	/*
 517	 * Paired with smp_mb() in cgroup_writeback_umount().
 518	 * isw_nr_in_flight must be increased before checking SB_ACTIVE and
 519	 * grabbing an inode, otherwise isw_nr_in_flight can be observed as 0
  520	 * in cgroup_writeback_umount() and the isw_wq will not be flushed.
 521	 */
 522	smp_mb();
 523
 524	if (IS_DAX(inode))
 525		return false;
 526
 527	/* while holding I_WB_SWITCH, no one else can update the association */
 528	spin_lock(&inode->i_lock);
 529	if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
 530	    inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
 531	    inode_to_wb(inode) == new_wb) {
 532		spin_unlock(&inode->i_lock);
 533		return false;
 534	}
 535	inode->i_state |= I_WB_SWITCH;
 536	__iget(inode);
 537	spin_unlock(&inode->i_lock);
 538
 539	return true;
 540}
 541
 542/**
 543 * inode_switch_wbs - change the wb association of an inode
 544 * @inode: target inode
 545 * @new_wb_id: ID of the new wb
 546 *
 547 * Switch @inode's wb association to the wb identified by @new_wb_id.  The
 548 * switching is performed asynchronously and may fail silently.
 549 */
 550static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 551{
 552	struct backing_dev_info *bdi = inode_to_bdi(inode);
 553	struct cgroup_subsys_state *memcg_css;
 554	struct inode_switch_wbs_context *isw;
 555
 556	/* noop if seems to be already in progress */
 557	if (inode->i_state & I_WB_SWITCH)
 558		return;
 559
 560	/* avoid queueing a new switch if too many are already in flight */
 561	if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
 562		return;
 563
 564	isw = kzalloc(sizeof(*isw) + 2 * sizeof(struct inode *), GFP_ATOMIC);
 565	if (!isw)
 566		return;
 567
 568	atomic_inc(&isw_nr_in_flight);
 569
 570	/* find and pin the new wb */
 571	rcu_read_lock();
 572	memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
 573	if (memcg_css && !css_tryget(memcg_css))
 574		memcg_css = NULL;
 575	rcu_read_unlock();
 576	if (!memcg_css)
 577		goto out_free;
 578
 579	isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 580	css_put(memcg_css);
 581	if (!isw->new_wb)
 582		goto out_free;
 583
 584	if (!inode_prepare_wbs_switch(inode, isw->new_wb))
 585		goto out_free;
 586
 587	isw->inodes[0] = inode;
 588
 589	/*
 590	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
 591	 * the RCU protected stat update paths to grab the i_page
 592	 * lock so that stat transfer can synchronize against them.
 593	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 594	 */
 595	INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
 596	queue_rcu_work(isw_wq, &isw->work);
 597	return;
 598
 599out_free:
 600	atomic_dec(&isw_nr_in_flight);
 601	if (isw->new_wb)
 602		wb_put(isw->new_wb);
 603	kfree(isw);
 604}
 605
 606/**
 607 * cleanup_offline_cgwb - detach associated inodes
 608 * @wb: target wb
 609 *
 610 * Switch all inodes attached to @wb to a nearest living ancestor's wb in order
 611 * to eventually release the dying @wb.  Returns %true if not all inodes were
 612 * switched and the function has to be restarted.
 613 */
 614bool cleanup_offline_cgwb(struct bdi_writeback *wb)
 615{
 616	struct cgroup_subsys_state *memcg_css;
 617	struct inode_switch_wbs_context *isw;
 618	struct inode *inode;
 619	int nr;
 620	bool restart = false;
 621
 622	isw = kzalloc(sizeof(*isw) + WB_MAX_INODES_PER_ISW *
 623		      sizeof(struct inode *), GFP_KERNEL);
 624	if (!isw)
 625		return restart;
 626
 627	atomic_inc(&isw_nr_in_flight);
 628
 629	for (memcg_css = wb->memcg_css->parent; memcg_css;
 630	     memcg_css = memcg_css->parent) {
 631		isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
 632		if (isw->new_wb)
 633			break;
 634	}
 635	if (unlikely(!isw->new_wb))
 636		isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
 637
 638	nr = 0;
 639	spin_lock(&wb->list_lock);
 640	list_for_each_entry(inode, &wb->b_attached, i_io_list) {
 641		if (!inode_prepare_wbs_switch(inode, isw->new_wb))
 642			continue;
 643
 644		isw->inodes[nr++] = inode;
 645
 646		if (nr >= WB_MAX_INODES_PER_ISW - 1) {
 647			restart = true;
 648			break;
 649		}
 650	}
 651	spin_unlock(&wb->list_lock);
 652
 653	/* no attached inodes? bail out */
 654	if (nr == 0) {
 655		atomic_dec(&isw_nr_in_flight);
 656		wb_put(isw->new_wb);
 657		kfree(isw);
 658		return restart;
 659	}
 660
 661	/*
 662	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
 663	 * the RCU protected stat update paths to grab the i_page
 664	 * lock so that stat transfer can synchronize against them.
 665	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 666	 */
 667	INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
 668	queue_rcu_work(isw_wq, &isw->work);
 669
 670	return restart;
 671}
 672
 673/**
 674 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
 675 * @wbc: writeback_control of interest
 676 * @inode: target inode
 677 *
 678 * @inode is locked and about to be written back under the control of @wbc.
 679 * Record @inode's writeback context into @wbc and unlock the i_lock.  On
 680 * writeback completion, wbc_detach_inode() should be called.  This is used
 681 * to track the cgroup writeback context.
 682 */
 683void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 684				 struct inode *inode)
 685{
 686	if (!inode_cgwb_enabled(inode)) {
 687		spin_unlock(&inode->i_lock);
 688		return;
 689	}
 690
 691	wbc->wb = inode_to_wb(inode);
 692	wbc->inode = inode;
 693
 694	wbc->wb_id = wbc->wb->memcg_css->id;
 695	wbc->wb_lcand_id = inode->i_wb_frn_winner;
 696	wbc->wb_tcand_id = 0;
 697	wbc->wb_bytes = 0;
 698	wbc->wb_lcand_bytes = 0;
 699	wbc->wb_tcand_bytes = 0;
 700
 701	wb_get(wbc->wb);
 702	spin_unlock(&inode->i_lock);
 703
 704	/*
 705	 * A dying wb indicates that either the blkcg associated with the
 706	 * memcg changed or the associated memcg is dying.  In the first
 707	 * case, a replacement wb should already be available and we should
 708	 * refresh the wb immediately.  In the second case, trying to
 709	 * refresh will keep failing.
 710	 */
 711	if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
 712		inode_switch_wbs(inode, wbc->wb_id);
 713}
 714EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
 715
 716/**
 717 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 718 * @wbc: writeback_control of the just finished writeback
 719 *
 720 * To be called after a writeback attempt of an inode finishes and undoes
 721 * wbc_attach_and_unlock_inode().  Can be called under any context.
 722 *
 723 * As concurrent write sharing of an inode is expected to be very rare and
 724 * memcg only tracks page ownership on first-use basis severely confining
 725 * the usefulness of such sharing, cgroup writeback tracks ownership
 726 * per-inode.  While the support for concurrent write sharing of an inode
 727 * is deemed unnecessary, an inode being written to by different cgroups at
 728 * different points in time is a lot more common, and, more importantly,
 729 * charging only by first-use can too readily lead to grossly incorrect
 730 * behaviors (single foreign page can lead to gigabytes of writeback to be
 731 * incorrectly attributed).
 732 *
 733 * To resolve this issue, cgroup writeback detects the majority dirtier of
  734 * an inode and transfers the ownership to it.  To avoid unnecessary
 735 * oscillation, the detection mechanism keeps track of history and gives
 736 * out the switch verdict only if the foreign usage pattern is stable over
 737 * a certain amount of time and/or writeback attempts.
 738 *
 739 * On each writeback attempt, @wbc tries to detect the majority writer
 740 * using Boyer-Moore majority vote algorithm.  In addition to the byte
 741 * count from the majority voting, it also counts the bytes written for the
 742 * current wb and the last round's winner wb (max of last round's current
 743 * wb, the winner from two rounds ago, and the last round's majority
 744 * candidate).  Keeping track of the historical winner helps the algorithm
 745 * to semi-reliably detect the most active writer even when it's not the
 746 * absolute majority.
 747 *
 748 * Once the winner of the round is determined, whether the winner is
 749 * foreign or not and how much IO time the round consumed is recorded in
 750 * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
 751 * over a certain threshold, the switch verdict is given.
 752 */
 753void wbc_detach_inode(struct writeback_control *wbc)
 754{
 755	struct bdi_writeback *wb = wbc->wb;
 756	struct inode *inode = wbc->inode;
 757	unsigned long avg_time, max_bytes, max_time;
 758	u16 history;
 759	int max_id;
 760
 761	if (!wb)
 762		return;
 763
 764	history = inode->i_wb_frn_history;
 765	avg_time = inode->i_wb_frn_avg_time;
 766
 767	/* pick the winner of this round */
 768	if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
 769	    wbc->wb_bytes >= wbc->wb_tcand_bytes) {
 770		max_id = wbc->wb_id;
 771		max_bytes = wbc->wb_bytes;
 772	} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
 773		max_id = wbc->wb_lcand_id;
 774		max_bytes = wbc->wb_lcand_bytes;
 775	} else {
 776		max_id = wbc->wb_tcand_id;
 777		max_bytes = wbc->wb_tcand_bytes;
 778	}
 779
 780	/*
 781	 * Calculate the amount of IO time the winner consumed and fold it
 782	 * into the running average kept per inode.  If the consumed IO
 783	 * time is lower than avg_time / WB_FRN_TIME_CUT_DIV, ignore it for
 784	 * deciding whether to switch or not.  This is to prevent one-off
 785	 * small dirtiers from skewing the verdict.
 786	 */
 787	max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
 788				wb->avg_write_bandwidth);
 789	if (avg_time)
 790		avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
 791			    (avg_time >> WB_FRN_TIME_AVG_SHIFT);
 792	else
 793		avg_time = max_time;	/* immediate catch up on first run */
 794
 795	if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
 796		int slots;
 797
 798		/*
 799		 * The switch verdict is reached if foreign wb's consume
 800		 * more than a certain proportion of IO time in a
 801		 * WB_FRN_TIME_PERIOD.  This is loosely tracked by 16 slot
 802		 * history mask where each bit represents one sixteenth of
 803		 * the period.  Determine the number of slots to shift into
 804		 * history from @max_time.
 805		 */
 806		slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
 807			    (unsigned long)WB_FRN_HIST_MAX_SLOTS);
 808		history <<= slots;
 809		if (wbc->wb_id != max_id)
 810			history |= (1U << slots) - 1;
 811
 812		if (history)
 813			trace_inode_foreign_history(inode, wbc, history);
 814
 815		/*
 816		 * Switch if the current wb isn't the consistent winner.
 817		 * If there are multiple closely competing dirtiers, the
 818		 * inode may switch across them repeatedly over time, which
 819		 * is okay.  The main goal is avoiding keeping an inode on
 820		 * the wrong wb for an extended period of time.
 821		 */
 822		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
 823			inode_switch_wbs(inode, max_id);
 824	}
 825
 826	/*
 827	 * Multiple instances of this function may race to update the
 828	 * following fields but we don't mind occasional inaccuracies.
 829	 */
 830	inode->i_wb_frn_winner = max_id;
 831	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
 832	inode->i_wb_frn_history = history;
 833
 834	wb_put(wbc->wb);
 835	wbc->wb = NULL;
 836}
 837EXPORT_SYMBOL_GPL(wbc_detach_inode);
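
/*
 * Worked example for the history bookkeeping above, with illustrative
 * numbers.  Assume a foreign cgroup wins a round worth three slots:
 *
 *	history  = 0x0003;		(two old foreign slots set)
 *	history <<= 3;			(age out the oldest slots)
 *	history |= (1U << 3) - 1;	(foreign winner fills new slots)
 *
 * leaving history == 0x001f with hweight32(history) == 5.  Only once the
 * foreign bits exceed WB_FRN_HIST_THR_SLOTS of the 16-slot mask does
 * inode_switch_wbs() actually move the inode, which keeps one-off foreign
 * writes from bouncing ownership back and forth.
 */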
 838
 839/**
 840 * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
 841 * @wbc: writeback_control of the writeback in progress
 842 * @page: page being written out
 843 * @bytes: number of bytes being written out
 844 *
 845	 * @bytes from @page are about to be written out during the writeback
 846 * controlled by @wbc.  Keep the book for foreign inode detection.  See
 847 * wbc_detach_inode().
 848 */
 849void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
 850			      size_t bytes)
 851{
 852	struct cgroup_subsys_state *css;
 853	int id;
 854
 855	/*
 856	 * pageout() path doesn't attach @wbc to the inode being written
 857	 * out.  This is intentional as we don't want the function to block
 858	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
 859	 * regular writeback instead of writing things out itself.
 860	 */
 861	if (!wbc->wb || wbc->no_cgroup_owner)
 862		return;
 863
 864	css = mem_cgroup_css_from_page(page);
 865	/* dead cgroups shouldn't contribute to inode ownership arbitration */
 866	if (!(css->flags & CSS_ONLINE))
 867		return;
 868
 869	id = css->id;
 870
 871	if (id == wbc->wb_id) {
 872		wbc->wb_bytes += bytes;
 873		return;
 874	}
 875
 876	if (id == wbc->wb_lcand_id)
 877		wbc->wb_lcand_bytes += bytes;
 878
 879	/* Boyer-Moore majority vote algorithm */
 880	if (!wbc->wb_tcand_bytes)
 881		wbc->wb_tcand_id = id;
 882	if (id == wbc->wb_tcand_id)
 883		wbc->wb_tcand_bytes += bytes;
 884	else
 885		wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
 886}
 887EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
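
/*
 * Worked example, with illustrative owners: four page-sized writes by
 * cgroups A, B, C, A while @wbc is attached to A's wb:
 *
 *	A: wb_bytes = 4096		(current wb is counted directly)
 *	B: wb_tcand_id = B, wb_tcand_bytes = 4096
 *	C: wb_tcand_bytes -= 4096 -> 0	(mismatch cancels B out)
 *	A: wb_bytes = 8192
 *
 * B and C pair off in classic Boyer-Moore fashion and wbc_detach_inode()
 * sees A as the clear winner of the round.
 */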
 888
 889/**
 890 * inode_congested - test whether an inode is congested
 891 * @inode: inode to test for congestion (may be NULL)
 892 * @cong_bits: mask of WB_[a]sync_congested bits to test
 893 *
 894 * Tests whether @inode is congested.  @cong_bits is the mask of congestion
 895 * bits to test and the return value is the mask of set bits.
 896 *
 897 * If cgroup writeback is enabled for @inode, the congestion state is
 898 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 899 * associated with @inode is congested; otherwise, the root wb's congestion
 900 * state is used.
 901 *
 902 * @inode is allowed to be NULL as this function is often called on
 903 * mapping->host which is NULL for the swapper space.
 904 */
 905int inode_congested(struct inode *inode, int cong_bits)
 906{
 907	/*
 908	 * Once set, ->i_wb never becomes NULL while the inode is alive.
 909	 * Start transaction iff ->i_wb is visible.
 910	 */
 911	if (inode && inode_to_wb_is_valid(inode)) {
 912		struct bdi_writeback *wb;
 913		struct wb_lock_cookie lock_cookie = {};
 914		bool congested;
 915
 916		wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
 917		congested = wb_congested(wb, cong_bits);
 918		unlocked_inode_to_wb_end(inode, &lock_cookie);
 919		return congested;
 920	}
 921
 922	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
 923}
 924EXPORT_SYMBOL_GPL(inode_congested);
 925
 926/**
 927 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 928 * @wb: target bdi_writeback to split @nr_pages to
 929 * @nr_pages: number of pages to write for the whole bdi
 930 *
 931 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 932 * relation to the total write bandwidth of all wb's w/ dirty inodes on
 933 * @wb->bdi.
 934 */
 935static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
 936{
 937	unsigned long this_bw = wb->avg_write_bandwidth;
 938	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
 939
 940	if (nr_pages == LONG_MAX)
 941		return LONG_MAX;
 942
 943	/*
 944	 * This may be called on clean wb's and proportional distribution
 945	 * may not make sense, just use the original @nr_pages in those
 946	 * cases.  In general, we want to err on the side of writing more.
 947	 */
 948	if (!tot_bw || this_bw >= tot_bw)
 949		return nr_pages;
 950	else
 951		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
 952}
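
/*
 * E.g. (illustrative numbers): for a 1024 page request, a wb averaging a
 * quarter of the bdi's total write bandwidth gets
 * DIV_ROUND_UP(1024 * 25, 100) = 256 pages, while a clean bdi
 * (tot_bw == 0) or a wb holding all of the bandwidth gets the full 1024.
 */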
 953
 954/**
 955 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
 956 * @bdi: target backing_dev_info
 957 * @base_work: wb_writeback_work to issue
 958 * @skip_if_busy: skip wb's which already have writeback in progress
 959 *
 960 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
 961	 * have dirty inodes.  If @base_work->nr_pages isn't %LONG_MAX, it's
 962 * distributed to the busy wbs according to each wb's proportion in the
 963 * total active write bandwidth of @bdi.
 964 */
 965static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 966				  struct wb_writeback_work *base_work,
 967				  bool skip_if_busy)
 968{
 969	struct bdi_writeback *last_wb = NULL;
 970	struct bdi_writeback *wb = list_entry(&bdi->wb_list,
 971					      struct bdi_writeback, bdi_node);
 972
 973	might_sleep();
 974restart:
 975	rcu_read_lock();
 976	list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
 977		DEFINE_WB_COMPLETION(fallback_work_done, bdi);
 978		struct wb_writeback_work fallback_work;
 979		struct wb_writeback_work *work;
 980		long nr_pages;
 981
 982		if (last_wb) {
 983			wb_put(last_wb);
 984			last_wb = NULL;
 985		}
 986
 987		/* SYNC_ALL writes out I_DIRTY_TIME too */
 988		if (!wb_has_dirty_io(wb) &&
 989		    (base_work->sync_mode == WB_SYNC_NONE ||
 990		     list_empty(&wb->b_dirty_time)))
 991			continue;
 992		if (skip_if_busy && writeback_in_progress(wb))
 993			continue;
 994
 995		nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
 996
 997		work = kmalloc(sizeof(*work), GFP_ATOMIC);
 998		if (work) {
 999			*work = *base_work;
1000			work->nr_pages = nr_pages;
1001			work->auto_free = 1;
1002			wb_queue_work(wb, work);
1003			continue;
1004		}
1005
1006		/* alloc failed, execute synchronously using on-stack fallback */
1007		work = &fallback_work;
1008		*work = *base_work;
1009		work->nr_pages = nr_pages;
1010		work->auto_free = 0;
1011		work->done = &fallback_work_done;
1012
1013		wb_queue_work(wb, work);
1014
1015		/*
1016		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
1017		 * continuing iteration from @wb after dropping and
1018		 * regrabbing rcu read lock.
1019		 */
1020		wb_get(wb);
1021		last_wb = wb;
1022
1023		rcu_read_unlock();
1024		wb_wait_for_completion(&fallback_work_done);
1025		goto restart;
1026	}
1027	rcu_read_unlock();
1028
1029	if (last_wb)
1030		wb_put(last_wb);
1031}
1032
1033/**
1034 * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
1035 * @bdi_id: target bdi id
1036 * @memcg_id: target memcg css id
1037 * @nr: number of pages to write, 0 for best-effort dirty flushing
1038	 * @reason: reason why some writeback work was initiated
1039 * @done: target wb_completion
1040 *
1041 * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
1042 * with the specified parameters.
1043 */
1044int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr,
1045			   enum wb_reason reason, struct wb_completion *done)
1046{
1047	struct backing_dev_info *bdi;
1048	struct cgroup_subsys_state *memcg_css;
1049	struct bdi_writeback *wb;
1050	struct wb_writeback_work *work;
1051	int ret;
1052
1053	/* lookup bdi and memcg */
1054	bdi = bdi_get_by_id(bdi_id);
1055	if (!bdi)
1056		return -ENOENT;
1057
1058	rcu_read_lock();
1059	memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
1060	if (memcg_css && !css_tryget(memcg_css))
1061		memcg_css = NULL;
1062	rcu_read_unlock();
1063	if (!memcg_css) {
1064		ret = -ENOENT;
1065		goto out_bdi_put;
1066	}
1067
1068	/*
1069	 * And find the associated wb.  If the wb isn't there already,
1070	 * there's nothing to flush; don't create one.
1071	 */
1072	wb = wb_get_lookup(bdi, memcg_css);
1073	if (!wb) {
1074		ret = -ENOENT;
1075		goto out_css_put;
1076	}
1077
1078	/*
1079	 * If @nr is zero, the caller is attempting to write out most of
1080	 * the currently dirty pages.  Let's take the current dirty page
1081	 * count and inflate it by 25% which should be large enough to
1082	 * flush out most dirty pages while avoiding getting livelocked by
1083	 * concurrent dirtiers.
1084	 */
1085	if (!nr) {
1086		unsigned long filepages, headroom, dirty, writeback;
1087
1088		mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty,
1089				      &writeback);
1090		nr = dirty * 10 / 8;
1091	}
1092
1093	/* issue the writeback work */
1094	work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
1095	if (work) {
1096		work->nr_pages = nr;
1097		work->sync_mode = WB_SYNC_NONE;
1098		work->range_cyclic = 1;
1099		work->reason = reason;
1100		work->done = done;
1101		work->auto_free = 1;
1102		wb_queue_work(wb, work);
1103		ret = 0;
1104	} else {
1105		ret = -ENOMEM;
1106	}
1107
1108	wb_put(wb);
1109out_css_put:
1110	css_put(memcg_css);
1111out_bdi_put:
1112	bdi_put(bdi);
1113	return ret;
1114}
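
/*
 * E.g. a best-effort call (@nr == 0) against a wb with 4000 dirty pages
 * queues nr = 4000 * 10 / 8 = 5000 pages of WB_SYNC_NONE writeback: the
 * 25% headroom covers pages dirtied while the work is in flight without
 * letting concurrent dirtiers livelock the flusher.
 */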
1115
1116/**
1117 * cgroup_writeback_umount - flush inode wb switches for umount
1118 *
1119 * This function is called when a super_block is about to be destroyed and
1120 * flushes in-flight inode wb switches.  An inode wb switch goes through
1121 * RCU and then workqueue, so the two need to be flushed in order to ensure
1122 * that all previously scheduled switches are finished.  As wb switches are
1123 * rare occurrences and synchronize_rcu() can take a while, perform
1124 * flushing iff wb switches are in flight.
1125 */
1126void cgroup_writeback_umount(void)
1127{
1128	/*
1129	 * SB_ACTIVE should be reliably cleared before checking
1130	 * isw_nr_in_flight, see generic_shutdown_super().
1131	 */
1132	smp_mb();
1133
1134	if (atomic_read(&isw_nr_in_flight)) {
1135		/*
1136		 * Use rcu_barrier() to wait for all pending callbacks to
1137		 * ensure that all in-flight wb switches are in the workqueue.
1138		 */
1139		rcu_barrier();
1140		flush_workqueue(isw_wq);
1141	}
1142}
1143
1144static int __init cgroup_writeback_init(void)
1145{
1146	isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
1147	if (!isw_wq)
1148		return -ENOMEM;
1149	return 0;
1150}
1151fs_initcall(cgroup_writeback_init);
1152
1153#else	/* CONFIG_CGROUP_WRITEBACK */
1154
1155static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1156static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1157
1158static void inode_cgwb_move_to_attached(struct inode *inode,
1159					struct bdi_writeback *wb)
1160{
1161	assert_spin_locked(&wb->list_lock);
1162	assert_spin_locked(&inode->i_lock);
1163
1164	inode->i_state &= ~I_SYNC_QUEUED;
1165	list_del_init(&inode->i_io_list);
1166	wb_io_lists_depopulated(wb);
1167}
1168
1169static struct bdi_writeback *
1170locked_inode_to_wb_and_lock_list(struct inode *inode)
1171	__releases(&inode->i_lock)
1172	__acquires(&wb->list_lock)
1173{
1174	struct bdi_writeback *wb = inode_to_wb(inode);
1175
1176	spin_unlock(&inode->i_lock);
1177	spin_lock(&wb->list_lock);
1178	return wb;
1179}
1180
1181static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
1182	__acquires(&wb->list_lock)
1183{
1184	struct bdi_writeback *wb = inode_to_wb(inode);
1185
1186	spin_lock(&wb->list_lock);
1187	return wb;
1188}
1189
1190static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
1191{
1192	return nr_pages;
1193}
1194
1195static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
1196				  struct wb_writeback_work *base_work,
1197				  bool skip_if_busy)
1198{
1199	might_sleep();
1200
1201	if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1202		base_work->auto_free = 0;
1203		wb_queue_work(&bdi->wb, base_work);
1204	}
1205}
1206
1207#endif	/* CONFIG_CGROUP_WRITEBACK */
1208
1209/*
1210 * Add in the number of potentially dirty inodes, because each inode
1211 * write can dirty pagecache in the underlying blockdev.
1212 */
1213static unsigned long get_nr_dirty_pages(void)
1214{
1215	return global_node_page_state(NR_FILE_DIRTY) +
1216		get_nr_dirty_inodes();
1217}
1218
1219static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
1220{
1221	if (!wb_has_dirty_io(wb))
1222		return;
1223
1224	/*
1225	 * All callers of this function want to start writeback of all
1226	 * dirty pages. Places like vmscan can call this at a very
1227	 * high frequency, causing pointless allocations of tons of
1228	 * work items and keeping the flusher threads busy retrieving
1229	 * that work. Ensure that we only allow one of them pending and
1230	 * in flight at a time.
1231	 */
1232	if (test_bit(WB_start_all, &wb->state) ||
1233	    test_and_set_bit(WB_start_all, &wb->state))
1234		return;
1235
1236	wb->start_all_reason = reason;
1237	wb_wakeup(wb);
1238}
1239
1240/**
1241 * wb_start_background_writeback - start background writeback
1242 * @wb: bdi_writeback to write from
1243 *
1244 * Description:
1245 *   This makes sure WB_SYNC_NONE background writeback happens. When
1246 *   this function returns, it is only guaranteed that for given wb
1247 *   some IO is happening if we are over background dirty threshold.
1248 *   Caller need not hold sb s_umount semaphore.
1249 */
1250void wb_start_background_writeback(struct bdi_writeback *wb)
1251{
1252	/*
1253	 * We just wake up the flusher thread. It will perform background
1254	 * writeback as soon as there is no other work to do.
1255	 */
1256	trace_writeback_wake_background(wb);
1257	wb_wakeup(wb);
1258}
1259
1260/*
1261 * Remove the inode from the writeback list it is on.
1262 */
1263void inode_io_list_del(struct inode *inode)
1264{
1265	struct bdi_writeback *wb;
1266
1267	wb = inode_to_wb_and_lock_list(inode);
1268	spin_lock(&inode->i_lock);
1269
1270	inode->i_state &= ~I_SYNC_QUEUED;
1271	list_del_init(&inode->i_io_list);
1272	wb_io_lists_depopulated(wb);
1273
1274	spin_unlock(&inode->i_lock);
1275	spin_unlock(&wb->list_lock);
1276}
1277EXPORT_SYMBOL(inode_io_list_del);
1278
1279/*
1280 * mark an inode as under writeback on the sb
1281 */
1282void sb_mark_inode_writeback(struct inode *inode)
1283{
1284	struct super_block *sb = inode->i_sb;
1285	unsigned long flags;
1286
1287	if (list_empty(&inode->i_wb_list)) {
1288		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1289		if (list_empty(&inode->i_wb_list)) {
1290			list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
1291			trace_sb_mark_inode_writeback(inode);
1292		}
1293		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1294	}
1295}
1296
1297/*
1298 * clear an inode as under writeback on the sb
1299 */
1300void sb_clear_inode_writeback(struct inode *inode)
1301{
1302	struct super_block *sb = inode->i_sb;
1303	unsigned long flags;
1304
1305	if (!list_empty(&inode->i_wb_list)) {
1306		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1307		if (!list_empty(&inode->i_wb_list)) {
1308			list_del_init(&inode->i_wb_list);
1309			trace_sb_clear_inode_writeback(inode);
1310		}
1311		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1312	}
1313}
1314
1315/*
1316 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
1317 * furthest end of its superblock's dirty-inode list.
1318 *
1319 * Before stamping the inode's ->dirtied_when, we check to see whether it is
1320 * already the most-recently-dirtied inode on the b_dirty list.  If that is
1321 * the case then the inode must have been redirtied while it was being written
1322 * out and we don't reset its dirtied_when.
1323 */
1324static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
1325{
1326	assert_spin_locked(&inode->i_lock);
1327
1328	if (!list_empty(&wb->b_dirty)) {
1329		struct inode *tail;
1330
1331		tail = wb_inode(wb->b_dirty.next);
1332		if (time_before(inode->dirtied_when, tail->dirtied_when))
1333			inode->dirtied_when = jiffies;
1334	}
1335	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
1336	inode->i_state &= ~I_SYNC_QUEUED;
1337}
1338
1339static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1340{
1341	spin_lock(&inode->i_lock);
1342	redirty_tail_locked(inode, wb);
1343	spin_unlock(&inode->i_lock);
1344}
1345
1346/*
1347 * requeue inode for re-scanning after bdi->b_io list is exhausted.
1348 */
1349static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1350{
1351	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
1352}
1353
1354static void inode_sync_complete(struct inode *inode)
1355{
1356	inode->i_state &= ~I_SYNC;
1357	/* If inode is clean and unused, put it into LRU now... */
1358	inode_add_lru(inode);
1359	/* Waiters must see I_SYNC cleared before being woken up */
1360	smp_mb();
1361	wake_up_bit(&inode->i_state, __I_SYNC);
1362}
1363
1364static bool inode_dirtied_after(struct inode *inode, unsigned long t)
1365{
1366	bool ret = time_after(inode->dirtied_when, t);
1367#ifndef CONFIG_64BIT
1368	/*
1369	 * For inodes being constantly redirtied, dirtied_when can get stuck.
1370	 * It _appears_ to be in the future, but is actually in distant past.
1371	 * This test is necessary to prevent such wrapped-around relative times
1372	 * from permanently stopping the whole bdi writeback.
1373	 */
1374	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
1375#endif
1376	return ret;
1377}
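
/*
 * E.g. with 32-bit jiffies and HZ=100 the counter wraps roughly every
 * 497 days, so a stuck dirtied_when stamped before a wrap can compare
 * as "after" the cutoff for a long stretch; the time_before_eq() check
 * above discards such stale but future-looking stamps.
 */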
1378
1379#define EXPIRE_DIRTY_ATIME 0x0001
1380
1381/*
1382 * Move expired (dirtied before dirtied_before) dirty inodes from
1383 * @delaying_queue to @dispatch_queue.
1384 */
1385static int move_expired_inodes(struct list_head *delaying_queue,
1386			       struct list_head *dispatch_queue,
1387			       unsigned long dirtied_before)
1388{
1389	LIST_HEAD(tmp);
1390	struct list_head *pos, *node;
1391	struct super_block *sb = NULL;
1392	struct inode *inode;
1393	int do_sb_sort = 0;
1394	int moved = 0;
1395
1396	while (!list_empty(delaying_queue)) {
1397		inode = wb_inode(delaying_queue->prev);
1398		if (inode_dirtied_after(inode, dirtied_before))
1399			break;
1400		list_move(&inode->i_io_list, &tmp);
1401		moved++;
1402		spin_lock(&inode->i_lock);
1403		inode->i_state |= I_SYNC_QUEUED;
1404		spin_unlock(&inode->i_lock);
1405		if (sb_is_blkdev_sb(inode->i_sb))
1406			continue;
1407		if (sb && sb != inode->i_sb)
1408			do_sb_sort = 1;
1409		sb = inode->i_sb;
1410	}
1411
1412	/* just one sb in list, splice to dispatch_queue and we're done */
1413	if (!do_sb_sort) {
1414		list_splice(&tmp, dispatch_queue);
1415		goto out;
1416	}
1417
1418	/* Move inodes from one superblock together */
1419	while (!list_empty(&tmp)) {
1420		sb = wb_inode(tmp.prev)->i_sb;
1421		list_for_each_prev_safe(pos, node, &tmp) {
1422			inode = wb_inode(pos);
1423			if (inode->i_sb == sb)
1424				list_move(&inode->i_io_list, dispatch_queue);
1425		}
1426	}
1427out:
1428	return moved;
1429}
1430
1431/*
1432 * Queue all expired dirty inodes for io, eldest first.
1433 * Before
1434 *         newly dirtied     b_dirty    b_io    b_more_io
1435 *         =============>    gf         edc     BA
1436 * After
1437 *         newly dirtied     b_dirty    b_io    b_more_io
1438 *         =============>    g          fBAedc
1439 *                                           |
1440 *                                           +--> dequeue for IO
1441 */
1442static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
1443		     unsigned long dirtied_before)
1444{
1445	int moved;
1446	unsigned long time_expire_jif = dirtied_before;
1447
1448	assert_spin_locked(&wb->list_lock);
1449	list_splice_init(&wb->b_more_io, &wb->b_io);
1450	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
1451	if (!work->for_sync)
1452		time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
1453	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
1454				     time_expire_jif);
1455	if (moved)
1456		wb_io_lists_populated(wb);
1457	trace_writeback_queue_io(wb, work, dirtied_before, moved);
1458}
1459
1460static int write_inode(struct inode *inode, struct writeback_control *wbc)
1461{
1462	int ret;
1463
1464	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
1465		trace_writeback_write_inode_start(inode, wbc);
1466		ret = inode->i_sb->s_op->write_inode(inode, wbc);
1467		trace_writeback_write_inode(inode, wbc);
1468		return ret;
1469	}
1470	return 0;
1471}
1472
1473/*
1474 * Wait for writeback on an inode to complete. Called with i_lock held.
1475 * Caller must make sure inode cannot go away when we drop i_lock.
1476 */
1477static void __inode_wait_for_writeback(struct inode *inode)
1478	__releases(inode->i_lock)
1479	__acquires(inode->i_lock)
1480{
1481	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
1482	wait_queue_head_t *wqh;
1483
1484	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1485	while (inode->i_state & I_SYNC) {
1486		spin_unlock(&inode->i_lock);
1487		__wait_on_bit(wqh, &wq, bit_wait,
1488			      TASK_UNINTERRUPTIBLE);
1489		spin_lock(&inode->i_lock);
1490	}
1491}
1492
1493/*
1494 * Wait for writeback on an inode to complete. Caller must have inode pinned.
1495 */
1496void inode_wait_for_writeback(struct inode *inode)
1497{
1498	spin_lock(&inode->i_lock);
1499	__inode_wait_for_writeback(inode);
1500	spin_unlock(&inode->i_lock);
1501}
1502
1503/*
1504 * Sleep until I_SYNC is cleared. This function must be called with i_lock
1505 * held and drops it. It is aimed for callers not holding any inode reference
1506 * so once i_lock is dropped, inode can go away.
1507 */
1508static void inode_sleep_on_writeback(struct inode *inode)
1509	__releases(inode->i_lock)
1510{
1511	DEFINE_WAIT(wait);
1512	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1513	int sleep;
1514
1515	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1516	sleep = inode->i_state & I_SYNC;
1517	spin_unlock(&inode->i_lock);
1518	if (sleep)
1519		schedule();
1520	finish_wait(wqh, &wait);
1521}
1522
1523/*
1524 * Find the proper writeback list for the inode, depending on its current state
1525 * and possible changes of its state while we were doing writeback.  Here we
1526 * handle things such as livelock prevention and fairness of writeback among
1527 * inodes. This function can only be called by the flusher thread - no one else
1528 * processes all inodes in writeback lists and requeueing inodes behind the
1529 * flusher thread's back can have unexpected consequences.
1530 */
1531static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1532			  struct writeback_control *wbc)
1533{
1534	if (inode->i_state & I_FREEING)
1535		return;
1536
1537	/*
1538	 * Sync livelock prevention. Each inode is tagged and synced in one
1539	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
1540	 * the dirty time to prevent enqueue and sync it again.
1541	 */
1542	if ((inode->i_state & I_DIRTY) &&
1543	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
1544		inode->dirtied_when = jiffies;
1545
1546	if (wbc->pages_skipped) {
1547		/*
1548		 * writeback is not making progress due to locked
1549		 * buffers. Skip this inode for now.
1550		 */
1551		redirty_tail_locked(inode, wb);
1552		return;
1553	}
1554
1555	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
1556		/*
1557		 * We didn't write back all the pages.  nfs_writepages()
1558		 * sometimes bales out without doing anything.
1559		 */
1560		if (wbc->nr_to_write <= 0) {
1561			/* Slice used up. Queue for next turn. */
1562			requeue_io(inode, wb);
1563		} else {
1564			/*
1565			 * Writeback blocked by something other than
1566			 * congestion. Delay the inode for some time to
1567			 * avoid spinning on the CPU (100% iowait)
1568			 * retrying writeback of the dirty page/inode
1569			 * that cannot be performed immediately.
1570			 */
1571			redirty_tail_locked(inode, wb);
1572		}
1573	} else if (inode->i_state & I_DIRTY) {
1574		/*
1575		 * Filesystems can dirty the inode during writeback operations,
1576		 * such as delayed allocation during submission or metadata
1577		 * updates after data IO completion.
1578		 */
1579		redirty_tail_locked(inode, wb);
1580	} else if (inode->i_state & I_DIRTY_TIME) {
1581		inode->dirtied_when = jiffies;
1582		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1583		inode->i_state &= ~I_SYNC_QUEUED;
1584	} else {
1585		/* The inode is clean. Remove from writeback lists. */
1586		inode_cgwb_move_to_attached(inode, wb);
1587	}
1588}
1589
1590/*
1591 * Write out an inode and its dirty pages (or some of its dirty pages, depending
1592 * on @wbc->nr_to_write), and clear the relevant dirty flags from i_state.
1593 *
1594 * This doesn't remove the inode from the writeback list it is on, except
1595 * potentially to move it from b_dirty_time to b_dirty due to timestamp
1596 * expiration.  The caller is otherwise responsible for writeback list handling.
1597 *
1598 * The caller is also responsible for setting the I_SYNC flag beforehand and
1599 * calling inode_sync_complete() to clear it afterwards.
1600 */
1601static int
1602__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1603{
1604	struct address_space *mapping = inode->i_mapping;
1605	long nr_to_write = wbc->nr_to_write;
1606	unsigned dirty;
1607	int ret;
1608
1609	WARN_ON(!(inode->i_state & I_SYNC));
1610
1611	trace_writeback_single_inode_start(inode, wbc, nr_to_write);
1612
1613	ret = do_writepages(mapping, wbc);
1614
1615	/*
1616	 * Make sure to wait on the data before writing out the metadata.
1617	 * This is important for filesystems that modify metadata on data
1618	 * I/O completion. We don't do it for sync(2) writeback because it has a
1619	 * separate, external IO completion path and ->sync_fs for guaranteeing
1620	 * inode metadata is written back correctly.
1621	 */
1622	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
1623		int err = filemap_fdatawait(mapping);
1624		if (ret == 0)
1625			ret = err;
1626	}
1627
1628	/*
1629	 * If the inode has dirty timestamps and we need to write them, call
1630	 * mark_inode_dirty_sync() to notify the filesystem about it and to
1631	 * change I_DIRTY_TIME into I_DIRTY_SYNC.
1632	 */
1633	if ((inode->i_state & I_DIRTY_TIME) &&
1634	    (wbc->sync_mode == WB_SYNC_ALL ||
1635	     time_after(jiffies, inode->dirtied_time_when +
1636			dirtytime_expire_interval * HZ))) {
1637		trace_writeback_lazytime(inode);
1638		mark_inode_dirty_sync(inode);
1639	}
1640
1641	/*
1642	 * Get and clear the dirty flags from i_state.  This needs to be done
1643	 * after calling writepages because some filesystems may redirty the
1644	 * inode during writepages due to delalloc.  It also needs to be done
1645	 * after handling timestamp expiration, as that may dirty the inode too.
1646	 */
1647	spin_lock(&inode->i_lock);
1648	dirty = inode->i_state & I_DIRTY;
1649	inode->i_state &= ~dirty;
1650
1651	/*
1652	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
1653	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
1654	 * either they see the I_DIRTY bits cleared or we see the dirtied
1655	 * inode.
1656	 *
1657	 * I_DIRTY_PAGES is always cleared together above even if @mapping
1658	 * still has dirty pages.  The flag is reinstated after smp_mb() if
1659	 * necessary.  This guarantees that either __mark_inode_dirty()
1660	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
1661	 */
1662	smp_mb();
1663
1664	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
1665		inode->i_state |= I_DIRTY_PAGES;
1666
1667	spin_unlock(&inode->i_lock);
1668
1669	/* Don't write the inode if only I_DIRTY_PAGES was set */
1670	if (dirty & ~I_DIRTY_PAGES) {
1671		int err = write_inode(inode, wbc);
1672		if (ret == 0)
1673			ret = err;
1674	}
1675	trace_writeback_single_inode(inode, wbc, nr_to_write);
1676	return ret;
1677}
1678
1679/*
1680 * Write out an inode's dirty data and metadata on-demand, i.e. separately from
1681 * the regular batched writeback done by the flusher threads in
1682 * writeback_sb_inodes().  @wbc controls various aspects of the write, such as
1683 * whether it is a data-integrity sync (%WB_SYNC_ALL) or not (%WB_SYNC_NONE).
1684 *
1685 * To prevent the inode from going away, either the caller must have a reference
1686 * to the inode, or the inode must have I_WILL_FREE or I_FREEING set.
1687 */
1688static int writeback_single_inode(struct inode *inode,
1689				  struct writeback_control *wbc)
1690{
1691	struct bdi_writeback *wb;
1692	int ret = 0;
1693
1694	spin_lock(&inode->i_lock);
1695	if (!atomic_read(&inode->i_count))
1696		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
1697	else
1698		WARN_ON(inode->i_state & I_WILL_FREE);
1699
1700	if (inode->i_state & I_SYNC) {
1701		/*
1702		 * Writeback is already running on the inode.  For WB_SYNC_NONE,
1703		 * that's enough and we can just return.  For WB_SYNC_ALL, we
1704		 * must wait for the existing writeback to complete, then do
1705		 * writeback again if there's anything left.
1706		 */
1707		if (wbc->sync_mode != WB_SYNC_ALL)
1708			goto out;
1709		__inode_wait_for_writeback(inode);
1710	}
1711	WARN_ON(inode->i_state & I_SYNC);
1712	/*
1713	 * If the inode is already fully clean, then there's nothing to do.
1714	 *
1715	 * For data-integrity syncs we also need to check whether any pages are
1716	 * still under writeback, e.g. due to prior WB_SYNC_NONE writeback.  If
1717	 * there are any such pages, we'll need to wait for them.
1718	 */
1719	if (!(inode->i_state & I_DIRTY_ALL) &&
1720	    (wbc->sync_mode != WB_SYNC_ALL ||
1721	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
1722		goto out;
1723	inode->i_state |= I_SYNC;
1724	wbc_attach_and_unlock_inode(wbc, inode);
1725
1726	ret = __writeback_single_inode(inode, wbc);
1727
1728	wbc_detach_inode(wbc);
1729
1730	wb = inode_to_wb_and_lock_list(inode);
1731	spin_lock(&inode->i_lock);
1732	/*
1733	 * If the inode is now fully clean, then it can be safely removed from
1734	 * its writeback list (if any).  Otherwise the flusher threads are
1735	 * responsible for the writeback lists.
1736	 */
1737	if (!(inode->i_state & I_DIRTY_ALL))
1738		inode_cgwb_move_to_attached(inode, wb);
1739	spin_unlock(&wb->list_lock);
1740	inode_sync_complete(inode);
1741out:
1742	spin_unlock(&inode->i_lock);
1743	return ret;
1744}
1745
1746static long writeback_chunk_size(struct bdi_writeback *wb,
1747				 struct wb_writeback_work *work)
1748{
1749	long pages;
1750
1751	/*
1752	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
1753	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
1754	 * here avoids calling into writeback_inodes_wb() more than once.
1755	 *
1756	 * The intended call sequence for WB_SYNC_ALL writeback is:
1757	 *
1758	 *      wb_writeback()
1759	 *          writeback_sb_inodes()       <== called only once
1760	 *              write_cache_pages()     <== called once for each inode
1761	 *                   (quickly) tag currently dirty pages
1762	 *                   (maybe slowly) sync all tagged pages
1763	 */
1764	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
1765		pages = LONG_MAX;
1766	else {
1767		pages = min(wb->avg_write_bandwidth / 2,
1768			    global_wb_domain.dirty_limit / DIRTY_SCOPE);
1769		pages = min(pages, work->nr_pages);
1770		pages = round_down(pages + MIN_WRITEBACK_PAGES,
1771				   MIN_WRITEBACK_PAGES);
1772	}
1773
1774	return pages;
1775}
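
/*
 * Worked example, assuming 4K pages so MIN_WRITEBACK_PAGES == 1024: for
 * WB_SYNC_NONE work against a wb averaging 12800 pages/s,
 *
 *	pages = min(12800 / 2, dirty_limit / DIRTY_SCOPE);	(say 6400)
 *	pages = min(6400, work->nr_pages);
 *	pages = round_down(6400 + 1024, 1024);			(= 7168)
 *
 * i.e. chunks are snapped to 4MB multiples and never fall below 4MB,
 * while WB_SYNC_ALL and tagged_writepages writeback always gets LONG_MAX.
 */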
1776
1777/*
1778 * Write a portion of b_io inodes which belong to @sb.
1779 *
1780 * Return the number of pages and/or inodes written.
1781 *
1782 * NOTE! This is called with wb->list_lock held, and will
1783 * unlock and relock that for each inode it ends up doing
1784 * IO for.
1785 */
1786static long writeback_sb_inodes(struct super_block *sb,
1787				struct bdi_writeback *wb,
1788				struct wb_writeback_work *work)
1789{
1790	struct writeback_control wbc = {
1791		.sync_mode		= work->sync_mode,
1792		.tagged_writepages	= work->tagged_writepages,
1793		.for_kupdate		= work->for_kupdate,
1794		.for_background		= work->for_background,
1795		.for_sync		= work->for_sync,
1796		.range_cyclic		= work->range_cyclic,
1797		.range_start		= 0,
1798		.range_end		= LLONG_MAX,
1799	};
1800	unsigned long start_time = jiffies;
1801	long write_chunk;
1802	long wrote = 0;  /* count both pages and inodes */
1803
1804	while (!list_empty(&wb->b_io)) {
1805		struct inode *inode = wb_inode(wb->b_io.prev);
1806		struct bdi_writeback *tmp_wb;
1807
1808		if (inode->i_sb != sb) {
1809			if (work->sb) {
1810				/*
1811				 * We only want to write back data for this
1812				 * superblock, move all inodes not belonging
1813				 * to it back onto the dirty list.
1814				 */
1815				redirty_tail(inode, wb);
1816				continue;
1817			}
1818
1819			/*
1820			 * The inode belongs to a different superblock.
1821			 * Bounce back to the caller to unpin this and
1822			 * pin the next superblock.
1823			 */
1824			break;
1825		}
1826
1827		/*
1828		 * Don't bother with new inodes or inodes being freed: the first
1829		 * kind does not need periodic writeout yet, and for the latter
1830		 * kind writeout is handled by the freer.
1831		 */
1832		spin_lock(&inode->i_lock);
1833		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
1834			redirty_tail_locked(inode, wb);
1835			spin_unlock(&inode->i_lock);
1836			continue;
1837		}
1838		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
1839			/*
1840			 * If this inode is locked for writeback and we are not
1841			 * doing writeback-for-data-integrity, move it to
1842			 * b_more_io so that writeback can proceed with the
1843			 * other inodes on s_io.
1844			 *
1845			 * We'll have another go at writing back this inode
1846			 * when we have completed a full scan of b_io.
1847			 */
1848			spin_unlock(&inode->i_lock);
1849			requeue_io(inode, wb);
1850			trace_writeback_sb_inodes_requeue(inode);
1851			continue;
1852		}
1853		spin_unlock(&wb->list_lock);
1854
1855		/*
1856		 * We already requeued the inode if it had I_SYNC set and we
1857		 * are doing WB_SYNC_NONE writeback. So this catches only the
1858		 * WB_SYNC_ALL case.
1859		 */
1860		if (inode->i_state & I_SYNC) {
1861			/* Wait for I_SYNC. This function drops i_lock... */
1862			inode_sleep_on_writeback(inode);
1863			/* Inode may be gone, start again */
1864			spin_lock(&wb->list_lock);
1865			continue;
1866		}
1867		inode->i_state |= I_SYNC;
1868		wbc_attach_and_unlock_inode(&wbc, inode);
1869
1870		write_chunk = writeback_chunk_size(wb, work);
1871		wbc.nr_to_write = write_chunk;
1872		wbc.pages_skipped = 0;
1873
1874		/*
1875		 * We use I_SYNC to pin the inode in memory. While it is set
1876		 * evict_inode() will wait so the inode cannot be freed.
1877		 */
1878		__writeback_single_inode(inode, &wbc);
1879
1880		wbc_detach_inode(&wbc);
1881		work->nr_pages -= write_chunk - wbc.nr_to_write;
1882		wrote += write_chunk - wbc.nr_to_write;
1883
1884		if (need_resched()) {
1885			/*
1886			 * We're trying to balance between building up a nice
1887			 * long list of IOs to improve our merge rate, and
1888			 * getting those IOs out quickly for anyone throttling
1889			 * in balance_dirty_pages().  cond_resched() doesn't
1890			 * unplug, so get our IOs out the door before we
1891			 * give up the CPU.
1892			 */
1893			blk_flush_plug(current);
1894			cond_resched();
1895		}
1896
1897		/*
1898		 * Requeue @inode if still dirty.  Be careful as @inode may
1899		 * have been switched to another wb in the meantime.
1900		 */
1901		tmp_wb = inode_to_wb_and_lock_list(inode);
1902		spin_lock(&inode->i_lock);
1903		if (!(inode->i_state & I_DIRTY_ALL))
1904			wrote++;
1905		requeue_inode(inode, tmp_wb, &wbc);
1906		inode_sync_complete(inode);
1907		spin_unlock(&inode->i_lock);
1908
1909		if (unlikely(tmp_wb != wb)) {
1910			spin_unlock(&tmp_wb->list_lock);
1911			spin_lock(&wb->list_lock);
1912		}
1913
1914		/*
1915		 * bail out to wb_writeback() often enough to check
1916		 * background threshold and other termination conditions.
1917		 */
1918		if (wrote) {
1919			if (time_is_before_jiffies(start_time + HZ / 10UL))
1920				break;
1921			if (work->nr_pages <= 0)
1922				break;
1923		}
1924	}
1925	return wrote;
1926}
1927
1928static long __writeback_inodes_wb(struct bdi_writeback *wb,
1929				  struct wb_writeback_work *work)
1930{
1931	unsigned long start_time = jiffies;
1932	long wrote = 0;
1933
1934	while (!list_empty(&wb->b_io)) {
1935		struct inode *inode = wb_inode(wb->b_io.prev);
1936		struct super_block *sb = inode->i_sb;
1937
1938		if (!trylock_super(sb)) {
1939			/*
1940			 * trylock_super() may fail consistently due to
1941			 * s_umount being grabbed by someone else. Don't use
1942			 * requeue_io() to avoid busy retrying the inode/sb.
1943			 */
1944			redirty_tail(inode, wb);
1945			continue;
1946		}
1947		wrote += writeback_sb_inodes(sb, wb, work);
1948		up_read(&sb->s_umount);
1949
1950		/* refer to the same tests at the end of writeback_sb_inodes */
1951		if (wrote) {
1952			if (time_is_before_jiffies(start_time + HZ / 10UL))
1953				break;
1954			if (work->nr_pages <= 0)
1955				break;
1956		}
1957	}
1958	/* Leave any unwritten inodes on b_io */
1959	return wrote;
1960}
1961
1962static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
1963				enum wb_reason reason)
1964{
1965	struct wb_writeback_work work = {
1966		.nr_pages	= nr_pages,
1967		.sync_mode	= WB_SYNC_NONE,
1968		.range_cyclic	= 1,
1969		.reason		= reason,
1970	};
1971	struct blk_plug plug;
1972
1973	blk_start_plug(&plug);
1974	spin_lock(&wb->list_lock);
1975	if (list_empty(&wb->b_io))
1976		queue_io(wb, &work, jiffies);
1977	__writeback_inodes_wb(wb, &work);
1978	spin_unlock(&wb->list_lock);
1979	blk_finish_plug(&plug);
1980
1981	return nr_pages - work.nr_pages;
1982}
1983
1984/*
1985 * Explicit flushing or periodic writeback of "old" data.
1986 *
1987 * Define "old": the first time one of an inode's pages is dirtied, we mark the
1988 * dirtying-time in the inode's address_space.  So this periodic writeback code
1989 * just walks the superblock inode list, writing back any inodes which are
1990 * older than a specific point in time.
1991 *
1992 * Try to run once per dirty_writeback_interval.  But if a writeback event
1993 * takes longer than one dirty_writeback_interval, then leave a
1994 * one-second gap.
1995 *
1996 * dirtied_before takes precedence over nr_to_write.  So we'll only write back
1997 * all dirty pages if they are all attached to "old" mappings.
1998 */
1999static long wb_writeback(struct bdi_writeback *wb,
2000			 struct wb_writeback_work *work)
2001{
2002	unsigned long wb_start = jiffies;
2003	long nr_pages = work->nr_pages;
2004	unsigned long dirtied_before = jiffies;
2005	struct inode *inode;
2006	long progress;
2007	struct blk_plug plug;
2008
2009	blk_start_plug(&plug);
2010	spin_lock(&wb->list_lock);
2011	for (;;) {
2012		/*
2013		 * Stop writeback when nr_pages has been consumed
2014		 */
2015		if (work->nr_pages <= 0)
2016			break;
2017
2018		/*
2019		 * Background writeout and kupdate-style writeback may
2020		 * run forever. Stop them if there is other work to do
2021		 * so that e.g. sync can proceed. They'll be restarted
2022		 * after the other works are all done.
2023		 */
2024		if ((work->for_background || work->for_kupdate) &&
2025		    !list_empty(&wb->work_list))
2026			break;
2027
2028		/*
2029		 * For background writeout, stop when we are below the
2030		 * background dirty threshold
2031		 */
2032		if (work->for_background && !wb_over_bg_thresh(wb))
2033			break;
2034
2035		/*
2036		 * Kupdate and background works are special and we want to
2037		 * include all inodes that need writing. Livelock avoidance is
2038		 * handled by these works yielding to any other work so we are
2039		 * safe.
2040		 */
2041		if (work->for_kupdate) {
2042			dirtied_before = jiffies -
2043				msecs_to_jiffies(dirty_expire_interval * 10);
2044		} else if (work->for_background)
2045			dirtied_before = jiffies;
2046
2047		trace_writeback_start(wb, work);
2048		if (list_empty(&wb->b_io))
2049			queue_io(wb, work, dirtied_before);
2050		if (work->sb)
2051			progress = writeback_sb_inodes(work->sb, wb, work);
2052		else
2053			progress = __writeback_inodes_wb(wb, work);
2054		trace_writeback_written(wb, work);
2055
2056		wb_update_bandwidth(wb, wb_start);
2057
2058		/*
2059		 * Did we write something? Try for more
2060		 *
2061		 * Dirty inodes are moved to b_io for writeback in batches.
2062		 * The completion of the current batch does not necessarily
2063		 * mean the overall work is done. So we keep looping as long
2064		 * as we make some progress on cleaning pages or inodes.
2065		 */
2066		if (progress)
2067			continue;
2068		/*
2069		 * No more inodes for IO, bail
2070		 */
2071		if (list_empty(&wb->b_more_io))
2072			break;
2073		/*
2074		 * Nothing written. Wait for some inode to
2075		 * become available for writeback. Otherwise
2076		 * we'll just busyloop.
2077		 */
2078		trace_writeback_wait(wb, work);
2079		inode = wb_inode(wb->b_more_io.prev);
2080		spin_lock(&inode->i_lock);
2081		spin_unlock(&wb->list_lock);
2082		/* This function drops i_lock... */
2083		inode_sleep_on_writeback(inode);
2084		spin_lock(&wb->list_lock);
2085	}
2086	spin_unlock(&wb->list_lock);
2087	blk_finish_plug(&plug);
2088
2089	return nr_pages - work->nr_pages;
2090}
2091
2092/*
2093 * Return the next wb_writeback_work struct that hasn't been processed yet.
2094 */
2095static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
2096{
2097	struct wb_writeback_work *work = NULL;
2098
2099	spin_lock_bh(&wb->work_lock);
2100	if (!list_empty(&wb->work_list)) {
2101		work = list_entry(wb->work_list.next,
2102				  struct wb_writeback_work, list);
2103		list_del_init(&work->list);
2104	}
2105	spin_unlock_bh(&wb->work_lock);
2106	return work;
2107}
2108
2109static long wb_check_background_flush(struct bdi_writeback *wb)
2110{
2111	if (wb_over_bg_thresh(wb)) {
2112
2113		struct wb_writeback_work work = {
2114			.nr_pages	= LONG_MAX,
2115			.sync_mode	= WB_SYNC_NONE,
2116			.for_background	= 1,
2117			.range_cyclic	= 1,
2118			.reason		= WB_REASON_BACKGROUND,
2119		};
2120
2121		return wb_writeback(wb, &work);
2122	}
2123
2124	return 0;
2125}
2126
2127static long wb_check_old_data_flush(struct bdi_writeback *wb)
2128{
2129	unsigned long expired;
2130	long nr_pages;
2131
2132	/*
2133	 * When set to zero, disable periodic writeback
2134	 */
2135	if (!dirty_writeback_interval)
2136		return 0;
2137
2138	expired = wb->last_old_flush +
2139			msecs_to_jiffies(dirty_writeback_interval * 10);
2140	if (time_before(jiffies, expired))
2141		return 0;
2142
2143	wb->last_old_flush = jiffies;
2144	nr_pages = get_nr_dirty_pages();
2145
2146	if (nr_pages) {
2147		struct wb_writeback_work work = {
2148			.nr_pages	= nr_pages,
2149			.sync_mode	= WB_SYNC_NONE,
2150			.for_kupdate	= 1,
2151			.range_cyclic	= 1,
2152			.reason		= WB_REASON_PERIODIC,
2153		};
2154
2155		return wb_writeback(wb, &work);
2156	}
2157
2158	return 0;
2159}
2160
2161static long wb_check_start_all(struct bdi_writeback *wb)
2162{
2163	long nr_pages;
2164
2165	if (!test_bit(WB_start_all, &wb->state))
2166		return 0;
2167
2168	nr_pages = get_nr_dirty_pages();
2169	if (nr_pages) {
2170		struct wb_writeback_work work = {
2171			.nr_pages	= wb_split_bdi_pages(wb, nr_pages),
2172			.sync_mode	= WB_SYNC_NONE,
2173			.range_cyclic	= 1,
2174			.reason		= wb->start_all_reason,
2175		};
2176
2177		nr_pages = wb_writeback(wb, &work);
2178	}
2179
2180	clear_bit(WB_start_all, &wb->state);
2181	return nr_pages;
2182}
2183
2184
2185/*
2186 * Retrieve work items and do the writeback they describe
2187 */
2188static long wb_do_writeback(struct bdi_writeback *wb)
2189{
2190	struct wb_writeback_work *work;
2191	long wrote = 0;
2192
2193	set_bit(WB_writeback_running, &wb->state);
2194	while ((work = get_next_work_item(wb)) != NULL) {
2195		trace_writeback_exec(wb, work);
2196		wrote += wb_writeback(wb, work);
2197		finish_writeback_work(wb, work);
2198	}
2199
2200	/*
2201	 * Check for a flush-everything request
2202	 */
2203	wrote += wb_check_start_all(wb);
2204
2205	/*
2206	 * Check for periodic writeback, kupdated() style
2207	 */
2208	wrote += wb_check_old_data_flush(wb);
2209	wrote += wb_check_background_flush(wb);
2210	clear_bit(WB_writeback_running, &wb->state);
2211
2212	return wrote;
2213}
2214
2215/*
2216 * Handle writeback of dirty data for the device backed by this bdi. Also
2217 * reschedules periodically and does kupdated style flushing.
2218 */
2219void wb_workfn(struct work_struct *work)
2220{
2221	struct bdi_writeback *wb = container_of(to_delayed_work(work),
2222						struct bdi_writeback, dwork);
2223	long pages_written;
2224
2225	set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
2226	current->flags |= PF_SWAPWRITE;
2227
2228	if (likely(!current_is_workqueue_rescuer() ||
2229		   !test_bit(WB_registered, &wb->state))) {
2230		/*
2231		 * The normal path.  Keep writing back @wb until its
2232		 * work_list is empty.  Note that this path is also taken
2233		 * if @wb is shutting down even when we're running off the
2234		 * rescuer as work_list needs to be drained.
2235		 */
2236		do {
2237			pages_written = wb_do_writeback(wb);
2238			trace_writeback_pages_written(pages_written);
2239		} while (!list_empty(&wb->work_list));
2240	} else {
2241		/*
2242		 * bdi_wq can't get enough workers and we're running off
2243		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
2244		 * enough for efficient IO.
2245		 */
2246		pages_written = writeback_inodes_wb(wb, 1024,
2247						    WB_REASON_FORKER_THREAD);
2248		trace_writeback_pages_written(pages_written);
2249	}
2250
2251	if (!list_empty(&wb->work_list))
2252		wb_wakeup(wb);
2253	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2254		wb_wakeup_delayed(wb);
2255
2256	current->flags &= ~PF_SWAPWRITE;
2257}
2258
2259/*
2260 * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
2261 * write back the whole world.
2262 */
2263static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2264					 enum wb_reason reason)
2265{
2266	struct bdi_writeback *wb;
2267
2268	if (!bdi_has_dirty_io(bdi))
2269		return;
2270
2271	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2272		wb_start_writeback(wb, reason);
2273}
2274
2275void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2276				enum wb_reason reason)
2277{
2278	rcu_read_lock();
2279	__wakeup_flusher_threads_bdi(bdi, reason);
2280	rcu_read_unlock();
2281}
2282
2283/*
2284 * Wakeup the flusher threads to start writeback of all currently dirty pages
2285 */
2286void wakeup_flusher_threads(enum wb_reason reason)
2287{
2288	struct backing_dev_info *bdi;
2289
2290	/*
2291	 * If we are expecting writeback progress we must submit plugged IO.
2292	 */
2293	if (blk_needs_flush_plug(current))
2294		blk_schedule_flush_plug(current);
2295
2296	rcu_read_lock();
2297	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2298		__wakeup_flusher_threads_bdi(bdi, reason);
2299	rcu_read_unlock();
2300}
2301
2302/*
2303 * Wake up bdi's periodically to make sure dirtytime inodes get
2304 * written back periodically.  We deliberately do *not* check the
2305 * b_dirtytime list in wb_has_dirty_io(), since this would cause the
2306 * kernel to be constantly waking up once there are any dirtytime
2307 * inodes on the system.  So instead we define a separate delayed work
2308 * function which gets called much more rarely.  (By default, only
2309 * once every 12 hours.)
2310 *
2311 * If there is any other write activity going on in the file system,
2312 * this function won't be necessary.  But if the only thing that has
2313 * happened on the file system is a dirtytime inode caused by an atime
2314 * update, we need this infrastructure below to make sure that inode
2315 * eventually gets pushed out to disk.
2316 */
2317static void wakeup_dirtytime_writeback(struct work_struct *w);
2318static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
2319
2320static void wakeup_dirtytime_writeback(struct work_struct *w)
2321{
2322	struct backing_dev_info *bdi;
2323
2324	rcu_read_lock();
2325	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
2326		struct bdi_writeback *wb;
2327
2328		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2329			if (!list_empty(&wb->b_dirty_time))
2330				wb_wakeup(wb);
2331	}
2332	rcu_read_unlock();
2333	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2334}
2335
2336static int __init start_dirtytime_writeback(void)
2337{
2338	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2339	return 0;
2340}
2341__initcall(start_dirtytime_writeback);
2342
2343int dirtytime_interval_handler(struct ctl_table *table, int write,
2344			       void *buffer, size_t *lenp, loff_t *ppos)
2345{
2346	int ret;
2347
2348	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2349	if (ret == 0 && write)
2350		mod_delayed_work(system_wq, &dirtytime_work, 0);
2351	return ret;
2352}
2353
2354/**
2355 * __mark_inode_dirty -	internal function to mark an inode dirty
2356 *
2357 * @inode: inode to mark
2358 * @flags: what kind of dirty, e.g. I_DIRTY_SYNC.  This can be a combination of
2359 *	   multiple I_DIRTY_* flags, except that I_DIRTY_TIME can't be combined
2360 *	   with I_DIRTY_PAGES.
2361 *
2362 * Mark an inode as dirty.  We notify the filesystem, then update the inode's
2363 * dirty flags.  Then, if needed we add the inode to the appropriate dirty list.
2364 *
2365 * Most callers should use mark_inode_dirty() or mark_inode_dirty_sync()
2366 * instead of calling this directly.
2367 *
2368 * CAREFUL!  We only add the inode to the dirty list if it is hashed or if it
2369 * refers to a blockdev.  Unhashed inodes will never be added to the dirty list
2370 * even if they are later hashed, as they will have been marked dirty already.
2371 *
2372 * In short, ensure you hash any inodes _before_ you start marking them dirty.
2373 *
2374 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
2375 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
2376 * the kernel-internal blockdev inode represents the dirtying time of the
2377 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
2378 * page->mapping->host, so the page-dirtying time is recorded in the internal
2379 * blockdev inode.
2380 */
2381void __mark_inode_dirty(struct inode *inode, int flags)
2382{
2383	struct super_block *sb = inode->i_sb;
2384	int dirtytime = 0;
2385
2386	trace_writeback_mark_inode_dirty(inode, flags);
2387
2388	if (flags & I_DIRTY_INODE) {
2389		/*
2390		 * Notify the filesystem about the inode being dirtied, so that
2391		 * (if needed) it can update on-disk fields and journal the
2392		 * inode.  This is only needed when the inode itself is being
2393		 * dirtied now.  I.e. it's only needed for I_DIRTY_INODE, not
2394		 * for just I_DIRTY_PAGES or I_DIRTY_TIME.
2395		 */
2396		trace_writeback_dirty_inode_start(inode, flags);
2397		if (sb->s_op->dirty_inode)
2398			sb->s_op->dirty_inode(inode, flags & I_DIRTY_INODE);
2399		trace_writeback_dirty_inode(inode, flags);
2400
2401		/* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
2402		flags &= ~I_DIRTY_TIME;
2403	} else {
2404		/*
2405		 * Else it's either I_DIRTY_PAGES, I_DIRTY_TIME, or nothing.
2406		 * (We don't support setting both I_DIRTY_PAGES and I_DIRTY_TIME
2407		 * in one call to __mark_inode_dirty().)
2408		 */
2409		dirtytime = flags & I_DIRTY_TIME;
2410		WARN_ON_ONCE(dirtytime && flags != I_DIRTY_TIME);
2411	}
2412
2413	/*
2414	 * Paired with smp_mb() in __writeback_single_inode() for the
2415	 * following lockless i_state test.  See there for details.
2416	 */
2417	smp_mb();
2418
2419	if (((inode->i_state & flags) == flags) ||
2420	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
2421		return;
2422
2423	spin_lock(&inode->i_lock);
2424	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
2425		goto out_unlock_inode;
2426	if ((inode->i_state & flags) != flags) {
2427		const int was_dirty = inode->i_state & I_DIRTY;
2428
2429		inode_attach_wb(inode, NULL);
2430
2431		/* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
2432		if (flags & I_DIRTY_INODE)
2433			inode->i_state &= ~I_DIRTY_TIME;
2434		inode->i_state |= flags;
2435
2436		/*
2437		 * If the inode is queued for writeback by flush worker, just
2438		 * update its dirty state. Once the flush worker is done with
2439		 * the inode it will place it on the appropriate superblock
2440		 * list, based upon its state.
2441		 */
2442		if (inode->i_state & I_SYNC_QUEUED)
2443			goto out_unlock_inode;
2444
2445		/*
2446		 * Only add valid (hashed) inodes to the superblock's
2447		 * dirty list.  Add blockdev inodes as well.
2448		 */
2449		if (!S_ISBLK(inode->i_mode)) {
2450			if (inode_unhashed(inode))
2451				goto out_unlock_inode;
2452		}
2453		if (inode->i_state & I_FREEING)
2454			goto out_unlock_inode;
2455
2456		/*
2457		 * If the inode was already on b_dirty/b_io/b_more_io, don't
2458		 * reposition it (that would break b_dirty time-ordering).
2459		 */
2460		if (!was_dirty) {
2461			struct bdi_writeback *wb;
2462			struct list_head *dirty_list;
2463			bool wakeup_bdi = false;
2464
2465			wb = locked_inode_to_wb_and_lock_list(inode);
2466
2467			inode->dirtied_when = jiffies;
2468			if (dirtytime)
2469				inode->dirtied_time_when = jiffies;
2470
2471			if (inode->i_state & I_DIRTY)
2472				dirty_list = &wb->b_dirty;
2473			else
2474				dirty_list = &wb->b_dirty_time;
2475
2476			wakeup_bdi = inode_io_list_move_locked(inode, wb,
2477							       dirty_list);
2478
2479			spin_unlock(&wb->list_lock);
2480			trace_writeback_dirty_inode_enqueue(inode);
2481
2482			/*
2483			 * If this is the first dirty inode for this bdi,
2484			 * we have to wake-up the corresponding bdi thread
2485			 * to make sure background write-back happens
2486			 * later.
2487			 */
2488			if (wakeup_bdi &&
2489			    (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
2490				wb_wakeup_delayed(wb);
2491			return;
2492		}
2493	}
2494out_unlock_inode:
2495	spin_unlock(&inode->i_lock);
2496}
2497EXPORT_SYMBOL(__mark_inode_dirty);
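
/*
 * Illustrative usage sketch (not part of the original file; the example_*
 * name is hypothetical and the block is guarded by #if 0 so it is never
 * built). Filesystems normally reach __mark_inode_dirty() through the
 * wrappers in <linux/fs.h>: mark_inode_dirty() passes I_DIRTY and
 * mark_inode_dirty_sync() passes I_DIRTY_SYNC.
 */
#if 0
static void example_fs_update_size(struct inode *inode, loff_t new_size)
{
	/* Real callers hold whatever locks their filesystem requires here. */
	i_size_write(inode, new_size);
	/* The inode itself changed: dirty the inode via the wrapper. */
	mark_inode_dirty(inode);
	/* A timestamp-only update would instead be recorded lazily with: */
	/* __mark_inode_dirty(inode, I_DIRTY_TIME); */
}
#endif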
2498
2499/*
2500 * The @s_sync_lock is used to serialise concurrent sync operations
2501 * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
2502 * Concurrent callers will block on the s_sync_lock rather than doing contending
2503 * walks. The queueing maintains the behaviour required by sync(2): all the
2504 * IO issued up to the time this function is entered is guaranteed to be
2505 * completed by the time we have gained the lock and waited for all IO that is
2506 * in progress, regardless of the order in which callers are granted the lock.
2507 */
2508static void wait_sb_inodes(struct super_block *sb)
2509{
2510	LIST_HEAD(sync_list);
2511
2512	/*
2513	 * We need to be protected against the filesystem going from
2514	 * r/o to r/w or vice versa.
2515	 */
2516	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2517
2518	mutex_lock(&sb->s_sync_lock);
2519
2520	/*
2521	 * Splice the writeback list onto a temporary list to avoid waiting on
2522	 * inodes that have started writeback after this point.
2523	 *
2524	 * Use rcu_read_lock() to keep the inodes around until we have a
2525	 * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
2526	 * the local list because inodes can be dropped from either by writeback
2527	 * completion.
2528	 */
2529	rcu_read_lock();
2530	spin_lock_irq(&sb->s_inode_wblist_lock);
2531	list_splice_init(&sb->s_inodes_wb, &sync_list);
2532
2533	/*
2534	 * Data integrity sync. Must wait for all pages under writeback, because
2535 * some pages may have been dirtied before our sync call but had their
2536 * writeout started before we could write them out.  In that case the inode
2537 * may not be on the dirty list, but we still have to wait for that
2538 * writeout.
2539	 */
2540	while (!list_empty(&sync_list)) {
2541		struct inode *inode = list_first_entry(&sync_list, struct inode,
2542						       i_wb_list);
2543		struct address_space *mapping = inode->i_mapping;
2544
2545		/*
2546		 * Move each inode back to the wb list before we drop the lock
2547		 * to preserve consistency between i_wb_list and the mapping
2548 * writeback tag. Writeback completion is responsible for removing
2549 * the inode from either list once the writeback tag is cleared.
2550		 */
2551		list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2552
2553		/*
2554		 * The mapping can appear untagged while still on-list since we
2555		 * do not have the mapping lock. Skip it here, wb completion
2556		 * will remove it.
2557		 */
2558		if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2559			continue;
2560
2561		spin_unlock_irq(&sb->s_inode_wblist_lock);
2562
2563		spin_lock(&inode->i_lock);
2564		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2565			spin_unlock(&inode->i_lock);
2566
2567			spin_lock_irq(&sb->s_inode_wblist_lock);
2568			continue;
2569		}
2570		__iget(inode);
2571		spin_unlock(&inode->i_lock);
2572		rcu_read_unlock();
2573
2574		/*
2575 * We keep the error status of each individual mapping so that
2576		 * applications can catch the writeback error using fsync(2).
2577		 * See filemap_fdatawait_keep_errors() for details.
2578		 */
2579		filemap_fdatawait_keep_errors(mapping);
2580
2581		cond_resched();
2582
2583		iput(inode);
2584
2585		rcu_read_lock();
2586		spin_lock_irq(&sb->s_inode_wblist_lock);
2587	}
2588	spin_unlock_irq(&sb->s_inode_wblist_lock);
2589	rcu_read_unlock();
2590	mutex_unlock(&sb->s_sync_lock);
2591}
2592
2593static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
2594				     enum wb_reason reason, bool skip_if_busy)
2595{
2596	struct backing_dev_info *bdi = sb->s_bdi;
2597	DEFINE_WB_COMPLETION(done, bdi);
2598	struct wb_writeback_work work = {
2599		.sb			= sb,
2600		.sync_mode		= WB_SYNC_NONE,
2601		.tagged_writepages	= 1,
2602		.done			= &done,
2603		.nr_pages		= nr,
2604		.reason			= reason,
2605	};
2606
2607	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
2608		return;
2609	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2610
2611	bdi_split_work_to_wbs(bdi, &work, skip_if_busy);
2612	wb_wait_for_completion(&done);
2613}
2614
2615/**
2616 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
2617 * @sb: the superblock
2618 * @nr: the number of pages to write
2619 * @reason: reason why some writeback work was initiated
2620 *
2621 * Start writeback on some inodes on this super_block. No guarantees are made
2622 * on how many (if any) will be written, and this function does not wait
2623 * for IO completion of submitted IO.
2624 */
2625void writeback_inodes_sb_nr(struct super_block *sb,
2626			    unsigned long nr,
2627			    enum wb_reason reason)
2628{
2629	__writeback_inodes_sb_nr(sb, nr, reason, false);
2630}
2631EXPORT_SYMBOL(writeback_inodes_sb_nr);
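
/*
 * Illustrative usage sketch (hypothetical caller; #if 0 so it is never
 * built): start background writeback of up to 1024 pages of a superblock
 * without waiting for completion.  s_umount must already be held, which is
 * why opportunistic callers use try_to_writeback_inodes_sb() below instead.
 */
#if 0
	down_read(&sb->s_umount);
	writeback_inodes_sb_nr(sb, 1024, WB_REASON_FS_FREE_SPACE);
	up_read(&sb->s_umount);
#endif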
2632
2633/**
2634 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
2635 * @sb: the superblock
2636 * @reason: reason why some writeback work was initiated
2637 *
2638 * Start writeback on some inodes on this super_block. No guarantees are made
2639 * on how many (if any) will be written, and this function does not wait
2640 * for IO completion of submitted IO.
2641 */
2642void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2643{
2644	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
2645}
2646EXPORT_SYMBOL(writeback_inodes_sb);
2647
2648/**
2649 * try_to_writeback_inodes_sb - try to start writeback if none underway
2650 * @sb: the superblock
2651 * @reason: reason why some writeback work was initiated
2652 *
2653 * Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
2654 */
2655void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2656{
2657	if (!down_read_trylock(&sb->s_umount))
2658		return;
2659
2660	__writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
2661	up_read(&sb->s_umount);
2662}
2663EXPORT_SYMBOL(try_to_writeback_inodes_sb);
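
/*
 * Illustrative usage sketch (#if 0, never built): a filesystem that hits
 * ENOSPC can opportunistically flush dirty data to release reservations and
 * then retry the failing allocation.  The retry loop and
 * example_fs_allocate() are hypothetical simplifications, not a specific
 * in-tree caller.
 */
#if 0
	for (tries = 0; tries < 3; tries++) {
		err = example_fs_allocate(inode, len);	/* hypothetical */
		if (err != -ENOSPC)
			break;
		try_to_writeback_inodes_sb(inode->i_sb,
					   WB_REASON_FS_FREE_SPACE);
	}
#endif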
2664
2665/**
2666 * sync_inodes_sb	-	sync sb inode pages
2667 * @sb: the superblock
2668 *
2669 * This function writes and waits on any dirty inode belonging to this
2670 * super_block.
2671 */
2672void sync_inodes_sb(struct super_block *sb)
2673{
2674	struct backing_dev_info *bdi = sb->s_bdi;
2675	DEFINE_WB_COMPLETION(done, bdi);
2676	struct wb_writeback_work work = {
2677		.sb		= sb,
2678		.sync_mode	= WB_SYNC_ALL,
2679		.nr_pages	= LONG_MAX,
2680		.range_cyclic	= 0,
2681		.done		= &done,
2682		.reason		= WB_REASON_SYNC,
2683		.for_sync	= 1,
2684	};
2685
2686	/*
2687	 * Can't skip on !bdi_has_dirty() because we need to wait for inodes
2688	 * under writeback that are no longer dirty, and I_DIRTY_TIME inodes
2689	 * ignored by bdi_has_dirty() need to be written out too.
2690	 */
2691	if (bdi == &noop_backing_dev_info)
2692		return;
2693	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2694
2695	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
2696	bdi_down_write_wb_switch_rwsem(bdi);
2697	bdi_split_work_to_wbs(bdi, &work, false);
2698	wb_wait_for_completion(&done);
2699	bdi_up_write_wb_switch_rwsem(bdi);
2700
2701	wait_sb_inodes(sb);
2702}
2703EXPORT_SYMBOL(sync_inodes_sb);
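
/*
 * Illustrative sketch (#if 0, never built): the sync(2) path in fs/sync.c
 * iterates all superblocks and calls sync_inodes_sb() for each writable
 * one, via a per-sb callback roughly like the hypothetical one below.
 */
#if 0
static void example_sync_inodes_one_sb(struct super_block *sb, void *arg)
{
	if (!sb_rdonly(sb))
		sync_inodes_sb(sb);
}
	/* ... iterate_supers(example_sync_inodes_one_sb, NULL); ... */
#endif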
2704
2705/**
2706 * write_inode_now	-	write an inode to disk
2707 * @inode: inode to write to disk
2708 * @sync: whether the write should be synchronous or not
2709 *
2710 * This function commits an inode to disk immediately if it is dirty. This is
2711 * primarily needed by knfsd.
2712 *
2713 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
2714 */
2715int write_inode_now(struct inode *inode, int sync)
2716{
2717	struct writeback_control wbc = {
2718		.nr_to_write = LONG_MAX,
2719		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
2720		.range_start = 0,
2721		.range_end = LLONG_MAX,
2722	};
2723
2724	if (!mapping_can_writeback(inode->i_mapping))
2725		wbc.nr_to_write = 0;
2726
2727	might_sleep();
2728	return writeback_single_inode(inode, &wbc);
2729}
2730EXPORT_SYMBOL(write_inode_now);
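
/*
 * Illustrative usage sketch (#if 0, never built): a caller such as knfsd
 * that must have an inode's dirty state on disk before proceeding.  The
 * error handling shown is hypothetical.
 */
#if 0
	err = write_inode_now(inode, 1);	/* 1 => WB_SYNC_ALL, waits */
	if (err)
		pr_warn("example: inode %lu writeback failed: %d\n",
			inode->i_ino, err);
#endif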
2731
2732/**
2733 * sync_inode - write an inode and its pages to disk.
2734 * @inode: the inode to sync
2735 * @wbc: controls the writeback mode
2736 *
2737 * sync_inode() will write an inode and its pages to disk.  It will also
2738 * correctly update the inode on its superblock's dirty inode lists and will
2739 * update inode->i_state.
2740 *
2741 * The caller must have a ref on the inode.
2742 */
2743int sync_inode(struct inode *inode, struct writeback_control *wbc)
2744{
2745	return writeback_single_inode(inode, wbc);
2746}
2747EXPORT_SYMBOL(sync_inode);
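
/*
 * Illustrative usage sketch (#if 0, never built): sync_inode() lets the
 * caller supply its own writeback_control; the values below are
 * hypothetical.
 */
#if 0
	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_NONE,
		.nr_to_write = 16,	/* write at most 16 pages, don't wait */
	};
	sync_inode(inode, &wbc);
#endif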
2748
2749/**
2750 * sync_inode_metadata - write an inode to disk
2751 * @inode: the inode to sync
2752 * @wait: wait for I/O to complete.
2753 *
2754 * Write an inode to disk and adjust its dirty state after completion.
2755 *
2756 * Note: only writes the actual inode, no associated data or other metadata.
2757 */
2758int sync_inode_metadata(struct inode *inode, int wait)
2759{
2760	struct writeback_control wbc = {
2761		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
2762		.nr_to_write = 0, /* metadata-only */
2763	};
2764
2765	return sync_inode(inode, &wbc);
2766}
2767EXPORT_SYMBOL(sync_inode_metadata);
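
/*
 * Illustrative usage sketch (#if 0, never built): a simple ->fsync()
 * implementation flushes data pages first, then persists the inode itself
 * with sync_inode_metadata().  example_fsync() is hypothetical.
 */
#if 0
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	err = file_write_and_wait_range(file, start, end);
	if (!err && !datasync)
		err = sync_inode_metadata(inode, 1);
	return err;
}
#endif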