Linux Audio

Check our new training course

Linux BSP development engineering services

Need help to port Linux and bootloaders to your hardware?
Loading...
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/fs-writeback.c
   4 *
   5 * Copyright (C) 2002, Linus Torvalds.
   6 *
   7 * Contains all the functions related to writing back and waiting
   8 * upon dirty inodes against superblocks, and writing back dirty
   9 * pages against inodes.  ie: data writeback.  Writeout of the
  10 * inode itself is not handled here.
  11 *
  12 * 10Apr2002	Andrew Morton
  13 *		Split out of fs/inode.c
  14 *		Additions for address_space-based writeback
  15 */
  16
  17#include <linux/kernel.h>
  18#include <linux/export.h>
  19#include <linux/spinlock.h>
  20#include <linux/slab.h>
  21#include <linux/sched.h>
  22#include <linux/fs.h>
  23#include <linux/mm.h>
  24#include <linux/pagemap.h>
  25#include <linux/kthread.h>
  26#include <linux/writeback.h>
  27#include <linux/blkdev.h>
  28#include <linux/backing-dev.h>
  29#include <linux/tracepoint.h>
  30#include <linux/device.h>
  31#include <linux/memcontrol.h>
  32#include "internal.h"
  33
  34/*
  35 * 4MB minimal write chunk size
  36 */
  37#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))
  38
  39/*
  40 * Passed into wb_writeback(), essentially a subset of writeback_control
  41 */
  42struct wb_writeback_work {
  43	long nr_pages;
  44	struct super_block *sb;
  45	enum writeback_sync_modes sync_mode;
  46	unsigned int tagged_writepages:1;
  47	unsigned int for_kupdate:1;
  48	unsigned int range_cyclic:1;
  49	unsigned int for_background:1;
  50	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
  51	unsigned int auto_free:1;	/* free on completion */
  52	enum wb_reason reason;		/* why was writeback initiated? */
  53
  54	struct list_head list;		/* pending work list */
  55	struct wb_completion *done;	/* set if the caller waits */
  56};
  57
  58/*
  59 * If an inode is constantly having its pages dirtied, but then the
  60 * updates stop dirtytime_expire_interval seconds in the past, it's
  61 * possible for the worst case time between when an inode has its
  62 * timestamps updated and when they finally get written out to be two
  63 * dirtytime_expire_intervals.  We set the default to 12 hours (in
  64 * seconds), which means most of the time inodes will have their
  65 * timestamps written to disk after 12 hours, but in the worst case a
  66 * few inodes might not their timestamps updated for 24 hours.
  67 */
  68unsigned int dirtytime_expire_interval = 12 * 60 * 60;
  69
  70static inline struct inode *wb_inode(struct list_head *head)
  71{
  72	return list_entry(head, struct inode, i_io_list);
  73}
  74
  75/*
  76 * Include the creation of the trace points after defining the
  77 * wb_writeback_work structure and inline functions so that the definition
  78 * remains local to this file.
  79 */
  80#define CREATE_TRACE_POINTS
  81#include <trace/events/writeback.h>
  82
  83EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
  84
  85static bool wb_io_lists_populated(struct bdi_writeback *wb)
  86{
  87	if (wb_has_dirty_io(wb)) {
  88		return false;
  89	} else {
  90		set_bit(WB_has_dirty_io, &wb->state);
  91		WARN_ON_ONCE(!wb->avg_write_bandwidth);
  92		atomic_long_add(wb->avg_write_bandwidth,
  93				&wb->bdi->tot_write_bandwidth);
  94		return true;
  95	}
  96}
  97
  98static void wb_io_lists_depopulated(struct bdi_writeback *wb)
  99{
 100	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
 101	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
 102		clear_bit(WB_has_dirty_io, &wb->state);
 103		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
 104					&wb->bdi->tot_write_bandwidth) < 0);
 105	}
 106}
 107
 108/**
 109 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
 110 * @inode: inode to be moved
 111 * @wb: target bdi_writeback
 112 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
 113 *
 114 * Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io.
 115 * Returns %true if @inode is the first occupant of the !dirty_time IO
 116 * lists; otherwise, %false.
 117 */
 118static bool inode_io_list_move_locked(struct inode *inode,
 119				      struct bdi_writeback *wb,
 120				      struct list_head *head)
 121{
 122	assert_spin_locked(&wb->list_lock);
 
 
 123
 124	list_move(&inode->i_io_list, head);
 125
 126	/* dirty_time doesn't count as dirty_io until expiration */
 127	if (head != &wb->b_dirty_time)
 128		return wb_io_lists_populated(wb);
 129
 130	wb_io_lists_depopulated(wb);
 131	return false;
 132}
 133
 134static void wb_wakeup(struct bdi_writeback *wb)
 135{
 136	spin_lock_bh(&wb->work_lock);
 137	if (test_bit(WB_registered, &wb->state))
 138		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 139	spin_unlock_bh(&wb->work_lock);
 140}
 141
 142static void finish_writeback_work(struct bdi_writeback *wb,
 143				  struct wb_writeback_work *work)
 144{
 145	struct wb_completion *done = work->done;
 146
 147	if (work->auto_free)
 148		kfree(work);
 149	if (done) {
 150		wait_queue_head_t *waitq = done->waitq;
 151
 152		/* @done can't be accessed after the following dec */
 153		if (atomic_dec_and_test(&done->cnt))
 154			wake_up_all(waitq);
 155	}
 156}
 157
 158static void wb_queue_work(struct bdi_writeback *wb,
 159			  struct wb_writeback_work *work)
 160{
 161	trace_writeback_queue(wb, work);
 162
 163	if (work->done)
 164		atomic_inc(&work->done->cnt);
 165
 166	spin_lock_bh(&wb->work_lock);
 167
 168	if (test_bit(WB_registered, &wb->state)) {
 169		list_add_tail(&work->list, &wb->work_list);
 170		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 171	} else
 172		finish_writeback_work(wb, work);
 173
 174	spin_unlock_bh(&wb->work_lock);
 175}
 176
 177/**
 178 * wb_wait_for_completion - wait for completion of bdi_writeback_works
 179 * @done: target wb_completion
 180 *
 181 * Wait for one or more work items issued to @bdi with their ->done field
 182 * set to @done, which should have been initialized with
 183 * DEFINE_WB_COMPLETION().  This function returns after all such work items
 184 * are completed.  Work items which are waited upon aren't freed
 185 * automatically on completion.
 186 */
 187void wb_wait_for_completion(struct wb_completion *done)
 188{
 189	atomic_dec(&done->cnt);		/* put down the initial count */
 190	wait_event(*done->waitq, !atomic_read(&done->cnt));
 191}
 192
 193#ifdef CONFIG_CGROUP_WRITEBACK
 194
 195/*
 196 * Parameters for foreign inode detection, see wbc_detach_inode() to see
 197 * how they're used.
 198 *
 199 * These paramters are inherently heuristical as the detection target
 200 * itself is fuzzy.  All we want to do is detaching an inode from the
 201 * current owner if it's being written to by some other cgroups too much.
 202 *
 203 * The current cgroup writeback is built on the assumption that multiple
 204 * cgroups writing to the same inode concurrently is very rare and a mode
 205 * of operation which isn't well supported.  As such, the goal is not
 206 * taking too long when a different cgroup takes over an inode while
 207 * avoiding too aggressive flip-flops from occasional foreign writes.
 208 *
 209 * We record, very roughly, 2s worth of IO time history and if more than
 210 * half of that is foreign, trigger the switch.  The recording is quantized
 211 * to 16 slots.  To avoid tiny writes from swinging the decision too much,
 212 * writes smaller than 1/8 of avg size are ignored.
 213 */
 214#define WB_FRN_TIME_SHIFT	13	/* 1s = 2^13, upto 8 secs w/ 16bit */
 215#define WB_FRN_TIME_AVG_SHIFT	3	/* avg = avg * 7/8 + new * 1/8 */
 216#define WB_FRN_TIME_CUT_DIV	8	/* ignore rounds < avg / 8 */
 217#define WB_FRN_TIME_PERIOD	(2 * (1 << WB_FRN_TIME_SHIFT))	/* 2s */
 218
 219#define WB_FRN_HIST_SLOTS	16	/* inode->i_wb_frn_history is 16bit */
 220#define WB_FRN_HIST_UNIT	(WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
 221					/* each slot's duration is 2s / 16 */
 222#define WB_FRN_HIST_THR_SLOTS	(WB_FRN_HIST_SLOTS / 2)
 223					/* if foreign slots >= 8, switch */
 224#define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
 225					/* one round can affect upto 5 slots */
 226#define WB_FRN_MAX_IN_FLIGHT	1024	/* don't queue too many concurrently */
 227
 228/*
 229 * Maximum inodes per isw.  A specific value has been chosen to make
 230 * struct inode_switch_wbs_context fit into 1024 bytes kmalloc.
 231 */
 232#define WB_MAX_INODES_PER_ISW  ((1024UL - sizeof(struct inode_switch_wbs_context)) \
 233                                / sizeof(struct inode *))
 234
 235static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
 236static struct workqueue_struct *isw_wq;
 237
 238void __inode_attach_wb(struct inode *inode, struct page *page)
 239{
 240	struct backing_dev_info *bdi = inode_to_bdi(inode);
 241	struct bdi_writeback *wb = NULL;
 242
 243	if (inode_cgwb_enabled(inode)) {
 244		struct cgroup_subsys_state *memcg_css;
 245
 246		if (page) {
 247			memcg_css = mem_cgroup_css_from_page(page);
 248			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 249		} else {
 250			/* must pin memcg_css, see wb_get_create() */
 251			memcg_css = task_get_css(current, memory_cgrp_id);
 252			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 253			css_put(memcg_css);
 254		}
 255	}
 256
 257	if (!wb)
 258		wb = &bdi->wb;
 259
 260	/*
 261	 * There may be multiple instances of this function racing to
 262	 * update the same inode.  Use cmpxchg() to tell the winner.
 263	 */
 264	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
 265		wb_put(wb);
 266}
 267EXPORT_SYMBOL_GPL(__inode_attach_wb);
 268
 269/**
 270 * inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
 271 * @inode: inode of interest with i_lock held
 272 * @wb: target bdi_writeback
 273 *
 274 * Remove the inode from wb's io lists and if necessarily put onto b_attached
 275 * list.  Only inodes attached to cgwb's are kept on this list.
 276 */
 277static void inode_cgwb_move_to_attached(struct inode *inode,
 278					struct bdi_writeback *wb)
 279{
 280	assert_spin_locked(&wb->list_lock);
 281	assert_spin_locked(&inode->i_lock);
 
 282
 283	inode->i_state &= ~I_SYNC_QUEUED;
 284	if (wb != &wb->bdi->wb)
 285		list_move(&inode->i_io_list, &wb->b_attached);
 286	else
 287		list_del_init(&inode->i_io_list);
 288	wb_io_lists_depopulated(wb);
 289}
 290
 291/**
 292 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
 293 * @inode: inode of interest with i_lock held
 294 *
 295 * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
 296 * held on entry and is released on return.  The returned wb is guaranteed
 297 * to stay @inode's associated wb until its list_lock is released.
 298 */
 299static struct bdi_writeback *
 300locked_inode_to_wb_and_lock_list(struct inode *inode)
 301	__releases(&inode->i_lock)
 302	__acquires(&wb->list_lock)
 303{
 304	while (true) {
 305		struct bdi_writeback *wb = inode_to_wb(inode);
 306
 307		/*
 308		 * inode_to_wb() association is protected by both
 309		 * @inode->i_lock and @wb->list_lock but list_lock nests
 310		 * outside i_lock.  Drop i_lock and verify that the
 311		 * association hasn't changed after acquiring list_lock.
 312		 */
 313		wb_get(wb);
 314		spin_unlock(&inode->i_lock);
 315		spin_lock(&wb->list_lock);
 316
 317		/* i_wb may have changed inbetween, can't use inode_to_wb() */
 318		if (likely(wb == inode->i_wb)) {
 319			wb_put(wb);	/* @inode already has ref */
 320			return wb;
 321		}
 322
 323		spin_unlock(&wb->list_lock);
 324		wb_put(wb);
 325		cpu_relax();
 326		spin_lock(&inode->i_lock);
 327	}
 328}
 329
 330/**
 331 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
 332 * @inode: inode of interest
 333 *
 334 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
 335 * on entry.
 336 */
 337static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
 338	__acquires(&wb->list_lock)
 339{
 340	spin_lock(&inode->i_lock);
 341	return locked_inode_to_wb_and_lock_list(inode);
 342}
 343
 344struct inode_switch_wbs_context {
 345	struct rcu_work		work;
 346
 347	/*
 348	 * Multiple inodes can be switched at once.  The switching procedure
 349	 * consists of two parts, separated by a RCU grace period.  To make
 350	 * sure that the second part is executed for each inode gone through
 351	 * the first part, all inode pointers are placed into a NULL-terminated
 352	 * array embedded into struct inode_switch_wbs_context.  Otherwise
 353	 * an inode could be left in a non-consistent state.
 354	 */
 355	struct bdi_writeback	*new_wb;
 356	struct inode		*inodes[];
 357};
 358
 359static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 360{
 361	down_write(&bdi->wb_switch_rwsem);
 362}
 363
 364static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 365{
 366	up_write(&bdi->wb_switch_rwsem);
 367}
 368
 369static bool inode_do_switch_wbs(struct inode *inode,
 370				struct bdi_writeback *old_wb,
 371				struct bdi_writeback *new_wb)
 372{
 373	struct address_space *mapping = inode->i_mapping;
 374	XA_STATE(xas, &mapping->i_pages, 0);
 375	struct page *page;
 376	bool switched = false;
 377
 378	spin_lock(&inode->i_lock);
 379	xa_lock_irq(&mapping->i_pages);
 380
 381	/*
 382	 * Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
 383	 * path owns the inode and we shouldn't modify ->i_io_list.
 384	 */
 385	if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
 386		goto skip_switch;
 387
 388	trace_inode_switch_wbs(inode, old_wb, new_wb);
 389
 390	/*
 391	 * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
 392	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
 393	 * pages actually under writeback.
 394	 */
 395	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
 396		if (PageDirty(page)) {
 397			dec_wb_stat(old_wb, WB_RECLAIMABLE);
 398			inc_wb_stat(new_wb, WB_RECLAIMABLE);
 
 399		}
 400	}
 401
 402	xas_set(&xas, 0);
 403	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
 404		WARN_ON_ONCE(!PageWriteback(page));
 405		dec_wb_stat(old_wb, WB_WRITEBACK);
 406		inc_wb_stat(new_wb, WB_WRITEBACK);
 
 
 
 
 
 
 407	}
 408
 409	wb_get(new_wb);
 410
 411	/*
 412	 * Transfer to @new_wb's IO list if necessary.  If the @inode is dirty,
 413	 * the specific list @inode was on is ignored and the @inode is put on
 414	 * ->b_dirty which is always correct including from ->b_dirty_time.
 415	 * The transfer preserves @inode->dirtied_when ordering.  If the @inode
 416	 * was clean, it means it was on the b_attached list, so move it onto
 417	 * the b_attached list of @new_wb.
 418	 */
 419	if (!list_empty(&inode->i_io_list)) {
 420		inode->i_wb = new_wb;
 421
 422		if (inode->i_state & I_DIRTY_ALL) {
 423			struct inode *pos;
 424
 425			list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
 426				if (time_after_eq(inode->dirtied_when,
 427						  pos->dirtied_when))
 428					break;
 429			inode_io_list_move_locked(inode, new_wb,
 430						  pos->i_io_list.prev);
 431		} else {
 432			inode_cgwb_move_to_attached(inode, new_wb);
 433		}
 434	} else {
 435		inode->i_wb = new_wb;
 436	}
 437
 438	/* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */
 439	inode->i_wb_frn_winner = 0;
 440	inode->i_wb_frn_avg_time = 0;
 441	inode->i_wb_frn_history = 0;
 442	switched = true;
 443skip_switch:
 444	/*
 445	 * Paired with load_acquire in unlocked_inode_to_wb_begin() and
 446	 * ensures that the new wb is visible if they see !I_WB_SWITCH.
 447	 */
 448	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
 449
 450	xa_unlock_irq(&mapping->i_pages);
 451	spin_unlock(&inode->i_lock);
 452
 453	return switched;
 454}
 455
 456static void inode_switch_wbs_work_fn(struct work_struct *work)
 457{
 458	struct inode_switch_wbs_context *isw =
 459		container_of(to_rcu_work(work), struct inode_switch_wbs_context, work);
 460	struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]);
 461	struct bdi_writeback *old_wb = isw->inodes[0]->i_wb;
 462	struct bdi_writeback *new_wb = isw->new_wb;
 463	unsigned long nr_switched = 0;
 464	struct inode **inodep;
 465
 466	/*
 467	 * If @inode switches cgwb membership while sync_inodes_sb() is
 468	 * being issued, sync_inodes_sb() might miss it.  Synchronize.
 469	 */
 470	down_read(&bdi->wb_switch_rwsem);
 471
 472	/*
 473	 * By the time control reaches here, RCU grace period has passed
 474	 * since I_WB_SWITCH assertion and all wb stat update transactions
 475	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
 476	 * synchronizing against the i_pages lock.
 477	 *
 478	 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
 479	 * gives us exclusion against all wb related operations on @inode
 480	 * including IO list manipulations and stat updates.
 481	 */
 482	if (old_wb < new_wb) {
 483		spin_lock(&old_wb->list_lock);
 484		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
 485	} else {
 486		spin_lock(&new_wb->list_lock);
 487		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 488	}
 489
 490	for (inodep = isw->inodes; *inodep; inodep++) {
 491		WARN_ON_ONCE((*inodep)->i_wb != old_wb);
 492		if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
 493			nr_switched++;
 494	}
 495
 496	spin_unlock(&new_wb->list_lock);
 497	spin_unlock(&old_wb->list_lock);
 498
 499	up_read(&bdi->wb_switch_rwsem);
 500
 501	if (nr_switched) {
 502		wb_wakeup(new_wb);
 503		wb_put_many(old_wb, nr_switched);
 504	}
 505
 506	for (inodep = isw->inodes; *inodep; inodep++)
 507		iput(*inodep);
 508	wb_put(new_wb);
 509	kfree(isw);
 510	atomic_dec(&isw_nr_in_flight);
 511}
 512
 513static bool inode_prepare_wbs_switch(struct inode *inode,
 514				     struct bdi_writeback *new_wb)
 515{
 516	/*
 517	 * Paired with smp_mb() in cgroup_writeback_umount().
 518	 * isw_nr_in_flight must be increased before checking SB_ACTIVE and
 519	 * grabbing an inode, otherwise isw_nr_in_flight can be observed as 0
 520	 * in cgroup_writeback_umount() and the isw_wq will be not flushed.
 521	 */
 522	smp_mb();
 523
 524	if (IS_DAX(inode))
 525		return false;
 526
 527	/* while holding I_WB_SWITCH, no one else can update the association */
 528	spin_lock(&inode->i_lock);
 529	if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
 530	    inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
 531	    inode_to_wb(inode) == new_wb) {
 532		spin_unlock(&inode->i_lock);
 533		return false;
 534	}
 535	inode->i_state |= I_WB_SWITCH;
 536	__iget(inode);
 537	spin_unlock(&inode->i_lock);
 538
 539	return true;
 540}
 541
 542/**
 543 * inode_switch_wbs - change the wb association of an inode
 544 * @inode: target inode
 545 * @new_wb_id: ID of the new wb
 546 *
 547 * Switch @inode's wb association to the wb identified by @new_wb_id.  The
 548 * switching is performed asynchronously and may fail silently.
 549 */
 550static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 551{
 552	struct backing_dev_info *bdi = inode_to_bdi(inode);
 553	struct cgroup_subsys_state *memcg_css;
 554	struct inode_switch_wbs_context *isw;
 555
 556	/* noop if seems to be already in progress */
 557	if (inode->i_state & I_WB_SWITCH)
 558		return;
 559
 560	/* avoid queueing a new switch if too many are already in flight */
 561	if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
 562		return;
 563
 564	isw = kzalloc(sizeof(*isw) + 2 * sizeof(struct inode *), GFP_ATOMIC);
 565	if (!isw)
 566		return;
 567
 568	atomic_inc(&isw_nr_in_flight);
 569
 570	/* find and pin the new wb */
 571	rcu_read_lock();
 572	memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
 573	if (memcg_css && !css_tryget(memcg_css))
 574		memcg_css = NULL;
 575	rcu_read_unlock();
 576	if (!memcg_css)
 577		goto out_free;
 578
 579	isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 580	css_put(memcg_css);
 581	if (!isw->new_wb)
 582		goto out_free;
 583
 584	if (!inode_prepare_wbs_switch(inode, isw->new_wb))
 585		goto out_free;
 586
 587	isw->inodes[0] = inode;
 588
 589	/*
 590	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
 591	 * the RCU protected stat update paths to grab the i_page
 592	 * lock so that stat transfer can synchronize against them.
 593	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 594	 */
 595	INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
 596	queue_rcu_work(isw_wq, &isw->work);
 597	return;
 598
 599out_free:
 600	atomic_dec(&isw_nr_in_flight);
 601	if (isw->new_wb)
 602		wb_put(isw->new_wb);
 603	kfree(isw);
 604}
 605
 606/**
 607 * cleanup_offline_cgwb - detach associated inodes
 608 * @wb: target wb
 609 *
 610 * Switch all inodes attached to @wb to a nearest living ancestor's wb in order
 611 * to eventually release the dying @wb.  Returns %true if not all inodes were
 612 * switched and the function has to be restarted.
 613 */
 614bool cleanup_offline_cgwb(struct bdi_writeback *wb)
 615{
 616	struct cgroup_subsys_state *memcg_css;
 617	struct inode_switch_wbs_context *isw;
 618	struct inode *inode;
 619	int nr;
 620	bool restart = false;
 621
 622	isw = kzalloc(sizeof(*isw) + WB_MAX_INODES_PER_ISW *
 623		      sizeof(struct inode *), GFP_KERNEL);
 624	if (!isw)
 625		return restart;
 626
 627	atomic_inc(&isw_nr_in_flight);
 628
 629	for (memcg_css = wb->memcg_css->parent; memcg_css;
 630	     memcg_css = memcg_css->parent) {
 631		isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
 632		if (isw->new_wb)
 633			break;
 634	}
 635	if (unlikely(!isw->new_wb))
 636		isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
 637
 638	nr = 0;
 639	spin_lock(&wb->list_lock);
 640	list_for_each_entry(inode, &wb->b_attached, i_io_list) {
 641		if (!inode_prepare_wbs_switch(inode, isw->new_wb))
 642			continue;
 643
 644		isw->inodes[nr++] = inode;
 645
 646		if (nr >= WB_MAX_INODES_PER_ISW - 1) {
 647			restart = true;
 648			break;
 649		}
 650	}
 651	spin_unlock(&wb->list_lock);
 652
 653	/* no attached inodes? bail out */
 654	if (nr == 0) {
 655		atomic_dec(&isw_nr_in_flight);
 656		wb_put(isw->new_wb);
 657		kfree(isw);
 658		return restart;
 659	}
 660
 661	/*
 662	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
 663	 * the RCU protected stat update paths to grab the i_page
 664	 * lock so that stat transfer can synchronize against them.
 665	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 666	 */
 667	INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
 668	queue_rcu_work(isw_wq, &isw->work);
 669
 670	return restart;
 671}
 672
 673/**
 674 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
 675 * @wbc: writeback_control of interest
 676 * @inode: target inode
 677 *
 678 * @inode is locked and about to be written back under the control of @wbc.
 679 * Record @inode's writeback context into @wbc and unlock the i_lock.  On
 680 * writeback completion, wbc_detach_inode() should be called.  This is used
 681 * to track the cgroup writeback context.
 682 */
 683void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 684				 struct inode *inode)
 685{
 686	if (!inode_cgwb_enabled(inode)) {
 687		spin_unlock(&inode->i_lock);
 688		return;
 689	}
 690
 691	wbc->wb = inode_to_wb(inode);
 692	wbc->inode = inode;
 693
 694	wbc->wb_id = wbc->wb->memcg_css->id;
 695	wbc->wb_lcand_id = inode->i_wb_frn_winner;
 696	wbc->wb_tcand_id = 0;
 697	wbc->wb_bytes = 0;
 698	wbc->wb_lcand_bytes = 0;
 699	wbc->wb_tcand_bytes = 0;
 700
 701	wb_get(wbc->wb);
 702	spin_unlock(&inode->i_lock);
 703
 704	/*
 705	 * A dying wb indicates that either the blkcg associated with the
 706	 * memcg changed or the associated memcg is dying.  In the first
 707	 * case, a replacement wb should already be available and we should
 708	 * refresh the wb immediately.  In the second case, trying to
 709	 * refresh will keep failing.
 710	 */
 711	if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
 712		inode_switch_wbs(inode, wbc->wb_id);
 713}
 714EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
 715
 716/**
 717 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 718 * @wbc: writeback_control of the just finished writeback
 719 *
 720 * To be called after a writeback attempt of an inode finishes and undoes
 721 * wbc_attach_and_unlock_inode().  Can be called under any context.
 722 *
 723 * As concurrent write sharing of an inode is expected to be very rare and
 724 * memcg only tracks page ownership on first-use basis severely confining
 725 * the usefulness of such sharing, cgroup writeback tracks ownership
 726 * per-inode.  While the support for concurrent write sharing of an inode
 727 * is deemed unnecessary, an inode being written to by different cgroups at
 728 * different points in time is a lot more common, and, more importantly,
 729 * charging only by first-use can too readily lead to grossly incorrect
 730 * behaviors (single foreign page can lead to gigabytes of writeback to be
 731 * incorrectly attributed).
 732 *
 733 * To resolve this issue, cgroup writeback detects the majority dirtier of
 734 * an inode and transfers the ownership to it.  To avoid unnnecessary
 735 * oscillation, the detection mechanism keeps track of history and gives
 736 * out the switch verdict only if the foreign usage pattern is stable over
 737 * a certain amount of time and/or writeback attempts.
 738 *
 739 * On each writeback attempt, @wbc tries to detect the majority writer
 740 * using Boyer-Moore majority vote algorithm.  In addition to the byte
 741 * count from the majority voting, it also counts the bytes written for the
 742 * current wb and the last round's winner wb (max of last round's current
 743 * wb, the winner from two rounds ago, and the last round's majority
 744 * candidate).  Keeping track of the historical winner helps the algorithm
 745 * to semi-reliably detect the most active writer even when it's not the
 746 * absolute majority.
 747 *
 748 * Once the winner of the round is determined, whether the winner is
 749 * foreign or not and how much IO time the round consumed is recorded in
 750 * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
 751 * over a certain threshold, the switch verdict is given.
 752 */
 753void wbc_detach_inode(struct writeback_control *wbc)
 754{
 755	struct bdi_writeback *wb = wbc->wb;
 756	struct inode *inode = wbc->inode;
 757	unsigned long avg_time, max_bytes, max_time;
 758	u16 history;
 759	int max_id;
 760
 761	if (!wb)
 762		return;
 763
 764	history = inode->i_wb_frn_history;
 765	avg_time = inode->i_wb_frn_avg_time;
 766
 767	/* pick the winner of this round */
 768	if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
 769	    wbc->wb_bytes >= wbc->wb_tcand_bytes) {
 770		max_id = wbc->wb_id;
 771		max_bytes = wbc->wb_bytes;
 772	} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
 773		max_id = wbc->wb_lcand_id;
 774		max_bytes = wbc->wb_lcand_bytes;
 775	} else {
 776		max_id = wbc->wb_tcand_id;
 777		max_bytes = wbc->wb_tcand_bytes;
 778	}
 779
 780	/*
 781	 * Calculate the amount of IO time the winner consumed and fold it
 782	 * into the running average kept per inode.  If the consumed IO
 783	 * time is lower than avag / WB_FRN_TIME_CUT_DIV, ignore it for
 784	 * deciding whether to switch or not.  This is to prevent one-off
 785	 * small dirtiers from skewing the verdict.
 786	 */
 787	max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
 788				wb->avg_write_bandwidth);
 789	if (avg_time)
 790		avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
 791			    (avg_time >> WB_FRN_TIME_AVG_SHIFT);
 792	else
 793		avg_time = max_time;	/* immediate catch up on first run */
 794
 795	if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
 796		int slots;
 797
 798		/*
 799		 * The switch verdict is reached if foreign wb's consume
 800		 * more than a certain proportion of IO time in a
 801		 * WB_FRN_TIME_PERIOD.  This is loosely tracked by 16 slot
 802		 * history mask where each bit represents one sixteenth of
 803		 * the period.  Determine the number of slots to shift into
 804		 * history from @max_time.
 805		 */
 806		slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
 807			    (unsigned long)WB_FRN_HIST_MAX_SLOTS);
 808		history <<= slots;
 809		if (wbc->wb_id != max_id)
 810			history |= (1U << slots) - 1;
 811
 812		if (history)
 813			trace_inode_foreign_history(inode, wbc, history);
 814
 815		/*
 816		 * Switch if the current wb isn't the consistent winner.
 817		 * If there are multiple closely competing dirtiers, the
 818		 * inode may switch across them repeatedly over time, which
 819		 * is okay.  The main goal is avoiding keeping an inode on
 820		 * the wrong wb for an extended period of time.
 821		 */
 822		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
 823			inode_switch_wbs(inode, max_id);
 824	}
 825
 826	/*
 827	 * Multiple instances of this function may race to update the
 828	 * following fields but we don't mind occassional inaccuracies.
 829	 */
 830	inode->i_wb_frn_winner = max_id;
 831	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
 832	inode->i_wb_frn_history = history;
 833
 834	wb_put(wbc->wb);
 835	wbc->wb = NULL;
 836}
 837EXPORT_SYMBOL_GPL(wbc_detach_inode);
 838
 839/**
 840 * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
 841 * @wbc: writeback_control of the writeback in progress
 842 * @page: page being written out
 843 * @bytes: number of bytes being written out
 844 *
 845 * @bytes from @page are about to written out during the writeback
 846 * controlled by @wbc.  Keep the book for foreign inode detection.  See
 847 * wbc_detach_inode().
 848 */
 849void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
 850			      size_t bytes)
 851{
 852	struct cgroup_subsys_state *css;
 853	int id;
 854
 855	/*
 856	 * pageout() path doesn't attach @wbc to the inode being written
 857	 * out.  This is intentional as we don't want the function to block
 858	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
 859	 * regular writeback instead of writing things out itself.
 860	 */
 861	if (!wbc->wb || wbc->no_cgroup_owner)
 862		return;
 863
 864	css = mem_cgroup_css_from_page(page);
 865	/* dead cgroups shouldn't contribute to inode ownership arbitration */
 866	if (!(css->flags & CSS_ONLINE))
 867		return;
 868
 869	id = css->id;
 870
 871	if (id == wbc->wb_id) {
 872		wbc->wb_bytes += bytes;
 873		return;
 874	}
 875
 876	if (id == wbc->wb_lcand_id)
 877		wbc->wb_lcand_bytes += bytes;
 878
 879	/* Boyer-Moore majority vote algorithm */
 880	if (!wbc->wb_tcand_bytes)
 881		wbc->wb_tcand_id = id;
 882	if (id == wbc->wb_tcand_id)
 883		wbc->wb_tcand_bytes += bytes;
 884	else
 885		wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
 886}
 887EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
 888
 889/**
 890 * inode_congested - test whether an inode is congested
 891 * @inode: inode to test for congestion (may be NULL)
 892 * @cong_bits: mask of WB_[a]sync_congested bits to test
 893 *
 894 * Tests whether @inode is congested.  @cong_bits is the mask of congestion
 895 * bits to test and the return value is the mask of set bits.
 896 *
 897 * If cgroup writeback is enabled for @inode, the congestion state is
 898 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 899 * associated with @inode is congested; otherwise, the root wb's congestion
 900 * state is used.
 901 *
 902 * @inode is allowed to be NULL as this function is often called on
 903 * mapping->host which is NULL for the swapper space.
 904 */
 905int inode_congested(struct inode *inode, int cong_bits)
 906{
 907	/*
 908	 * Once set, ->i_wb never becomes NULL while the inode is alive.
 909	 * Start transaction iff ->i_wb is visible.
 910	 */
 911	if (inode && inode_to_wb_is_valid(inode)) {
 912		struct bdi_writeback *wb;
 913		struct wb_lock_cookie lock_cookie = {};
 914		bool congested;
 915
 916		wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
 917		congested = wb_congested(wb, cong_bits);
 918		unlocked_inode_to_wb_end(inode, &lock_cookie);
 919		return congested;
 920	}
 921
 922	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
 923}
 924EXPORT_SYMBOL_GPL(inode_congested);
 925
 926/**
 927 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 928 * @wb: target bdi_writeback to split @nr_pages to
 929 * @nr_pages: number of pages to write for the whole bdi
 930 *
 931 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 932 * relation to the total write bandwidth of all wb's w/ dirty inodes on
 933 * @wb->bdi.
 934 */
 935static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
 936{
 937	unsigned long this_bw = wb->avg_write_bandwidth;
 938	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
 939
 940	if (nr_pages == LONG_MAX)
 941		return LONG_MAX;
 942
 943	/*
 944	 * This may be called on clean wb's and proportional distribution
 945	 * may not make sense, just use the original @nr_pages in those
 946	 * cases.  In general, we wanna err on the side of writing more.
 947	 */
 948	if (!tot_bw || this_bw >= tot_bw)
 949		return nr_pages;
 950	else
 951		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
 952}
 953
 954/**
 955 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
 956 * @bdi: target backing_dev_info
 957 * @base_work: wb_writeback_work to issue
 958 * @skip_if_busy: skip wb's which already have writeback in progress
 959 *
 960 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
 961 * have dirty inodes.  If @base_work->nr_page isn't %LONG_MAX, it's
 962 * distributed to the busy wbs according to each wb's proportion in the
 963 * total active write bandwidth of @bdi.
 964 */
 965static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 966				  struct wb_writeback_work *base_work,
 967				  bool skip_if_busy)
 968{
 969	struct bdi_writeback *last_wb = NULL;
 970	struct bdi_writeback *wb = list_entry(&bdi->wb_list,
 971					      struct bdi_writeback, bdi_node);
 972
 973	might_sleep();
 974restart:
 975	rcu_read_lock();
 976	list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
 977		DEFINE_WB_COMPLETION(fallback_work_done, bdi);
 978		struct wb_writeback_work fallback_work;
 979		struct wb_writeback_work *work;
 980		long nr_pages;
 981
 982		if (last_wb) {
 983			wb_put(last_wb);
 984			last_wb = NULL;
 985		}
 986
 987		/* SYNC_ALL writes out I_DIRTY_TIME too */
 988		if (!wb_has_dirty_io(wb) &&
 989		    (base_work->sync_mode == WB_SYNC_NONE ||
 990		     list_empty(&wb->b_dirty_time)))
 991			continue;
 992		if (skip_if_busy && writeback_in_progress(wb))
 993			continue;
 994
 995		nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
 996
 997		work = kmalloc(sizeof(*work), GFP_ATOMIC);
 998		if (work) {
 999			*work = *base_work;
1000			work->nr_pages = nr_pages;
1001			work->auto_free = 1;
1002			wb_queue_work(wb, work);
1003			continue;
1004		}
1005
1006		/* alloc failed, execute synchronously using on-stack fallback */
1007		work = &fallback_work;
1008		*work = *base_work;
1009		work->nr_pages = nr_pages;
1010		work->auto_free = 0;
1011		work->done = &fallback_work_done;
1012
1013		wb_queue_work(wb, work);
1014
1015		/*
1016		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
1017		 * continuing iteration from @wb after dropping and
1018		 * regrabbing rcu read lock.
1019		 */
1020		wb_get(wb);
1021		last_wb = wb;
1022
1023		rcu_read_unlock();
1024		wb_wait_for_completion(&fallback_work_done);
1025		goto restart;
1026	}
1027	rcu_read_unlock();
1028
1029	if (last_wb)
1030		wb_put(last_wb);
1031}
1032
1033/**
1034 * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
1035 * @bdi_id: target bdi id
1036 * @memcg_id: target memcg css id
1037 * @nr: number of pages to write, 0 for best-effort dirty flushing
1038 * @reason: reason why some writeback work initiated
1039 * @done: target wb_completion
1040 *
1041 * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
1042 * with the specified parameters.
1043 */
1044int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr,
1045			   enum wb_reason reason, struct wb_completion *done)
1046{
1047	struct backing_dev_info *bdi;
1048	struct cgroup_subsys_state *memcg_css;
1049	struct bdi_writeback *wb;
1050	struct wb_writeback_work *work;
 
1051	int ret;
1052
1053	/* lookup bdi and memcg */
1054	bdi = bdi_get_by_id(bdi_id);
1055	if (!bdi)
1056		return -ENOENT;
1057
1058	rcu_read_lock();
1059	memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
1060	if (memcg_css && !css_tryget(memcg_css))
1061		memcg_css = NULL;
1062	rcu_read_unlock();
1063	if (!memcg_css) {
1064		ret = -ENOENT;
1065		goto out_bdi_put;
1066	}
1067
1068	/*
1069	 * And find the associated wb.  If the wb isn't there already
1070	 * there's nothing to flush, don't create one.
1071	 */
1072	wb = wb_get_lookup(bdi, memcg_css);
1073	if (!wb) {
1074		ret = -ENOENT;
1075		goto out_css_put;
1076	}
1077
1078	/*
1079	 * If @nr is zero, the caller is attempting to write out most of
1080	 * the currently dirty pages.  Let's take the current dirty page
1081	 * count and inflate it by 25% which should be large enough to
1082	 * flush out most dirty pages while avoiding getting livelocked by
1083	 * concurrent dirtiers.
 
 
 
1084	 */
1085	if (!nr) {
1086		unsigned long filepages, headroom, dirty, writeback;
1087
1088		mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty,
1089				      &writeback);
1090		nr = dirty * 10 / 8;
1091	}
1092
1093	/* issue the writeback work */
1094	work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
1095	if (work) {
1096		work->nr_pages = nr;
1097		work->sync_mode = WB_SYNC_NONE;
1098		work->range_cyclic = 1;
1099		work->reason = reason;
1100		work->done = done;
1101		work->auto_free = 1;
1102		wb_queue_work(wb, work);
1103		ret = 0;
1104	} else {
1105		ret = -ENOMEM;
1106	}
1107
1108	wb_put(wb);
1109out_css_put:
1110	css_put(memcg_css);
1111out_bdi_put:
1112	bdi_put(bdi);
1113	return ret;
1114}
1115
1116/**
1117 * cgroup_writeback_umount - flush inode wb switches for umount
1118 *
1119 * This function is called when a super_block is about to be destroyed and
1120 * flushes in-flight inode wb switches.  An inode wb switch goes through
1121 * RCU and then workqueue, so the two need to be flushed in order to ensure
1122 * that all previously scheduled switches are finished.  As wb switches are
1123 * rare occurrences and synchronize_rcu() can take a while, perform
1124 * flushing iff wb switches are in flight.
1125 */
1126void cgroup_writeback_umount(void)
1127{
1128	/*
1129	 * SB_ACTIVE should be reliably cleared before checking
1130	 * isw_nr_in_flight, see generic_shutdown_super().
1131	 */
1132	smp_mb();
1133
1134	if (atomic_read(&isw_nr_in_flight)) {
1135		/*
1136		 * Use rcu_barrier() to wait for all pending callbacks to
1137		 * ensure that all in-flight wb switches are in the workqueue.
1138		 */
1139		rcu_barrier();
1140		flush_workqueue(isw_wq);
1141	}
1142}
1143
1144static int __init cgroup_writeback_init(void)
1145{
1146	isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
1147	if (!isw_wq)
1148		return -ENOMEM;
1149	return 0;
1150}
1151fs_initcall(cgroup_writeback_init);
1152
1153#else	/* CONFIG_CGROUP_WRITEBACK */
1154
1155static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1156static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1157
1158static void inode_cgwb_move_to_attached(struct inode *inode,
1159					struct bdi_writeback *wb)
1160{
1161	assert_spin_locked(&wb->list_lock);
1162	assert_spin_locked(&inode->i_lock);
 
1163
1164	inode->i_state &= ~I_SYNC_QUEUED;
1165	list_del_init(&inode->i_io_list);
1166	wb_io_lists_depopulated(wb);
1167}
1168
1169static struct bdi_writeback *
1170locked_inode_to_wb_and_lock_list(struct inode *inode)
1171	__releases(&inode->i_lock)
1172	__acquires(&wb->list_lock)
1173{
1174	struct bdi_writeback *wb = inode_to_wb(inode);
1175
1176	spin_unlock(&inode->i_lock);
1177	spin_lock(&wb->list_lock);
1178	return wb;
1179}
1180
1181static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
1182	__acquires(&wb->list_lock)
1183{
1184	struct bdi_writeback *wb = inode_to_wb(inode);
1185
1186	spin_lock(&wb->list_lock);
1187	return wb;
1188}
1189
1190static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
1191{
1192	return nr_pages;
1193}
1194
1195static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
1196				  struct wb_writeback_work *base_work,
1197				  bool skip_if_busy)
1198{
1199	might_sleep();
1200
1201	if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1202		base_work->auto_free = 0;
1203		wb_queue_work(&bdi->wb, base_work);
1204	}
1205}
1206
1207#endif	/* CONFIG_CGROUP_WRITEBACK */
1208
1209/*
1210 * Add in the number of potentially dirty inodes, because each inode
1211 * write can dirty pagecache in the underlying blockdev.
1212 */
1213static unsigned long get_nr_dirty_pages(void)
1214{
1215	return global_node_page_state(NR_FILE_DIRTY) +
1216		get_nr_dirty_inodes();
1217}
1218
1219static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
1220{
1221	if (!wb_has_dirty_io(wb))
1222		return;
1223
1224	/*
1225	 * All callers of this function want to start writeback of all
1226	 * dirty pages. Places like vmscan can call this at a very
1227	 * high frequency, causing pointless allocations of tons of
1228	 * work items and keeping the flusher threads busy retrieving
1229	 * that work. Ensure that we only allow one of them pending and
1230	 * inflight at the time.
1231	 */
1232	if (test_bit(WB_start_all, &wb->state) ||
1233	    test_and_set_bit(WB_start_all, &wb->state))
1234		return;
1235
1236	wb->start_all_reason = reason;
1237	wb_wakeup(wb);
1238}
1239
1240/**
1241 * wb_start_background_writeback - start background writeback
1242 * @wb: bdi_writback to write from
1243 *
1244 * Description:
1245 *   This makes sure WB_SYNC_NONE background writeback happens. When
1246 *   this function returns, it is only guaranteed that for given wb
1247 *   some IO is happening if we are over background dirty threshold.
1248 *   Caller need not hold sb s_umount semaphore.
1249 */
1250void wb_start_background_writeback(struct bdi_writeback *wb)
1251{
1252	/*
1253	 * We just wake up the flusher thread. It will perform background
1254	 * writeback as soon as there is no other work to do.
1255	 */
1256	trace_writeback_wake_background(wb);
1257	wb_wakeup(wb);
1258}
1259
1260/*
1261 * Remove the inode from the writeback list it is on.
1262 */
1263void inode_io_list_del(struct inode *inode)
1264{
1265	struct bdi_writeback *wb;
1266
1267	wb = inode_to_wb_and_lock_list(inode);
1268	spin_lock(&inode->i_lock);
1269
1270	inode->i_state &= ~I_SYNC_QUEUED;
1271	list_del_init(&inode->i_io_list);
1272	wb_io_lists_depopulated(wb);
1273
1274	spin_unlock(&inode->i_lock);
1275	spin_unlock(&wb->list_lock);
1276}
1277EXPORT_SYMBOL(inode_io_list_del);
1278
1279/*
1280 * mark an inode as under writeback on the sb
1281 */
1282void sb_mark_inode_writeback(struct inode *inode)
1283{
1284	struct super_block *sb = inode->i_sb;
1285	unsigned long flags;
1286
1287	if (list_empty(&inode->i_wb_list)) {
1288		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1289		if (list_empty(&inode->i_wb_list)) {
1290			list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
1291			trace_sb_mark_inode_writeback(inode);
1292		}
1293		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1294	}
1295}
1296
1297/*
1298 * clear an inode as under writeback on the sb
1299 */
1300void sb_clear_inode_writeback(struct inode *inode)
1301{
1302	struct super_block *sb = inode->i_sb;
1303	unsigned long flags;
1304
1305	if (!list_empty(&inode->i_wb_list)) {
1306		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1307		if (!list_empty(&inode->i_wb_list)) {
1308			list_del_init(&inode->i_wb_list);
1309			trace_sb_clear_inode_writeback(inode);
1310		}
1311		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1312	}
1313}
1314
1315/*
1316 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
1317 * furthest end of its superblock's dirty-inode list.
1318 *
1319 * Before stamping the inode's ->dirtied_when, we check to see whether it is
1320 * already the most-recently-dirtied inode on the b_dirty list.  If that is
1321 * the case then the inode must have been redirtied while it was being written
1322 * out and we don't reset its dirtied_when.
1323 */
1324static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
1325{
1326	assert_spin_locked(&inode->i_lock);
1327
 
 
 
 
 
 
 
 
 
 
 
1328	if (!list_empty(&wb->b_dirty)) {
1329		struct inode *tail;
1330
1331		tail = wb_inode(wb->b_dirty.next);
1332		if (time_before(inode->dirtied_when, tail->dirtied_when))
1333			inode->dirtied_when = jiffies;
1334	}
1335	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
1336	inode->i_state &= ~I_SYNC_QUEUED;
1337}
1338
1339static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1340{
1341	spin_lock(&inode->i_lock);
1342	redirty_tail_locked(inode, wb);
1343	spin_unlock(&inode->i_lock);
1344}
1345
1346/*
1347 * requeue inode for re-scanning after bdi->b_io list is exhausted.
1348 */
1349static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1350{
1351	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
1352}
1353
1354static void inode_sync_complete(struct inode *inode)
1355{
1356	inode->i_state &= ~I_SYNC;
1357	/* If inode is clean an unused, put it into LRU now... */
1358	inode_add_lru(inode);
1359	/* Waiters must see I_SYNC cleared before being woken up */
1360	smp_mb();
1361	wake_up_bit(&inode->i_state, __I_SYNC);
1362}
1363
1364static bool inode_dirtied_after(struct inode *inode, unsigned long t)
1365{
1366	bool ret = time_after(inode->dirtied_when, t);
1367#ifndef CONFIG_64BIT
1368	/*
1369	 * For inodes being constantly redirtied, dirtied_when can get stuck.
1370	 * It _appears_ to be in the future, but is actually in distant past.
1371	 * This test is necessary to prevent such wrapped-around relative times
1372	 * from permanently stopping the whole bdi writeback.
1373	 */
1374	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
1375#endif
1376	return ret;
1377}
1378
1379#define EXPIRE_DIRTY_ATIME 0x0001
1380
1381/*
1382 * Move expired (dirtied before dirtied_before) dirty inodes from
1383 * @delaying_queue to @dispatch_queue.
1384 */
1385static int move_expired_inodes(struct list_head *delaying_queue,
1386			       struct list_head *dispatch_queue,
1387			       unsigned long dirtied_before)
1388{
1389	LIST_HEAD(tmp);
1390	struct list_head *pos, *node;
1391	struct super_block *sb = NULL;
1392	struct inode *inode;
1393	int do_sb_sort = 0;
1394	int moved = 0;
1395
1396	while (!list_empty(delaying_queue)) {
1397		inode = wb_inode(delaying_queue->prev);
1398		if (inode_dirtied_after(inode, dirtied_before))
1399			break;
 
1400		list_move(&inode->i_io_list, &tmp);
1401		moved++;
1402		spin_lock(&inode->i_lock);
1403		inode->i_state |= I_SYNC_QUEUED;
1404		spin_unlock(&inode->i_lock);
1405		if (sb_is_blkdev_sb(inode->i_sb))
1406			continue;
1407		if (sb && sb != inode->i_sb)
1408			do_sb_sort = 1;
1409		sb = inode->i_sb;
1410	}
1411
1412	/* just one sb in list, splice to dispatch_queue and we're done */
1413	if (!do_sb_sort) {
1414		list_splice(&tmp, dispatch_queue);
1415		goto out;
1416	}
1417
1418	/* Move inodes from one superblock together */
 
 
 
 
 
1419	while (!list_empty(&tmp)) {
1420		sb = wb_inode(tmp.prev)->i_sb;
1421		list_for_each_prev_safe(pos, node, &tmp) {
1422			inode = wb_inode(pos);
1423			if (inode->i_sb == sb)
1424				list_move(&inode->i_io_list, dispatch_queue);
1425		}
1426	}
1427out:
1428	return moved;
1429}
1430
1431/*
1432 * Queue all expired dirty inodes for io, eldest first.
1433 * Before
1434 *         newly dirtied     b_dirty    b_io    b_more_io
1435 *         =============>    gf         edc     BA
1436 * After
1437 *         newly dirtied     b_dirty    b_io    b_more_io
1438 *         =============>    g          fBAedc
1439 *                                           |
1440 *                                           +--> dequeue for IO
1441 */
1442static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
1443		     unsigned long dirtied_before)
1444{
1445	int moved;
1446	unsigned long time_expire_jif = dirtied_before;
1447
1448	assert_spin_locked(&wb->list_lock);
1449	list_splice_init(&wb->b_more_io, &wb->b_io);
1450	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
1451	if (!work->for_sync)
1452		time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
1453	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
1454				     time_expire_jif);
1455	if (moved)
1456		wb_io_lists_populated(wb);
1457	trace_writeback_queue_io(wb, work, dirtied_before, moved);
1458}
1459
1460static int write_inode(struct inode *inode, struct writeback_control *wbc)
1461{
1462	int ret;
1463
1464	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
1465		trace_writeback_write_inode_start(inode, wbc);
1466		ret = inode->i_sb->s_op->write_inode(inode, wbc);
1467		trace_writeback_write_inode(inode, wbc);
1468		return ret;
1469	}
1470	return 0;
1471}
1472
1473/*
1474 * Wait for writeback on an inode to complete. Called with i_lock held.
1475 * Caller must make sure inode cannot go away when we drop i_lock.
1476 */
1477static void __inode_wait_for_writeback(struct inode *inode)
1478	__releases(inode->i_lock)
1479	__acquires(inode->i_lock)
1480{
1481	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
1482	wait_queue_head_t *wqh;
1483
1484	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1485	while (inode->i_state & I_SYNC) {
1486		spin_unlock(&inode->i_lock);
1487		__wait_on_bit(wqh, &wq, bit_wait,
1488			      TASK_UNINTERRUPTIBLE);
1489		spin_lock(&inode->i_lock);
1490	}
1491}
1492
1493/*
1494 * Wait for writeback on an inode to complete. Caller must have inode pinned.
1495 */
1496void inode_wait_for_writeback(struct inode *inode)
1497{
1498	spin_lock(&inode->i_lock);
1499	__inode_wait_for_writeback(inode);
1500	spin_unlock(&inode->i_lock);
1501}
1502
1503/*
1504 * Sleep until I_SYNC is cleared. This function must be called with i_lock
1505 * held and drops it. It is aimed for callers not holding any inode reference
1506 * so once i_lock is dropped, inode can go away.
1507 */
1508static void inode_sleep_on_writeback(struct inode *inode)
1509	__releases(inode->i_lock)
1510{
1511	DEFINE_WAIT(wait);
1512	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1513	int sleep;
1514
1515	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1516	sleep = inode->i_state & I_SYNC;
1517	spin_unlock(&inode->i_lock);
1518	if (sleep)
1519		schedule();
1520	finish_wait(wqh, &wait);
1521}
1522
1523/*
1524 * Find proper writeback list for the inode depending on its current state and
1525 * possibly also change of its state while we were doing writeback.  Here we
1526 * handle things such as livelock prevention or fairness of writeback among
1527 * inodes. This function can be called only by flusher thread - noone else
1528 * processes all inodes in writeback lists and requeueing inodes behind flusher
1529 * thread's back can have unexpected consequences.
1530 */
1531static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1532			  struct writeback_control *wbc)
1533{
1534	if (inode->i_state & I_FREEING)
1535		return;
1536
1537	/*
1538	 * Sync livelock prevention. Each inode is tagged and synced in one
1539	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
1540	 * the dirty time to prevent enqueue and sync it again.
1541	 */
1542	if ((inode->i_state & I_DIRTY) &&
1543	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
1544		inode->dirtied_when = jiffies;
1545
1546	if (wbc->pages_skipped) {
1547		/*
1548		 * writeback is not making progress due to locked
1549		 * buffers. Skip this inode for now.
1550		 */
1551		redirty_tail_locked(inode, wb);
1552		return;
1553	}
1554
1555	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
1556		/*
1557		 * We didn't write back all the pages.  nfs_writepages()
1558		 * sometimes bales out without doing anything.
1559		 */
1560		if (wbc->nr_to_write <= 0) {
1561			/* Slice used up. Queue for next turn. */
1562			requeue_io(inode, wb);
1563		} else {
1564			/*
1565			 * Writeback blocked by something other than
1566			 * congestion. Delay the inode for some time to
1567			 * avoid spinning on the CPU (100% iowait)
1568			 * retrying writeback of the dirty page/inode
1569			 * that cannot be performed immediately.
1570			 */
1571			redirty_tail_locked(inode, wb);
1572		}
1573	} else if (inode->i_state & I_DIRTY) {
1574		/*
1575		 * Filesystems can dirty the inode during writeback operations,
1576		 * such as delayed allocation during submission or metadata
1577		 * updates after data IO completion.
1578		 */
1579		redirty_tail_locked(inode, wb);
1580	} else if (inode->i_state & I_DIRTY_TIME) {
1581		inode->dirtied_when = jiffies;
1582		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1583		inode->i_state &= ~I_SYNC_QUEUED;
1584	} else {
1585		/* The inode is clean. Remove from writeback lists. */
1586		inode_cgwb_move_to_attached(inode, wb);
1587	}
1588}
1589
1590/*
1591 * Write out an inode and its dirty pages (or some of its dirty pages, depending
1592 * on @wbc->nr_to_write), and clear the relevant dirty flags from i_state.
1593 *
1594 * This doesn't remove the inode from the writeback list it is on, except
1595 * potentially to move it from b_dirty_time to b_dirty due to timestamp
1596 * expiration.  The caller is otherwise responsible for writeback list handling.
1597 *
1598 * The caller is also responsible for setting the I_SYNC flag beforehand and
1599 * calling inode_sync_complete() to clear it afterwards.
1600 */
1601static int
1602__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1603{
1604	struct address_space *mapping = inode->i_mapping;
1605	long nr_to_write = wbc->nr_to_write;
1606	unsigned dirty;
1607	int ret;
1608
1609	WARN_ON(!(inode->i_state & I_SYNC));
1610
1611	trace_writeback_single_inode_start(inode, wbc, nr_to_write);
1612
1613	ret = do_writepages(mapping, wbc);
1614
1615	/*
1616	 * Make sure to wait on the data before writing out the metadata.
1617	 * This is important for filesystems that modify metadata on data
1618	 * I/O completion. We don't do it for sync(2) writeback because it has a
1619	 * separate, external IO completion path and ->sync_fs for guaranteeing
1620	 * inode metadata is written back correctly.
1621	 */
1622	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
1623		int err = filemap_fdatawait(mapping);
1624		if (ret == 0)
1625			ret = err;
1626	}
1627
1628	/*
1629	 * If the inode has dirty timestamps and we need to write them, call
1630	 * mark_inode_dirty_sync() to notify the filesystem about it and to
1631	 * change I_DIRTY_TIME into I_DIRTY_SYNC.
1632	 */
1633	if ((inode->i_state & I_DIRTY_TIME) &&
1634	    (wbc->sync_mode == WB_SYNC_ALL ||
1635	     time_after(jiffies, inode->dirtied_time_when +
1636			dirtytime_expire_interval * HZ))) {
1637		trace_writeback_lazytime(inode);
1638		mark_inode_dirty_sync(inode);
1639	}
1640
1641	/*
1642	 * Get and clear the dirty flags from i_state.  This needs to be done
1643	 * after calling writepages because some filesystems may redirty the
1644	 * inode during writepages due to delalloc.  It also needs to be done
1645	 * after handling timestamp expiration, as that may dirty the inode too.
1646	 */
1647	spin_lock(&inode->i_lock);
1648	dirty = inode->i_state & I_DIRTY;
1649	inode->i_state &= ~dirty;
1650
1651	/*
1652	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
1653	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
1654	 * either they see the I_DIRTY bits cleared or we see the dirtied
1655	 * inode.
1656	 *
1657	 * I_DIRTY_PAGES is always cleared together above even if @mapping
1658	 * still has dirty pages.  The flag is reinstated after smp_mb() if
1659	 * necessary.  This guarantees that either __mark_inode_dirty()
1660	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
1661	 */
1662	smp_mb();
1663
1664	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
1665		inode->i_state |= I_DIRTY_PAGES;
 
 
 
 
 
 
 
1666
1667	spin_unlock(&inode->i_lock);
1668
1669	/* Don't write the inode if only I_DIRTY_PAGES was set */
1670	if (dirty & ~I_DIRTY_PAGES) {
1671		int err = write_inode(inode, wbc);
1672		if (ret == 0)
1673			ret = err;
1674	}
 
1675	trace_writeback_single_inode(inode, wbc, nr_to_write);
1676	return ret;
1677}
1678
1679/*
1680 * Write out an inode's dirty data and metadata on-demand, i.e. separately from
1681 * the regular batched writeback done by the flusher threads in
1682 * writeback_sb_inodes().  @wbc controls various aspects of the write, such as
1683 * whether it is a data-integrity sync (%WB_SYNC_ALL) or not (%WB_SYNC_NONE).
1684 *
1685 * To prevent the inode from going away, either the caller must have a reference
1686 * to the inode, or the inode must have I_WILL_FREE or I_FREEING set.
1687 */
1688static int writeback_single_inode(struct inode *inode,
1689				  struct writeback_control *wbc)
1690{
1691	struct bdi_writeback *wb;
1692	int ret = 0;
1693
1694	spin_lock(&inode->i_lock);
1695	if (!atomic_read(&inode->i_count))
1696		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
1697	else
1698		WARN_ON(inode->i_state & I_WILL_FREE);
1699
1700	if (inode->i_state & I_SYNC) {
1701		/*
1702		 * Writeback is already running on the inode.  For WB_SYNC_NONE,
1703		 * that's enough and we can just return.  For WB_SYNC_ALL, we
1704		 * must wait for the existing writeback to complete, then do
1705		 * writeback again if there's anything left.
1706		 */
1707		if (wbc->sync_mode != WB_SYNC_ALL)
1708			goto out;
1709		__inode_wait_for_writeback(inode);
1710	}
1711	WARN_ON(inode->i_state & I_SYNC);
1712	/*
1713	 * If the inode is already fully clean, then there's nothing to do.
1714	 *
1715	 * For data-integrity syncs we also need to check whether any pages are
1716	 * still under writeback, e.g. due to prior WB_SYNC_NONE writeback.  If
1717	 * there are any such pages, we'll need to wait for them.
1718	 */
1719	if (!(inode->i_state & I_DIRTY_ALL) &&
1720	    (wbc->sync_mode != WB_SYNC_ALL ||
1721	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
1722		goto out;
1723	inode->i_state |= I_SYNC;
1724	wbc_attach_and_unlock_inode(wbc, inode);
1725
1726	ret = __writeback_single_inode(inode, wbc);
1727
1728	wbc_detach_inode(wbc);
1729
1730	wb = inode_to_wb_and_lock_list(inode);
1731	spin_lock(&inode->i_lock);
1732	/*
1733	 * If the inode is now fully clean, then it can be safely removed from
1734	 * its writeback list (if any).  Otherwise the flusher threads are
1735	 * responsible for the writeback lists.
1736	 */
1737	if (!(inode->i_state & I_DIRTY_ALL))
1738		inode_cgwb_move_to_attached(inode, wb);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1739	spin_unlock(&wb->list_lock);
1740	inode_sync_complete(inode);
1741out:
1742	spin_unlock(&inode->i_lock);
1743	return ret;
1744}
1745
1746static long writeback_chunk_size(struct bdi_writeback *wb,
1747				 struct wb_writeback_work *work)
1748{
1749	long pages;
1750
1751	/*
1752	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
1753	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
1754	 * here avoids calling into writeback_inodes_wb() more than once.
1755	 *
1756	 * The intended call sequence for WB_SYNC_ALL writeback is:
1757	 *
1758	 *      wb_writeback()
1759	 *          writeback_sb_inodes()       <== called only once
1760	 *              write_cache_pages()     <== called once for each inode
1761	 *                   (quickly) tag currently dirty pages
1762	 *                   (maybe slowly) sync all tagged pages
1763	 */
1764	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
1765		pages = LONG_MAX;
1766	else {
1767		pages = min(wb->avg_write_bandwidth / 2,
1768			    global_wb_domain.dirty_limit / DIRTY_SCOPE);
1769		pages = min(pages, work->nr_pages);
1770		pages = round_down(pages + MIN_WRITEBACK_PAGES,
1771				   MIN_WRITEBACK_PAGES);
1772	}
1773
1774	return pages;
1775}
1776
1777/*
1778 * Write a portion of b_io inodes which belong to @sb.
1779 *
1780 * Return the number of pages and/or inodes written.
1781 *
1782 * NOTE! This is called with wb->list_lock held, and will
1783 * unlock and relock that for each inode it ends up doing
1784 * IO for.
1785 */
1786static long writeback_sb_inodes(struct super_block *sb,
1787				struct bdi_writeback *wb,
1788				struct wb_writeback_work *work)
1789{
1790	struct writeback_control wbc = {
1791		.sync_mode		= work->sync_mode,
1792		.tagged_writepages	= work->tagged_writepages,
1793		.for_kupdate		= work->for_kupdate,
1794		.for_background		= work->for_background,
1795		.for_sync		= work->for_sync,
1796		.range_cyclic		= work->range_cyclic,
1797		.range_start		= 0,
1798		.range_end		= LLONG_MAX,
1799	};
1800	unsigned long start_time = jiffies;
1801	long write_chunk;
1802	long wrote = 0;  /* count both pages and inodes */
1803
1804	while (!list_empty(&wb->b_io)) {
1805		struct inode *inode = wb_inode(wb->b_io.prev);
1806		struct bdi_writeback *tmp_wb;
 
1807
1808		if (inode->i_sb != sb) {
1809			if (work->sb) {
1810				/*
1811				 * We only want to write back data for this
1812				 * superblock, move all inodes not belonging
1813				 * to it back onto the dirty list.
1814				 */
1815				redirty_tail(inode, wb);
1816				continue;
1817			}
1818
1819			/*
1820			 * The inode belongs to a different superblock.
1821			 * Bounce back to the caller to unpin this and
1822			 * pin the next superblock.
1823			 */
1824			break;
1825		}
1826
1827		/*
1828		 * Don't bother with new inodes or inodes being freed, first
1829		 * kind does not need periodic writeout yet, and for the latter
1830		 * kind writeout is handled by the freer.
1831		 */
1832		spin_lock(&inode->i_lock);
1833		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
1834			redirty_tail_locked(inode, wb);
1835			spin_unlock(&inode->i_lock);
1836			continue;
1837		}
1838		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
1839			/*
1840			 * If this inode is locked for writeback and we are not
1841			 * doing writeback-for-data-integrity, move it to
1842			 * b_more_io so that writeback can proceed with the
1843			 * other inodes on s_io.
1844			 *
1845			 * We'll have another go at writing back this inode
1846			 * when we completed a full scan of b_io.
1847			 */
1848			spin_unlock(&inode->i_lock);
1849			requeue_io(inode, wb);
 
1850			trace_writeback_sb_inodes_requeue(inode);
1851			continue;
1852		}
1853		spin_unlock(&wb->list_lock);
1854
1855		/*
1856		 * We already requeued the inode if it had I_SYNC set and we
1857		 * are doing WB_SYNC_NONE writeback. So this catches only the
1858		 * WB_SYNC_ALL case.
1859		 */
1860		if (inode->i_state & I_SYNC) {
1861			/* Wait for I_SYNC. This function drops i_lock... */
1862			inode_sleep_on_writeback(inode);
1863			/* Inode may be gone, start again */
1864			spin_lock(&wb->list_lock);
1865			continue;
1866		}
1867		inode->i_state |= I_SYNC;
1868		wbc_attach_and_unlock_inode(&wbc, inode);
1869
1870		write_chunk = writeback_chunk_size(wb, work);
1871		wbc.nr_to_write = write_chunk;
1872		wbc.pages_skipped = 0;
1873
1874		/*
1875		 * We use I_SYNC to pin the inode in memory. While it is set
1876		 * evict_inode() will wait so the inode cannot be freed.
1877		 */
1878		__writeback_single_inode(inode, &wbc);
1879
1880		wbc_detach_inode(&wbc);
1881		work->nr_pages -= write_chunk - wbc.nr_to_write;
1882		wrote += write_chunk - wbc.nr_to_write;
 
 
1883
1884		if (need_resched()) {
1885			/*
1886			 * We're trying to balance between building up a nice
1887			 * long list of IOs to improve our merge rate, and
1888			 * getting those IOs out quickly for anyone throttling
1889			 * in balance_dirty_pages().  cond_resched() doesn't
1890			 * unplug, so get our IOs out the door before we
1891			 * give up the CPU.
1892			 */
1893			blk_flush_plug(current);
1894			cond_resched();
1895		}
1896
1897		/*
1898		 * Requeue @inode if still dirty.  Be careful as @inode may
1899		 * have been switched to another wb in the meantime.
1900		 */
1901		tmp_wb = inode_to_wb_and_lock_list(inode);
1902		spin_lock(&inode->i_lock);
1903		if (!(inode->i_state & I_DIRTY_ALL))
1904			wrote++;
1905		requeue_inode(inode, tmp_wb, &wbc);
1906		inode_sync_complete(inode);
1907		spin_unlock(&inode->i_lock);
1908
1909		if (unlikely(tmp_wb != wb)) {
1910			spin_unlock(&tmp_wb->list_lock);
1911			spin_lock(&wb->list_lock);
1912		}
1913
1914		/*
1915		 * bail out to wb_writeback() often enough to check
1916		 * background threshold and other termination conditions.
1917		 */
1918		if (wrote) {
1919			if (time_is_before_jiffies(start_time + HZ / 10UL))
1920				break;
1921			if (work->nr_pages <= 0)
1922				break;
1923		}
1924	}
1925	return wrote;
1926}
1927
1928static long __writeback_inodes_wb(struct bdi_writeback *wb,
1929				  struct wb_writeback_work *work)
1930{
1931	unsigned long start_time = jiffies;
1932	long wrote = 0;
1933
1934	while (!list_empty(&wb->b_io)) {
1935		struct inode *inode = wb_inode(wb->b_io.prev);
1936		struct super_block *sb = inode->i_sb;
1937
1938		if (!trylock_super(sb)) {
1939			/*
1940			 * trylock_super() may fail consistently due to
1941			 * s_umount being grabbed by someone else. Don't use
1942			 * requeue_io() to avoid busy retrying the inode/sb.
1943			 */
1944			redirty_tail(inode, wb);
1945			continue;
1946		}
1947		wrote += writeback_sb_inodes(sb, wb, work);
1948		up_read(&sb->s_umount);
1949
1950		/* refer to the same tests at the end of writeback_sb_inodes */
1951		if (wrote) {
1952			if (time_is_before_jiffies(start_time + HZ / 10UL))
1953				break;
1954			if (work->nr_pages <= 0)
1955				break;
1956		}
1957	}
1958	/* Leave any unwritten inodes on b_io */
1959	return wrote;
1960}
1961
1962static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
1963				enum wb_reason reason)
1964{
1965	struct wb_writeback_work work = {
1966		.nr_pages	= nr_pages,
1967		.sync_mode	= WB_SYNC_NONE,
1968		.range_cyclic	= 1,
1969		.reason		= reason,
1970	};
1971	struct blk_plug plug;
1972
1973	blk_start_plug(&plug);
1974	spin_lock(&wb->list_lock);
1975	if (list_empty(&wb->b_io))
1976		queue_io(wb, &work, jiffies);
1977	__writeback_inodes_wb(wb, &work);
1978	spin_unlock(&wb->list_lock);
1979	blk_finish_plug(&plug);
1980
1981	return nr_pages - work.nr_pages;
1982}
1983
1984/*
1985 * Explicit flushing or periodic writeback of "old" data.
1986 *
1987 * Define "old": the first time one of an inode's pages is dirtied, we mark the
1988 * dirtying-time in the inode's address_space.  So this periodic writeback code
1989 * just walks the superblock inode list, writing back any inodes which are
1990 * older than a specific point in time.
1991 *
1992 * Try to run once per dirty_writeback_interval.  But if a writeback event
1993 * takes longer than a dirty_writeback_interval interval, then leave a
1994 * one-second gap.
1995 *
1996 * dirtied_before takes precedence over nr_to_write.  So we'll only write back
1997 * all dirty pages if they are all attached to "old" mappings.
1998 */
1999static long wb_writeback(struct bdi_writeback *wb,
2000			 struct wb_writeback_work *work)
2001{
2002	unsigned long wb_start = jiffies;
2003	long nr_pages = work->nr_pages;
2004	unsigned long dirtied_before = jiffies;
2005	struct inode *inode;
2006	long progress;
2007	struct blk_plug plug;
2008
2009	blk_start_plug(&plug);
2010	spin_lock(&wb->list_lock);
2011	for (;;) {
2012		/*
2013		 * Stop writeback when nr_pages has been consumed
2014		 */
2015		if (work->nr_pages <= 0)
2016			break;
2017
2018		/*
2019		 * Background writeout and kupdate-style writeback may
2020		 * run forever. Stop them if there is other work to do
2021		 * so that e.g. sync can proceed. They'll be restarted
2022		 * after the other works are all done.
2023		 */
2024		if ((work->for_background || work->for_kupdate) &&
2025		    !list_empty(&wb->work_list))
2026			break;
2027
2028		/*
2029		 * For background writeout, stop when we are below the
2030		 * background dirty threshold
2031		 */
2032		if (work->for_background && !wb_over_bg_thresh(wb))
2033			break;
2034
2035		/*
2036		 * Kupdate and background works are special and we want to
2037		 * include all inodes that need writing. Livelock avoidance is
2038		 * handled by these works yielding to any other work so we are
2039		 * safe.
2040		 */
2041		if (work->for_kupdate) {
2042			dirtied_before = jiffies -
2043				msecs_to_jiffies(dirty_expire_interval * 10);
2044		} else if (work->for_background)
2045			dirtied_before = jiffies;
2046
2047		trace_writeback_start(wb, work);
2048		if (list_empty(&wb->b_io))
2049			queue_io(wb, work, dirtied_before);
2050		if (work->sb)
2051			progress = writeback_sb_inodes(work->sb, wb, work);
2052		else
2053			progress = __writeback_inodes_wb(wb, work);
2054		trace_writeback_written(wb, work);
2055
2056		wb_update_bandwidth(wb, wb_start);
2057
2058		/*
2059		 * Did we write something? Try for more
2060		 *
2061		 * Dirty inodes are moved to b_io for writeback in batches.
2062		 * The completion of the current batch does not necessarily
2063		 * mean the overall work is done. So we keep looping as long
2064		 * as made some progress on cleaning pages or inodes.
2065		 */
2066		if (progress)
2067			continue;
2068		/*
2069		 * No more inodes for IO, bail
2070		 */
2071		if (list_empty(&wb->b_more_io))
2072			break;
2073		/*
2074		 * Nothing written. Wait for some inode to
2075		 * become available for writeback. Otherwise
2076		 * we'll just busyloop.
2077		 */
2078		trace_writeback_wait(wb, work);
2079		inode = wb_inode(wb->b_more_io.prev);
2080		spin_lock(&inode->i_lock);
2081		spin_unlock(&wb->list_lock);
2082		/* This function drops i_lock... */
2083		inode_sleep_on_writeback(inode);
2084		spin_lock(&wb->list_lock);
2085	}
2086	spin_unlock(&wb->list_lock);
2087	blk_finish_plug(&plug);
2088
2089	return nr_pages - work->nr_pages;
2090}
2091
2092/*
2093 * Return the next wb_writeback_work struct that hasn't been processed yet.
2094 */
2095static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
2096{
2097	struct wb_writeback_work *work = NULL;
2098
2099	spin_lock_bh(&wb->work_lock);
2100	if (!list_empty(&wb->work_list)) {
2101		work = list_entry(wb->work_list.next,
2102				  struct wb_writeback_work, list);
2103		list_del_init(&work->list);
2104	}
2105	spin_unlock_bh(&wb->work_lock);
2106	return work;
2107}
2108
2109static long wb_check_background_flush(struct bdi_writeback *wb)
2110{
2111	if (wb_over_bg_thresh(wb)) {
2112
2113		struct wb_writeback_work work = {
2114			.nr_pages	= LONG_MAX,
2115			.sync_mode	= WB_SYNC_NONE,
2116			.for_background	= 1,
2117			.range_cyclic	= 1,
2118			.reason		= WB_REASON_BACKGROUND,
2119		};
2120
2121		return wb_writeback(wb, &work);
2122	}
2123
2124	return 0;
2125}
2126
2127static long wb_check_old_data_flush(struct bdi_writeback *wb)
2128{
2129	unsigned long expired;
2130	long nr_pages;
2131
2132	/*
2133	 * When set to zero, disable periodic writeback
2134	 */
2135	if (!dirty_writeback_interval)
2136		return 0;
2137
2138	expired = wb->last_old_flush +
2139			msecs_to_jiffies(dirty_writeback_interval * 10);
2140	if (time_before(jiffies, expired))
2141		return 0;
2142
2143	wb->last_old_flush = jiffies;
2144	nr_pages = get_nr_dirty_pages();
2145
2146	if (nr_pages) {
2147		struct wb_writeback_work work = {
2148			.nr_pages	= nr_pages,
2149			.sync_mode	= WB_SYNC_NONE,
2150			.for_kupdate	= 1,
2151			.range_cyclic	= 1,
2152			.reason		= WB_REASON_PERIODIC,
2153		};
2154
2155		return wb_writeback(wb, &work);
2156	}
2157
2158	return 0;
2159}
2160
2161static long wb_check_start_all(struct bdi_writeback *wb)
2162{
2163	long nr_pages;
2164
2165	if (!test_bit(WB_start_all, &wb->state))
2166		return 0;
2167
2168	nr_pages = get_nr_dirty_pages();
2169	if (nr_pages) {
2170		struct wb_writeback_work work = {
2171			.nr_pages	= wb_split_bdi_pages(wb, nr_pages),
2172			.sync_mode	= WB_SYNC_NONE,
2173			.range_cyclic	= 1,
2174			.reason		= wb->start_all_reason,
2175		};
2176
2177		nr_pages = wb_writeback(wb, &work);
2178	}
2179
2180	clear_bit(WB_start_all, &wb->state);
2181	return nr_pages;
2182}
2183
2184
2185/*
2186 * Retrieve work items and do the writeback they describe
2187 */
2188static long wb_do_writeback(struct bdi_writeback *wb)
2189{
2190	struct wb_writeback_work *work;
2191	long wrote = 0;
2192
2193	set_bit(WB_writeback_running, &wb->state);
2194	while ((work = get_next_work_item(wb)) != NULL) {
2195		trace_writeback_exec(wb, work);
2196		wrote += wb_writeback(wb, work);
2197		finish_writeback_work(wb, work);
2198	}
2199
2200	/*
2201	 * Check for a flush-everything request
2202	 */
2203	wrote += wb_check_start_all(wb);
2204
2205	/*
2206	 * Check for periodic writeback, kupdated() style
2207	 */
2208	wrote += wb_check_old_data_flush(wb);
2209	wrote += wb_check_background_flush(wb);
2210	clear_bit(WB_writeback_running, &wb->state);
2211
2212	return wrote;
2213}
2214
2215/*
2216 * Handle writeback of dirty data for the device backed by this bdi. Also
2217 * reschedules periodically and does kupdated style flushing.
2218 */
2219void wb_workfn(struct work_struct *work)
2220{
2221	struct bdi_writeback *wb = container_of(to_delayed_work(work),
2222						struct bdi_writeback, dwork);
2223	long pages_written;
2224
2225	set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
2226	current->flags |= PF_SWAPWRITE;
2227
2228	if (likely(!current_is_workqueue_rescuer() ||
2229		   !test_bit(WB_registered, &wb->state))) {
2230		/*
2231		 * The normal path.  Keep writing back @wb until its
2232		 * work_list is empty.  Note that this path is also taken
2233		 * if @wb is shutting down even when we're running off the
2234		 * rescuer as work_list needs to be drained.
2235		 */
2236		do {
2237			pages_written = wb_do_writeback(wb);
2238			trace_writeback_pages_written(pages_written);
2239		} while (!list_empty(&wb->work_list));
2240	} else {
2241		/*
2242		 * bdi_wq can't get enough workers and we're running off
2243		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
2244		 * enough for efficient IO.
2245		 */
2246		pages_written = writeback_inodes_wb(wb, 1024,
2247						    WB_REASON_FORKER_THREAD);
2248		trace_writeback_pages_written(pages_written);
2249	}
2250
2251	if (!list_empty(&wb->work_list))
2252		wb_wakeup(wb);
2253	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2254		wb_wakeup_delayed(wb);
2255
2256	current->flags &= ~PF_SWAPWRITE;
2257}
2258
2259/*
2260 * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
2261 * write back the whole world.
2262 */
2263static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2264					 enum wb_reason reason)
2265{
2266	struct bdi_writeback *wb;
2267
2268	if (!bdi_has_dirty_io(bdi))
2269		return;
2270
2271	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2272		wb_start_writeback(wb, reason);
2273}
2274
2275void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2276				enum wb_reason reason)
2277{
2278	rcu_read_lock();
2279	__wakeup_flusher_threads_bdi(bdi, reason);
2280	rcu_read_unlock();
2281}
2282
2283/*
2284 * Wakeup the flusher threads to start writeback of all currently dirty pages
2285 */
2286void wakeup_flusher_threads(enum wb_reason reason)
2287{
2288	struct backing_dev_info *bdi;
2289
2290	/*
2291	 * If we are expecting writeback progress we must submit plugged IO.
2292	 */
2293	if (blk_needs_flush_plug(current))
2294		blk_schedule_flush_plug(current);
2295
2296	rcu_read_lock();
2297	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2298		__wakeup_flusher_threads_bdi(bdi, reason);
2299	rcu_read_unlock();
2300}
2301
2302/*
2303 * Wake up bdi's periodically to make sure dirtytime inodes gets
2304 * written back periodically.  We deliberately do *not* check the
2305 * b_dirtytime list in wb_has_dirty_io(), since this would cause the
2306 * kernel to be constantly waking up once there are any dirtytime
2307 * inodes on the system.  So instead we define a separate delayed work
2308 * function which gets called much more rarely.  (By default, only
2309 * once every 12 hours.)
2310 *
2311 * If there is any other write activity going on in the file system,
2312 * this function won't be necessary.  But if the only thing that has
2313 * happened on the file system is a dirtytime inode caused by an atime
2314 * update, we need this infrastructure below to make sure that inode
2315 * eventually gets pushed out to disk.
2316 */
2317static void wakeup_dirtytime_writeback(struct work_struct *w);
2318static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
2319
2320static void wakeup_dirtytime_writeback(struct work_struct *w)
2321{
2322	struct backing_dev_info *bdi;
2323
2324	rcu_read_lock();
2325	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
2326		struct bdi_writeback *wb;
2327
2328		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2329			if (!list_empty(&wb->b_dirty_time))
2330				wb_wakeup(wb);
2331	}
2332	rcu_read_unlock();
2333	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2334}
2335
2336static int __init start_dirtytime_writeback(void)
2337{
2338	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2339	return 0;
2340}
2341__initcall(start_dirtytime_writeback);
2342
2343int dirtytime_interval_handler(struct ctl_table *table, int write,
2344			       void *buffer, size_t *lenp, loff_t *ppos)
2345{
2346	int ret;
2347
2348	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2349	if (ret == 0 && write)
2350		mod_delayed_work(system_wq, &dirtytime_work, 0);
2351	return ret;
2352}
2353
2354/**
2355 * __mark_inode_dirty -	internal function to mark an inode dirty
2356 *
2357 * @inode: inode to mark
2358 * @flags: what kind of dirty, e.g. I_DIRTY_SYNC.  This can be a combination of
2359 *	   multiple I_DIRTY_* flags, except that I_DIRTY_TIME can't be combined
2360 *	   with I_DIRTY_PAGES.
2361 *
2362 * Mark an inode as dirty.  We notify the filesystem, then update the inode's
2363 * dirty flags.  Then, if needed we add the inode to the appropriate dirty list.
2364 *
2365 * Most callers should use mark_inode_dirty() or mark_inode_dirty_sync()
2366 * instead of calling this directly.
2367 *
2368 * CAREFUL!  We only add the inode to the dirty list if it is hashed or if it
2369 * refers to a blockdev.  Unhashed inodes will never be added to the dirty list
2370 * even if they are later hashed, as they will have been marked dirty already.
2371 *
2372 * In short, ensure you hash any inodes _before_ you start marking them dirty.
2373 *
2374 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
2375 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
2376 * the kernel-internal blockdev inode represents the dirtying time of the
2377 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
2378 * page->mapping->host, so the page-dirtying time is recorded in the internal
2379 * blockdev inode.
2380 */
2381void __mark_inode_dirty(struct inode *inode, int flags)
2382{
2383	struct super_block *sb = inode->i_sb;
2384	int dirtytime = 0;
 
2385
2386	trace_writeback_mark_inode_dirty(inode, flags);
2387
2388	if (flags & I_DIRTY_INODE) {
2389		/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2390		 * Notify the filesystem about the inode being dirtied, so that
2391		 * (if needed) it can update on-disk fields and journal the
2392		 * inode.  This is only needed when the inode itself is being
2393		 * dirtied now.  I.e. it's only needed for I_DIRTY_INODE, not
2394		 * for just I_DIRTY_PAGES or I_DIRTY_TIME.
2395		 */
2396		trace_writeback_dirty_inode_start(inode, flags);
2397		if (sb->s_op->dirty_inode)
2398			sb->s_op->dirty_inode(inode, flags & I_DIRTY_INODE);
 
2399		trace_writeback_dirty_inode(inode, flags);
2400
2401		/* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
2402		flags &= ~I_DIRTY_TIME;
2403	} else {
2404		/*
2405		 * Else it's either I_DIRTY_PAGES, I_DIRTY_TIME, or nothing.
2406		 * (We don't support setting both I_DIRTY_PAGES and I_DIRTY_TIME
2407		 * in one call to __mark_inode_dirty().)
2408		 */
2409		dirtytime = flags & I_DIRTY_TIME;
2410		WARN_ON_ONCE(dirtytime && flags != I_DIRTY_TIME);
2411	}
2412
2413	/*
2414	 * Paired with smp_mb() in __writeback_single_inode() for the
2415	 * following lockless i_state test.  See there for details.
2416	 */
2417	smp_mb();
2418
2419	if (((inode->i_state & flags) == flags) ||
2420	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
2421		return;
2422
2423	spin_lock(&inode->i_lock);
2424	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
2425		goto out_unlock_inode;
2426	if ((inode->i_state & flags) != flags) {
2427		const int was_dirty = inode->i_state & I_DIRTY;
2428
2429		inode_attach_wb(inode, NULL);
2430
2431		/* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
2432		if (flags & I_DIRTY_INODE)
2433			inode->i_state &= ~I_DIRTY_TIME;
2434		inode->i_state |= flags;
2435
2436		/*
 
 
 
 
 
 
 
 
 
 
 
2437		 * If the inode is queued for writeback by flush worker, just
2438		 * update its dirty state. Once the flush worker is done with
2439		 * the inode it will place it on the appropriate superblock
2440		 * list, based upon its state.
2441		 */
2442		if (inode->i_state & I_SYNC_QUEUED)
2443			goto out_unlock_inode;
2444
2445		/*
2446		 * Only add valid (hashed) inodes to the superblock's
2447		 * dirty list.  Add blockdev inodes as well.
2448		 */
2449		if (!S_ISBLK(inode->i_mode)) {
2450			if (inode_unhashed(inode))
2451				goto out_unlock_inode;
2452		}
2453		if (inode->i_state & I_FREEING)
2454			goto out_unlock_inode;
2455
2456		/*
2457		 * If the inode was already on b_dirty/b_io/b_more_io, don't
2458		 * reposition it (that would break b_dirty time-ordering).
2459		 */
2460		if (!was_dirty) {
2461			struct bdi_writeback *wb;
2462			struct list_head *dirty_list;
2463			bool wakeup_bdi = false;
2464
2465			wb = locked_inode_to_wb_and_lock_list(inode);
2466
2467			inode->dirtied_when = jiffies;
2468			if (dirtytime)
2469				inode->dirtied_time_when = jiffies;
2470
2471			if (inode->i_state & I_DIRTY)
2472				dirty_list = &wb->b_dirty;
2473			else
2474				dirty_list = &wb->b_dirty_time;
2475
2476			wakeup_bdi = inode_io_list_move_locked(inode, wb,
2477							       dirty_list);
2478
2479			spin_unlock(&wb->list_lock);
 
2480			trace_writeback_dirty_inode_enqueue(inode);
2481
2482			/*
2483			 * If this is the first dirty inode for this bdi,
2484			 * we have to wake-up the corresponding bdi thread
2485			 * to make sure background write-back happens
2486			 * later.
2487			 */
2488			if (wakeup_bdi &&
2489			    (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
2490				wb_wakeup_delayed(wb);
2491			return;
2492		}
2493	}
2494out_unlock_inode:
 
 
2495	spin_unlock(&inode->i_lock);
2496}
2497EXPORT_SYMBOL(__mark_inode_dirty);
2498
2499/*
2500 * The @s_sync_lock is used to serialise concurrent sync operations
2501 * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
2502 * Concurrent callers will block on the s_sync_lock rather than doing contending
2503 * walks. The queueing maintains sync(2) required behaviour as all the IO that
2504 * has been issued up to the time this function is enter is guaranteed to be
2505 * completed by the time we have gained the lock and waited for all IO that is
2506 * in progress regardless of the order callers are granted the lock.
2507 */
2508static void wait_sb_inodes(struct super_block *sb)
2509{
2510	LIST_HEAD(sync_list);
2511
2512	/*
2513	 * We need to be protected against the filesystem going from
2514	 * r/o to r/w or vice versa.
2515	 */
2516	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2517
2518	mutex_lock(&sb->s_sync_lock);
2519
2520	/*
2521	 * Splice the writeback list onto a temporary list to avoid waiting on
2522	 * inodes that have started writeback after this point.
2523	 *
2524	 * Use rcu_read_lock() to keep the inodes around until we have a
2525	 * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
2526	 * the local list because inodes can be dropped from either by writeback
2527	 * completion.
2528	 */
2529	rcu_read_lock();
2530	spin_lock_irq(&sb->s_inode_wblist_lock);
2531	list_splice_init(&sb->s_inodes_wb, &sync_list);
2532
2533	/*
2534	 * Data integrity sync. Must wait for all pages under writeback, because
2535	 * there may have been pages dirtied before our sync call, but which had
2536	 * writeout started before we write it out.  In which case, the inode
2537	 * may not be on the dirty list, but we still have to wait for that
2538	 * writeout.
2539	 */
2540	while (!list_empty(&sync_list)) {
2541		struct inode *inode = list_first_entry(&sync_list, struct inode,
2542						       i_wb_list);
2543		struct address_space *mapping = inode->i_mapping;
2544
2545		/*
2546		 * Move each inode back to the wb list before we drop the lock
2547		 * to preserve consistency between i_wb_list and the mapping
2548		 * writeback tag. Writeback completion is responsible to remove
2549		 * the inode from either list once the writeback tag is cleared.
2550		 */
2551		list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2552
2553		/*
2554		 * The mapping can appear untagged while still on-list since we
2555		 * do not have the mapping lock. Skip it here, wb completion
2556		 * will remove it.
2557		 */
2558		if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2559			continue;
2560
2561		spin_unlock_irq(&sb->s_inode_wblist_lock);
2562
2563		spin_lock(&inode->i_lock);
2564		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2565			spin_unlock(&inode->i_lock);
2566
2567			spin_lock_irq(&sb->s_inode_wblist_lock);
2568			continue;
2569		}
2570		__iget(inode);
2571		spin_unlock(&inode->i_lock);
2572		rcu_read_unlock();
2573
2574		/*
2575		 * We keep the error status of individual mapping so that
2576		 * applications can catch the writeback error using fsync(2).
2577		 * See filemap_fdatawait_keep_errors() for details.
2578		 */
2579		filemap_fdatawait_keep_errors(mapping);
2580
2581		cond_resched();
2582
2583		iput(inode);
2584
2585		rcu_read_lock();
2586		spin_lock_irq(&sb->s_inode_wblist_lock);
2587	}
2588	spin_unlock_irq(&sb->s_inode_wblist_lock);
2589	rcu_read_unlock();
2590	mutex_unlock(&sb->s_sync_lock);
2591}
2592
2593static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
2594				     enum wb_reason reason, bool skip_if_busy)
2595{
2596	struct backing_dev_info *bdi = sb->s_bdi;
2597	DEFINE_WB_COMPLETION(done, bdi);
2598	struct wb_writeback_work work = {
2599		.sb			= sb,
2600		.sync_mode		= WB_SYNC_NONE,
2601		.tagged_writepages	= 1,
2602		.done			= &done,
2603		.nr_pages		= nr,
2604		.reason			= reason,
2605	};
2606
2607	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
2608		return;
2609	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2610
2611	bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
2612	wb_wait_for_completion(&done);
2613}
2614
2615/**
2616 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
2617 * @sb: the superblock
2618 * @nr: the number of pages to write
2619 * @reason: reason why some writeback work initiated
2620 *
2621 * Start writeback on some inodes on this super_block. No guarantees are made
2622 * on how many (if any) will be written, and this function does not wait
2623 * for IO completion of submitted IO.
2624 */
2625void writeback_inodes_sb_nr(struct super_block *sb,
2626			    unsigned long nr,
2627			    enum wb_reason reason)
2628{
2629	__writeback_inodes_sb_nr(sb, nr, reason, false);
2630}
2631EXPORT_SYMBOL(writeback_inodes_sb_nr);
2632
2633/**
2634 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
2635 * @sb: the superblock
2636 * @reason: reason why some writeback work was initiated
2637 *
2638 * Start writeback on some inodes on this super_block. No guarantees are made
2639 * on how many (if any) will be written, and this function does not wait
2640 * for IO completion of submitted IO.
2641 */
2642void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2643{
2644	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
2645}
2646EXPORT_SYMBOL(writeback_inodes_sb);
2647
2648/**
2649 * try_to_writeback_inodes_sb - try to start writeback if none underway
2650 * @sb: the superblock
2651 * @reason: reason why some writeback work was initiated
2652 *
2653 * Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
2654 */
2655void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2656{
2657	if (!down_read_trylock(&sb->s_umount))
2658		return;
2659
2660	__writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
2661	up_read(&sb->s_umount);
2662}
2663EXPORT_SYMBOL(try_to_writeback_inodes_sb);
2664
2665/**
2666 * sync_inodes_sb	-	sync sb inode pages
2667 * @sb: the superblock
2668 *
2669 * This function writes and waits on any dirty inode belonging to this
2670 * super_block.
2671 */
2672void sync_inodes_sb(struct super_block *sb)
2673{
2674	struct backing_dev_info *bdi = sb->s_bdi;
2675	DEFINE_WB_COMPLETION(done, bdi);
2676	struct wb_writeback_work work = {
2677		.sb		= sb,
2678		.sync_mode	= WB_SYNC_ALL,
2679		.nr_pages	= LONG_MAX,
2680		.range_cyclic	= 0,
2681		.done		= &done,
2682		.reason		= WB_REASON_SYNC,
2683		.for_sync	= 1,
2684	};
2685
2686	/*
2687	 * Can't skip on !bdi_has_dirty() because we should wait for !dirty
2688	 * inodes under writeback and I_DIRTY_TIME inodes ignored by
2689	 * bdi_has_dirty() need to be written out too.
2690	 */
2691	if (bdi == &noop_backing_dev_info)
2692		return;
2693	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2694
2695	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
2696	bdi_down_write_wb_switch_rwsem(bdi);
2697	bdi_split_work_to_wbs(bdi, &work, false);
2698	wb_wait_for_completion(&done);
2699	bdi_up_write_wb_switch_rwsem(bdi);
2700
2701	wait_sb_inodes(sb);
2702}
2703EXPORT_SYMBOL(sync_inodes_sb);
2704
2705/**
2706 * write_inode_now	-	write an inode to disk
2707 * @inode: inode to write to disk
2708 * @sync: whether the write should be synchronous or not
2709 *
2710 * This function commits an inode to disk immediately if it is dirty. This is
2711 * primarily needed by knfsd.
2712 *
2713 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
2714 */
2715int write_inode_now(struct inode *inode, int sync)
2716{
2717	struct writeback_control wbc = {
2718		.nr_to_write = LONG_MAX,
2719		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
2720		.range_start = 0,
2721		.range_end = LLONG_MAX,
2722	};
2723
2724	if (!mapping_can_writeback(inode->i_mapping))
2725		wbc.nr_to_write = 0;
2726
2727	might_sleep();
2728	return writeback_single_inode(inode, &wbc);
2729}
2730EXPORT_SYMBOL(write_inode_now);
2731
2732/**
2733 * sync_inode - write an inode and its pages to disk.
2734 * @inode: the inode to sync
2735 * @wbc: controls the writeback mode
2736 *
2737 * sync_inode() will write an inode and its pages to disk.  It will also
2738 * correctly update the inode on its superblock's dirty inode lists and will
2739 * update inode->i_state.
2740 *
2741 * The caller must have a ref on the inode.
2742 */
2743int sync_inode(struct inode *inode, struct writeback_control *wbc)
2744{
2745	return writeback_single_inode(inode, wbc);
2746}
2747EXPORT_SYMBOL(sync_inode);
2748
2749/**
2750 * sync_inode_metadata - write an inode to disk
2751 * @inode: the inode to sync
2752 * @wait: wait for I/O to complete.
2753 *
2754 * Write an inode to disk and adjust its dirty state after completion.
2755 *
2756 * Note: only writes the actual inode, no associated data or other metadata.
2757 */
2758int sync_inode_metadata(struct inode *inode, int wait)
2759{
2760	struct writeback_control wbc = {
2761		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
2762		.nr_to_write = 0, /* metadata-only */
2763	};
2764
2765	return sync_inode(inode, &wbc);
2766}
2767EXPORT_SYMBOL(sync_inode_metadata);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * fs/fs-writeback.c
   4 *
   5 * Copyright (C) 2002, Linus Torvalds.
   6 *
   7 * Contains all the functions related to writing back and waiting
   8 * upon dirty inodes against superblocks, and writing back dirty
   9 * pages against inodes.  ie: data writeback.  Writeout of the
  10 * inode itself is not handled here.
  11 *
  12 * 10Apr2002	Andrew Morton
  13 *		Split out of fs/inode.c
  14 *		Additions for address_space-based writeback
  15 */
  16
  17#include <linux/kernel.h>
  18#include <linux/export.h>
  19#include <linux/spinlock.h>
  20#include <linux/slab.h>
  21#include <linux/sched.h>
  22#include <linux/fs.h>
  23#include <linux/mm.h>
  24#include <linux/pagemap.h>
  25#include <linux/kthread.h>
  26#include <linux/writeback.h>
  27#include <linux/blkdev.h>
  28#include <linux/backing-dev.h>
  29#include <linux/tracepoint.h>
  30#include <linux/device.h>
  31#include <linux/memcontrol.h>
  32#include "internal.h"
  33
  34/*
  35 * 4MB minimal write chunk size
  36 */
  37#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))
  38
  39/*
  40 * Passed into wb_writeback(), essentially a subset of writeback_control
  41 */
  42struct wb_writeback_work {
  43	long nr_pages;
  44	struct super_block *sb;
  45	enum writeback_sync_modes sync_mode;
  46	unsigned int tagged_writepages:1;
  47	unsigned int for_kupdate:1;
  48	unsigned int range_cyclic:1;
  49	unsigned int for_background:1;
  50	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
  51	unsigned int auto_free:1;	/* free on completion */
  52	enum wb_reason reason;		/* why was writeback initiated? */
  53
  54	struct list_head list;		/* pending work list */
  55	struct wb_completion *done;	/* set if the caller waits */
  56};
  57
  58/*
  59 * If an inode is constantly having its pages dirtied, but then the
  60 * updates stop dirtytime_expire_interval seconds in the past, it's
  61 * possible for the worst case time between when an inode has its
  62 * timestamps updated and when they finally get written out to be two
  63 * dirtytime_expire_intervals.  We set the default to 12 hours (in
  64 * seconds), which means most of the time inodes will have their
  65 * timestamps written to disk after 12 hours, but in the worst case a
  66 * few inodes might not their timestamps updated for 24 hours.
  67 */
  68unsigned int dirtytime_expire_interval = 12 * 60 * 60;
  69
  70static inline struct inode *wb_inode(struct list_head *head)
  71{
  72	return list_entry(head, struct inode, i_io_list);
  73}
  74
  75/*
  76 * Include the creation of the trace points after defining the
  77 * wb_writeback_work structure and inline functions so that the definition
  78 * remains local to this file.
  79 */
  80#define CREATE_TRACE_POINTS
  81#include <trace/events/writeback.h>
  82
  83EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
  84
  85static bool wb_io_lists_populated(struct bdi_writeback *wb)
  86{
  87	if (wb_has_dirty_io(wb)) {
  88		return false;
  89	} else {
  90		set_bit(WB_has_dirty_io, &wb->state);
  91		WARN_ON_ONCE(!wb->avg_write_bandwidth);
  92		atomic_long_add(wb->avg_write_bandwidth,
  93				&wb->bdi->tot_write_bandwidth);
  94		return true;
  95	}
  96}
  97
  98static void wb_io_lists_depopulated(struct bdi_writeback *wb)
  99{
 100	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
 101	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
 102		clear_bit(WB_has_dirty_io, &wb->state);
 103		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
 104					&wb->bdi->tot_write_bandwidth) < 0);
 105	}
 106}
 107
 108/**
 109 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
 110 * @inode: inode to be moved
 111 * @wb: target bdi_writeback
 112 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
 113 *
 114 * Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io.
 115 * Returns %true if @inode is the first occupant of the !dirty_time IO
 116 * lists; otherwise, %false.
 117 */
 118static bool inode_io_list_move_locked(struct inode *inode,
 119				      struct bdi_writeback *wb,
 120				      struct list_head *head)
 121{
 122	assert_spin_locked(&wb->list_lock);
 123	assert_spin_locked(&inode->i_lock);
 124	WARN_ON_ONCE(inode->i_state & I_FREEING);
 125
 126	list_move(&inode->i_io_list, head);
 127
 128	/* dirty_time doesn't count as dirty_io until expiration */
 129	if (head != &wb->b_dirty_time)
 130		return wb_io_lists_populated(wb);
 131
 132	wb_io_lists_depopulated(wb);
 133	return false;
 134}
 135
 136static void wb_wakeup(struct bdi_writeback *wb)
 137{
 138	spin_lock_irq(&wb->work_lock);
 139	if (test_bit(WB_registered, &wb->state))
 140		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 141	spin_unlock_irq(&wb->work_lock);
 142}
 143
 144static void finish_writeback_work(struct bdi_writeback *wb,
 145				  struct wb_writeback_work *work)
 146{
 147	struct wb_completion *done = work->done;
 148
 149	if (work->auto_free)
 150		kfree(work);
 151	if (done) {
 152		wait_queue_head_t *waitq = done->waitq;
 153
 154		/* @done can't be accessed after the following dec */
 155		if (atomic_dec_and_test(&done->cnt))
 156			wake_up_all(waitq);
 157	}
 158}
 159
 160static void wb_queue_work(struct bdi_writeback *wb,
 161			  struct wb_writeback_work *work)
 162{
 163	trace_writeback_queue(wb, work);
 164
 165	if (work->done)
 166		atomic_inc(&work->done->cnt);
 167
 168	spin_lock_irq(&wb->work_lock);
 169
 170	if (test_bit(WB_registered, &wb->state)) {
 171		list_add_tail(&work->list, &wb->work_list);
 172		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 173	} else
 174		finish_writeback_work(wb, work);
 175
 176	spin_unlock_irq(&wb->work_lock);
 177}
 178
 179/**
 180 * wb_wait_for_completion - wait for completion of bdi_writeback_works
 181 * @done: target wb_completion
 182 *
 183 * Wait for one or more work items issued to @bdi with their ->done field
 184 * set to @done, which should have been initialized with
 185 * DEFINE_WB_COMPLETION().  This function returns after all such work items
 186 * are completed.  Work items which are waited upon aren't freed
 187 * automatically on completion.
 188 */
 189void wb_wait_for_completion(struct wb_completion *done)
 190{
 191	atomic_dec(&done->cnt);		/* put down the initial count */
 192	wait_event(*done->waitq, !atomic_read(&done->cnt));
 193}
 194
 195#ifdef CONFIG_CGROUP_WRITEBACK
 196
 197/*
 198 * Parameters for foreign inode detection, see wbc_detach_inode() to see
 199 * how they're used.
 200 *
 201 * These paramters are inherently heuristical as the detection target
 202 * itself is fuzzy.  All we want to do is detaching an inode from the
 203 * current owner if it's being written to by some other cgroups too much.
 204 *
 205 * The current cgroup writeback is built on the assumption that multiple
 206 * cgroups writing to the same inode concurrently is very rare and a mode
 207 * of operation which isn't well supported.  As such, the goal is not
 208 * taking too long when a different cgroup takes over an inode while
 209 * avoiding too aggressive flip-flops from occasional foreign writes.
 210 *
 211 * We record, very roughly, 2s worth of IO time history and if more than
 212 * half of that is foreign, trigger the switch.  The recording is quantized
 213 * to 16 slots.  To avoid tiny writes from swinging the decision too much,
 214 * writes smaller than 1/8 of avg size are ignored.
 215 */
 216#define WB_FRN_TIME_SHIFT	13	/* 1s = 2^13, upto 8 secs w/ 16bit */
 217#define WB_FRN_TIME_AVG_SHIFT	3	/* avg = avg * 7/8 + new * 1/8 */
 218#define WB_FRN_TIME_CUT_DIV	8	/* ignore rounds < avg / 8 */
 219#define WB_FRN_TIME_PERIOD	(2 * (1 << WB_FRN_TIME_SHIFT))	/* 2s */
 220
 221#define WB_FRN_HIST_SLOTS	16	/* inode->i_wb_frn_history is 16bit */
 222#define WB_FRN_HIST_UNIT	(WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
 223					/* each slot's duration is 2s / 16 */
 224#define WB_FRN_HIST_THR_SLOTS	(WB_FRN_HIST_SLOTS / 2)
 225					/* if foreign slots >= 8, switch */
 226#define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
 227					/* one round can affect upto 5 slots */
 228#define WB_FRN_MAX_IN_FLIGHT	1024	/* don't queue too many concurrently */
 229
 230/*
 231 * Maximum inodes per isw.  A specific value has been chosen to make
 232 * struct inode_switch_wbs_context fit into 1024 bytes kmalloc.
 233 */
 234#define WB_MAX_INODES_PER_ISW  ((1024UL - sizeof(struct inode_switch_wbs_context)) \
 235                                / sizeof(struct inode *))
 236
 237static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
 238static struct workqueue_struct *isw_wq;
 239
 240void __inode_attach_wb(struct inode *inode, struct page *page)
 241{
 242	struct backing_dev_info *bdi = inode_to_bdi(inode);
 243	struct bdi_writeback *wb = NULL;
 244
 245	if (inode_cgwb_enabled(inode)) {
 246		struct cgroup_subsys_state *memcg_css;
 247
 248		if (page) {
 249			memcg_css = mem_cgroup_css_from_page(page);
 250			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 251		} else {
 252			/* must pin memcg_css, see wb_get_create() */
 253			memcg_css = task_get_css(current, memory_cgrp_id);
 254			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 255			css_put(memcg_css);
 256		}
 257	}
 258
 259	if (!wb)
 260		wb = &bdi->wb;
 261
 262	/*
 263	 * There may be multiple instances of this function racing to
 264	 * update the same inode.  Use cmpxchg() to tell the winner.
 265	 */
 266	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
 267		wb_put(wb);
 268}
 269EXPORT_SYMBOL_GPL(__inode_attach_wb);
 270
 271/**
 272 * inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
 273 * @inode: inode of interest with i_lock held
 274 * @wb: target bdi_writeback
 275 *
 276 * Remove the inode from wb's io lists and if necessarily put onto b_attached
 277 * list.  Only inodes attached to cgwb's are kept on this list.
 278 */
 279static void inode_cgwb_move_to_attached(struct inode *inode,
 280					struct bdi_writeback *wb)
 281{
 282	assert_spin_locked(&wb->list_lock);
 283	assert_spin_locked(&inode->i_lock);
 284	WARN_ON_ONCE(inode->i_state & I_FREEING);
 285
 286	inode->i_state &= ~I_SYNC_QUEUED;
 287	if (wb != &wb->bdi->wb)
 288		list_move(&inode->i_io_list, &wb->b_attached);
 289	else
 290		list_del_init(&inode->i_io_list);
 291	wb_io_lists_depopulated(wb);
 292}
 293
 294/**
 295 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
 296 * @inode: inode of interest with i_lock held
 297 *
 298 * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
 299 * held on entry and is released on return.  The returned wb is guaranteed
 300 * to stay @inode's associated wb until its list_lock is released.
 301 */
 302static struct bdi_writeback *
 303locked_inode_to_wb_and_lock_list(struct inode *inode)
 304	__releases(&inode->i_lock)
 305	__acquires(&wb->list_lock)
 306{
 307	while (true) {
 308		struct bdi_writeback *wb = inode_to_wb(inode);
 309
 310		/*
 311		 * inode_to_wb() association is protected by both
 312		 * @inode->i_lock and @wb->list_lock but list_lock nests
 313		 * outside i_lock.  Drop i_lock and verify that the
 314		 * association hasn't changed after acquiring list_lock.
 315		 */
 316		wb_get(wb);
 317		spin_unlock(&inode->i_lock);
 318		spin_lock(&wb->list_lock);
 319
 320		/* i_wb may have changed inbetween, can't use inode_to_wb() */
 321		if (likely(wb == inode->i_wb)) {
 322			wb_put(wb);	/* @inode already has ref */
 323			return wb;
 324		}
 325
 326		spin_unlock(&wb->list_lock);
 327		wb_put(wb);
 328		cpu_relax();
 329		spin_lock(&inode->i_lock);
 330	}
 331}
 332
 333/**
 334 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
 335 * @inode: inode of interest
 336 *
 337 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
 338 * on entry.
 339 */
 340static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
 341	__acquires(&wb->list_lock)
 342{
 343	spin_lock(&inode->i_lock);
 344	return locked_inode_to_wb_and_lock_list(inode);
 345}
 346
 347struct inode_switch_wbs_context {
 348	struct rcu_work		work;
 349
 350	/*
 351	 * Multiple inodes can be switched at once.  The switching procedure
 352	 * consists of two parts, separated by a RCU grace period.  To make
 353	 * sure that the second part is executed for each inode gone through
 354	 * the first part, all inode pointers are placed into a NULL-terminated
 355	 * array embedded into struct inode_switch_wbs_context.  Otherwise
 356	 * an inode could be left in a non-consistent state.
 357	 */
 358	struct bdi_writeback	*new_wb;
 359	struct inode		*inodes[];
 360};
 361
 362static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 363{
 364	down_write(&bdi->wb_switch_rwsem);
 365}
 366
 367static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 368{
 369	up_write(&bdi->wb_switch_rwsem);
 370}
 371
 372static bool inode_do_switch_wbs(struct inode *inode,
 373				struct bdi_writeback *old_wb,
 374				struct bdi_writeback *new_wb)
 375{
 376	struct address_space *mapping = inode->i_mapping;
 377	XA_STATE(xas, &mapping->i_pages, 0);
 378	struct folio *folio;
 379	bool switched = false;
 380
 381	spin_lock(&inode->i_lock);
 382	xa_lock_irq(&mapping->i_pages);
 383
 384	/*
 385	 * Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
 386	 * path owns the inode and we shouldn't modify ->i_io_list.
 387	 */
 388	if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
 389		goto skip_switch;
 390
 391	trace_inode_switch_wbs(inode, old_wb, new_wb);
 392
 393	/*
 394	 * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
 395	 * to possibly dirty folios while PAGECACHE_TAG_WRITEBACK points to
 396	 * folios actually under writeback.
 397	 */
 398	xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
 399		if (folio_test_dirty(folio)) {
 400			long nr = folio_nr_pages(folio);
 401			wb_stat_mod(old_wb, WB_RECLAIMABLE, -nr);
 402			wb_stat_mod(new_wb, WB_RECLAIMABLE, nr);
 403		}
 404	}
 405
 406	xas_set(&xas, 0);
 407	xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
 408		long nr = folio_nr_pages(folio);
 409		WARN_ON_ONCE(!folio_test_writeback(folio));
 410		wb_stat_mod(old_wb, WB_WRITEBACK, -nr);
 411		wb_stat_mod(new_wb, WB_WRITEBACK, nr);
 412	}
 413
 414	if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
 415		atomic_dec(&old_wb->writeback_inodes);
 416		atomic_inc(&new_wb->writeback_inodes);
 417	}
 418
 419	wb_get(new_wb);
 420
 421	/*
 422	 * Transfer to @new_wb's IO list if necessary.  If the @inode is dirty,
 423	 * the specific list @inode was on is ignored and the @inode is put on
 424	 * ->b_dirty which is always correct including from ->b_dirty_time.
 425	 * The transfer preserves @inode->dirtied_when ordering.  If the @inode
 426	 * was clean, it means it was on the b_attached list, so move it onto
 427	 * the b_attached list of @new_wb.
 428	 */
 429	if (!list_empty(&inode->i_io_list)) {
 430		inode->i_wb = new_wb;
 431
 432		if (inode->i_state & I_DIRTY_ALL) {
 433			struct inode *pos;
 434
 435			list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
 436				if (time_after_eq(inode->dirtied_when,
 437						  pos->dirtied_when))
 438					break;
 439			inode_io_list_move_locked(inode, new_wb,
 440						  pos->i_io_list.prev);
 441		} else {
 442			inode_cgwb_move_to_attached(inode, new_wb);
 443		}
 444	} else {
 445		inode->i_wb = new_wb;
 446	}
 447
 448	/* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */
 449	inode->i_wb_frn_winner = 0;
 450	inode->i_wb_frn_avg_time = 0;
 451	inode->i_wb_frn_history = 0;
 452	switched = true;
 453skip_switch:
 454	/*
 455	 * Paired with load_acquire in unlocked_inode_to_wb_begin() and
 456	 * ensures that the new wb is visible if they see !I_WB_SWITCH.
 457	 */
 458	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
 459
 460	xa_unlock_irq(&mapping->i_pages);
 461	spin_unlock(&inode->i_lock);
 462
 463	return switched;
 464}
 465
 466static void inode_switch_wbs_work_fn(struct work_struct *work)
 467{
 468	struct inode_switch_wbs_context *isw =
 469		container_of(to_rcu_work(work), struct inode_switch_wbs_context, work);
 470	struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]);
 471	struct bdi_writeback *old_wb = isw->inodes[0]->i_wb;
 472	struct bdi_writeback *new_wb = isw->new_wb;
 473	unsigned long nr_switched = 0;
 474	struct inode **inodep;
 475
 476	/*
 477	 * If @inode switches cgwb membership while sync_inodes_sb() is
 478	 * being issued, sync_inodes_sb() might miss it.  Synchronize.
 479	 */
 480	down_read(&bdi->wb_switch_rwsem);
 481
 482	/*
 483	 * By the time control reaches here, RCU grace period has passed
 484	 * since I_WB_SWITCH assertion and all wb stat update transactions
 485	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
 486	 * synchronizing against the i_pages lock.
 487	 *
 488	 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
 489	 * gives us exclusion against all wb related operations on @inode
 490	 * including IO list manipulations and stat updates.
 491	 */
 492	if (old_wb < new_wb) {
 493		spin_lock(&old_wb->list_lock);
 494		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
 495	} else {
 496		spin_lock(&new_wb->list_lock);
 497		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 498	}
 499
 500	for (inodep = isw->inodes; *inodep; inodep++) {
 501		WARN_ON_ONCE((*inodep)->i_wb != old_wb);
 502		if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
 503			nr_switched++;
 504	}
 505
 506	spin_unlock(&new_wb->list_lock);
 507	spin_unlock(&old_wb->list_lock);
 508
 509	up_read(&bdi->wb_switch_rwsem);
 510
 511	if (nr_switched) {
 512		wb_wakeup(new_wb);
 513		wb_put_many(old_wb, nr_switched);
 514	}
 515
 516	for (inodep = isw->inodes; *inodep; inodep++)
 517		iput(*inodep);
 518	wb_put(new_wb);
 519	kfree(isw);
 520	atomic_dec(&isw_nr_in_flight);
 521}
 522
 523static bool inode_prepare_wbs_switch(struct inode *inode,
 524				     struct bdi_writeback *new_wb)
 525{
 526	/*
 527	 * Paired with smp_mb() in cgroup_writeback_umount().
 528	 * isw_nr_in_flight must be increased before checking SB_ACTIVE and
 529	 * grabbing an inode, otherwise isw_nr_in_flight can be observed as 0
 530	 * in cgroup_writeback_umount() and the isw_wq will be not flushed.
 531	 */
 532	smp_mb();
 533
 534	if (IS_DAX(inode))
 535		return false;
 536
 537	/* while holding I_WB_SWITCH, no one else can update the association */
 538	spin_lock(&inode->i_lock);
 539	if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
 540	    inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
 541	    inode_to_wb(inode) == new_wb) {
 542		spin_unlock(&inode->i_lock);
 543		return false;
 544	}
 545	inode->i_state |= I_WB_SWITCH;
 546	__iget(inode);
 547	spin_unlock(&inode->i_lock);
 548
 549	return true;
 550}
 551
 552/**
 553 * inode_switch_wbs - change the wb association of an inode
 554 * @inode: target inode
 555 * @new_wb_id: ID of the new wb
 556 *
 557 * Switch @inode's wb association to the wb identified by @new_wb_id.  The
 558 * switching is performed asynchronously and may fail silently.
 559 */
 560static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 561{
 562	struct backing_dev_info *bdi = inode_to_bdi(inode);
 563	struct cgroup_subsys_state *memcg_css;
 564	struct inode_switch_wbs_context *isw;
 565
 566	/* noop if seems to be already in progress */
 567	if (inode->i_state & I_WB_SWITCH)
 568		return;
 569
 570	/* avoid queueing a new switch if too many are already in flight */
 571	if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
 572		return;
 573
 574	isw = kzalloc(struct_size(isw, inodes, 2), GFP_ATOMIC);
 575	if (!isw)
 576		return;
 577
 578	atomic_inc(&isw_nr_in_flight);
 579
 580	/* find and pin the new wb */
 581	rcu_read_lock();
 582	memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
 583	if (memcg_css && !css_tryget(memcg_css))
 584		memcg_css = NULL;
 585	rcu_read_unlock();
 586	if (!memcg_css)
 587		goto out_free;
 588
 589	isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 590	css_put(memcg_css);
 591	if (!isw->new_wb)
 592		goto out_free;
 593
 594	if (!inode_prepare_wbs_switch(inode, isw->new_wb))
 595		goto out_free;
 596
 597	isw->inodes[0] = inode;
 598
 599	/*
 600	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
 601	 * the RCU protected stat update paths to grab the i_page
 602	 * lock so that stat transfer can synchronize against them.
 603	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 604	 */
 605	INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
 606	queue_rcu_work(isw_wq, &isw->work);
 607	return;
 608
 609out_free:
 610	atomic_dec(&isw_nr_in_flight);
 611	if (isw->new_wb)
 612		wb_put(isw->new_wb);
 613	kfree(isw);
 614}
 615
 616/**
 617 * cleanup_offline_cgwb - detach associated inodes
 618 * @wb: target wb
 619 *
 620 * Switch all inodes attached to @wb to a nearest living ancestor's wb in order
 621 * to eventually release the dying @wb.  Returns %true if not all inodes were
 622 * switched and the function has to be restarted.
 623 */
 624bool cleanup_offline_cgwb(struct bdi_writeback *wb)
 625{
 626	struct cgroup_subsys_state *memcg_css;
 627	struct inode_switch_wbs_context *isw;
 628	struct inode *inode;
 629	int nr;
 630	bool restart = false;
 631
 632	isw = kzalloc(struct_size(isw, inodes, WB_MAX_INODES_PER_ISW),
 633		      GFP_KERNEL);
 634	if (!isw)
 635		return restart;
 636
 637	atomic_inc(&isw_nr_in_flight);
 638
 639	for (memcg_css = wb->memcg_css->parent; memcg_css;
 640	     memcg_css = memcg_css->parent) {
 641		isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
 642		if (isw->new_wb)
 643			break;
 644	}
 645	if (unlikely(!isw->new_wb))
 646		isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
 647
 648	nr = 0;
 649	spin_lock(&wb->list_lock);
 650	list_for_each_entry(inode, &wb->b_attached, i_io_list) {
 651		if (!inode_prepare_wbs_switch(inode, isw->new_wb))
 652			continue;
 653
 654		isw->inodes[nr++] = inode;
 655
 656		if (nr >= WB_MAX_INODES_PER_ISW - 1) {
 657			restart = true;
 658			break;
 659		}
 660	}
 661	spin_unlock(&wb->list_lock);
 662
 663	/* no attached inodes? bail out */
 664	if (nr == 0) {
 665		atomic_dec(&isw_nr_in_flight);
 666		wb_put(isw->new_wb);
 667		kfree(isw);
 668		return restart;
 669	}
 670
 671	/*
 672	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
 673	 * the RCU protected stat update paths to grab the i_page
 674	 * lock so that stat transfer can synchronize against them.
 675	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 676	 */
 677	INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
 678	queue_rcu_work(isw_wq, &isw->work);
 679
 680	return restart;
 681}
 682
 683/**
 684 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
 685 * @wbc: writeback_control of interest
 686 * @inode: target inode
 687 *
 688 * @inode is locked and about to be written back under the control of @wbc.
 689 * Record @inode's writeback context into @wbc and unlock the i_lock.  On
 690 * writeback completion, wbc_detach_inode() should be called.  This is used
 691 * to track the cgroup writeback context.
 692 */
 693void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 694				 struct inode *inode)
 695{
 696	if (!inode_cgwb_enabled(inode)) {
 697		spin_unlock(&inode->i_lock);
 698		return;
 699	}
 700
 701	wbc->wb = inode_to_wb(inode);
 702	wbc->inode = inode;
 703
 704	wbc->wb_id = wbc->wb->memcg_css->id;
 705	wbc->wb_lcand_id = inode->i_wb_frn_winner;
 706	wbc->wb_tcand_id = 0;
 707	wbc->wb_bytes = 0;
 708	wbc->wb_lcand_bytes = 0;
 709	wbc->wb_tcand_bytes = 0;
 710
 711	wb_get(wbc->wb);
 712	spin_unlock(&inode->i_lock);
 713
 714	/*
 715	 * A dying wb indicates that either the blkcg associated with the
 716	 * memcg changed or the associated memcg is dying.  In the first
 717	 * case, a replacement wb should already be available and we should
 718	 * refresh the wb immediately.  In the second case, trying to
 719	 * refresh will keep failing.
 720	 */
 721	if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
 722		inode_switch_wbs(inode, wbc->wb_id);
 723}
 724EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
 725
 726/**
 727 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 728 * @wbc: writeback_control of the just finished writeback
 729 *
 730 * To be called after a writeback attempt of an inode finishes and undoes
 731 * wbc_attach_and_unlock_inode().  Can be called under any context.
 732 *
 733 * As concurrent write sharing of an inode is expected to be very rare and
 734 * memcg only tracks page ownership on first-use basis severely confining
 735 * the usefulness of such sharing, cgroup writeback tracks ownership
 736 * per-inode.  While the support for concurrent write sharing of an inode
 737 * is deemed unnecessary, an inode being written to by different cgroups at
 738 * different points in time is a lot more common, and, more importantly,
 739 * charging only by first-use can too readily lead to grossly incorrect
 740 * behaviors (single foreign page can lead to gigabytes of writeback to be
 741 * incorrectly attributed).
 742 *
 743 * To resolve this issue, cgroup writeback detects the majority dirtier of
 744 * an inode and transfers the ownership to it.  To avoid unnecessary
 745 * oscillation, the detection mechanism keeps track of history and gives
 746 * out the switch verdict only if the foreign usage pattern is stable over
 747 * a certain amount of time and/or writeback attempts.
 748 *
 749 * On each writeback attempt, @wbc tries to detect the majority writer
 750 * using Boyer-Moore majority vote algorithm.  In addition to the byte
 751 * count from the majority voting, it also counts the bytes written for the
 752 * current wb and the last round's winner wb (max of last round's current
 753 * wb, the winner from two rounds ago, and the last round's majority
 754 * candidate).  Keeping track of the historical winner helps the algorithm
 755 * to semi-reliably detect the most active writer even when it's not the
 756 * absolute majority.
 757 *
 758 * Once the winner of the round is determined, whether the winner is
 759 * foreign or not and how much IO time the round consumed is recorded in
 760 * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
 761 * over a certain threshold, the switch verdict is given.
 762 */
 763void wbc_detach_inode(struct writeback_control *wbc)
 764{
 765	struct bdi_writeback *wb = wbc->wb;
 766	struct inode *inode = wbc->inode;
 767	unsigned long avg_time, max_bytes, max_time;
 768	u16 history;
 769	int max_id;
 770
 771	if (!wb)
 772		return;
 773
 774	history = inode->i_wb_frn_history;
 775	avg_time = inode->i_wb_frn_avg_time;
 776
 777	/* pick the winner of this round */
 778	if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
 779	    wbc->wb_bytes >= wbc->wb_tcand_bytes) {
 780		max_id = wbc->wb_id;
 781		max_bytes = wbc->wb_bytes;
 782	} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
 783		max_id = wbc->wb_lcand_id;
 784		max_bytes = wbc->wb_lcand_bytes;
 785	} else {
 786		max_id = wbc->wb_tcand_id;
 787		max_bytes = wbc->wb_tcand_bytes;
 788	}
 789
 790	/*
 791	 * Calculate the amount of IO time the winner consumed and fold it
 792	 * into the running average kept per inode.  If the consumed IO
 793	 * time is lower than avag / WB_FRN_TIME_CUT_DIV, ignore it for
 794	 * deciding whether to switch or not.  This is to prevent one-off
 795	 * small dirtiers from skewing the verdict.
 796	 */
 797	max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
 798				wb->avg_write_bandwidth);
 799	if (avg_time)
 800		avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
 801			    (avg_time >> WB_FRN_TIME_AVG_SHIFT);
 802	else
 803		avg_time = max_time;	/* immediate catch up on first run */
 804
 805	if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
 806		int slots;
 807
 808		/*
 809		 * The switch verdict is reached if foreign wb's consume
 810		 * more than a certain proportion of IO time in a
 811		 * WB_FRN_TIME_PERIOD.  This is loosely tracked by 16 slot
 812		 * history mask where each bit represents one sixteenth of
 813		 * the period.  Determine the number of slots to shift into
 814		 * history from @max_time.
 815		 */
 816		slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
 817			    (unsigned long)WB_FRN_HIST_MAX_SLOTS);
 818		history <<= slots;
 819		if (wbc->wb_id != max_id)
 820			history |= (1U << slots) - 1;
 821
 822		if (history)
 823			trace_inode_foreign_history(inode, wbc, history);
 824
 825		/*
 826		 * Switch if the current wb isn't the consistent winner.
 827		 * If there are multiple closely competing dirtiers, the
 828		 * inode may switch across them repeatedly over time, which
 829		 * is okay.  The main goal is avoiding keeping an inode on
 830		 * the wrong wb for an extended period of time.
 831		 */
 832		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
 833			inode_switch_wbs(inode, max_id);
 834	}
 835
 836	/*
 837	 * Multiple instances of this function may race to update the
 838	 * following fields but we don't mind occassional inaccuracies.
 839	 */
 840	inode->i_wb_frn_winner = max_id;
 841	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
 842	inode->i_wb_frn_history = history;
 843
 844	wb_put(wbc->wb);
 845	wbc->wb = NULL;
 846}
 847EXPORT_SYMBOL_GPL(wbc_detach_inode);
 848
 849/**
 850 * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
 851 * @wbc: writeback_control of the writeback in progress
 852 * @page: page being written out
 853 * @bytes: number of bytes being written out
 854 *
 855 * @bytes from @page are about to written out during the writeback
 856 * controlled by @wbc.  Keep the book for foreign inode detection.  See
 857 * wbc_detach_inode().
 858 */
 859void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
 860			      size_t bytes)
 861{
 862	struct cgroup_subsys_state *css;
 863	int id;
 864
 865	/*
 866	 * pageout() path doesn't attach @wbc to the inode being written
 867	 * out.  This is intentional as we don't want the function to block
 868	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
 869	 * regular writeback instead of writing things out itself.
 870	 */
 871	if (!wbc->wb || wbc->no_cgroup_owner)
 872		return;
 873
 874	css = mem_cgroup_css_from_page(page);
 875	/* dead cgroups shouldn't contribute to inode ownership arbitration */
 876	if (!(css->flags & CSS_ONLINE))
 877		return;
 878
 879	id = css->id;
 880
 881	if (id == wbc->wb_id) {
 882		wbc->wb_bytes += bytes;
 883		return;
 884	}
 885
 886	if (id == wbc->wb_lcand_id)
 887		wbc->wb_lcand_bytes += bytes;
 888
 889	/* Boyer-Moore majority vote algorithm */
 890	if (!wbc->wb_tcand_bytes)
 891		wbc->wb_tcand_id = id;
 892	if (id == wbc->wb_tcand_id)
 893		wbc->wb_tcand_bytes += bytes;
 894	else
 895		wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
 896}
 897EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
 898
 899/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 900 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 901 * @wb: target bdi_writeback to split @nr_pages to
 902 * @nr_pages: number of pages to write for the whole bdi
 903 *
 904 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 905 * relation to the total write bandwidth of all wb's w/ dirty inodes on
 906 * @wb->bdi.
 907 */
 908static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
 909{
 910	unsigned long this_bw = wb->avg_write_bandwidth;
 911	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
 912
 913	if (nr_pages == LONG_MAX)
 914		return LONG_MAX;
 915
 916	/*
 917	 * This may be called on clean wb's and proportional distribution
 918	 * may not make sense, just use the original @nr_pages in those
 919	 * cases.  In general, we wanna err on the side of writing more.
 920	 */
 921	if (!tot_bw || this_bw >= tot_bw)
 922		return nr_pages;
 923	else
 924		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
 925}
 926
 927/**
 928 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
 929 * @bdi: target backing_dev_info
 930 * @base_work: wb_writeback_work to issue
 931 * @skip_if_busy: skip wb's which already have writeback in progress
 932 *
 933 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
 934 * have dirty inodes.  If @base_work->nr_page isn't %LONG_MAX, it's
 935 * distributed to the busy wbs according to each wb's proportion in the
 936 * total active write bandwidth of @bdi.
 937 */
 938static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 939				  struct wb_writeback_work *base_work,
 940				  bool skip_if_busy)
 941{
 942	struct bdi_writeback *last_wb = NULL;
 943	struct bdi_writeback *wb = list_entry(&bdi->wb_list,
 944					      struct bdi_writeback, bdi_node);
 945
 946	might_sleep();
 947restart:
 948	rcu_read_lock();
 949	list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
 950		DEFINE_WB_COMPLETION(fallback_work_done, bdi);
 951		struct wb_writeback_work fallback_work;
 952		struct wb_writeback_work *work;
 953		long nr_pages;
 954
 955		if (last_wb) {
 956			wb_put(last_wb);
 957			last_wb = NULL;
 958		}
 959
 960		/* SYNC_ALL writes out I_DIRTY_TIME too */
 961		if (!wb_has_dirty_io(wb) &&
 962		    (base_work->sync_mode == WB_SYNC_NONE ||
 963		     list_empty(&wb->b_dirty_time)))
 964			continue;
 965		if (skip_if_busy && writeback_in_progress(wb))
 966			continue;
 967
 968		nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
 969
 970		work = kmalloc(sizeof(*work), GFP_ATOMIC);
 971		if (work) {
 972			*work = *base_work;
 973			work->nr_pages = nr_pages;
 974			work->auto_free = 1;
 975			wb_queue_work(wb, work);
 976			continue;
 977		}
 978
 979		/* alloc failed, execute synchronously using on-stack fallback */
 980		work = &fallback_work;
 981		*work = *base_work;
 982		work->nr_pages = nr_pages;
 983		work->auto_free = 0;
 984		work->done = &fallback_work_done;
 985
 986		wb_queue_work(wb, work);
 987
 988		/*
 989		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
 990		 * continuing iteration from @wb after dropping and
 991		 * regrabbing rcu read lock.
 992		 */
 993		wb_get(wb);
 994		last_wb = wb;
 995
 996		rcu_read_unlock();
 997		wb_wait_for_completion(&fallback_work_done);
 998		goto restart;
 999	}
1000	rcu_read_unlock();
1001
1002	if (last_wb)
1003		wb_put(last_wb);
1004}
1005
1006/**
1007 * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
1008 * @bdi_id: target bdi id
1009 * @memcg_id: target memcg css id
 
1010 * @reason: reason why some writeback work initiated
1011 * @done: target wb_completion
1012 *
1013 * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
1014 * with the specified parameters.
1015 */
1016int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
1017			   enum wb_reason reason, struct wb_completion *done)
1018{
1019	struct backing_dev_info *bdi;
1020	struct cgroup_subsys_state *memcg_css;
1021	struct bdi_writeback *wb;
1022	struct wb_writeback_work *work;
1023	unsigned long dirty;
1024	int ret;
1025
1026	/* lookup bdi and memcg */
1027	bdi = bdi_get_by_id(bdi_id);
1028	if (!bdi)
1029		return -ENOENT;
1030
1031	rcu_read_lock();
1032	memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
1033	if (memcg_css && !css_tryget(memcg_css))
1034		memcg_css = NULL;
1035	rcu_read_unlock();
1036	if (!memcg_css) {
1037		ret = -ENOENT;
1038		goto out_bdi_put;
1039	}
1040
1041	/*
1042	 * And find the associated wb.  If the wb isn't there already
1043	 * there's nothing to flush, don't create one.
1044	 */
1045	wb = wb_get_lookup(bdi, memcg_css);
1046	if (!wb) {
1047		ret = -ENOENT;
1048		goto out_css_put;
1049	}
1050
1051	/*
1052	 * The caller is attempting to write out most of
1053	 * the currently dirty pages.  Let's take the current dirty page
1054	 * count and inflate it by 25% which should be large enough to
1055	 * flush out most dirty pages while avoiding getting livelocked by
1056	 * concurrent dirtiers.
1057	 *
1058	 * BTW the memcg stats are flushed periodically and this is best-effort
1059	 * estimation, so some potential error is ok.
1060	 */
1061	dirty = memcg_page_state(mem_cgroup_from_css(memcg_css), NR_FILE_DIRTY);
1062	dirty = dirty * 10 / 8;
 
 
 
 
 
1063
1064	/* issue the writeback work */
1065	work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
1066	if (work) {
1067		work->nr_pages = dirty;
1068		work->sync_mode = WB_SYNC_NONE;
1069		work->range_cyclic = 1;
1070		work->reason = reason;
1071		work->done = done;
1072		work->auto_free = 1;
1073		wb_queue_work(wb, work);
1074		ret = 0;
1075	} else {
1076		ret = -ENOMEM;
1077	}
1078
1079	wb_put(wb);
1080out_css_put:
1081	css_put(memcg_css);
1082out_bdi_put:
1083	bdi_put(bdi);
1084	return ret;
1085}
1086
1087/**
1088 * cgroup_writeback_umount - flush inode wb switches for umount
1089 *
1090 * This function is called when a super_block is about to be destroyed and
1091 * flushes in-flight inode wb switches.  An inode wb switch goes through
1092 * RCU and then workqueue, so the two need to be flushed in order to ensure
1093 * that all previously scheduled switches are finished.  As wb switches are
1094 * rare occurrences and synchronize_rcu() can take a while, perform
1095 * flushing iff wb switches are in flight.
1096 */
1097void cgroup_writeback_umount(void)
1098{
1099	/*
1100	 * SB_ACTIVE should be reliably cleared before checking
1101	 * isw_nr_in_flight, see generic_shutdown_super().
1102	 */
1103	smp_mb();
1104
1105	if (atomic_read(&isw_nr_in_flight)) {
1106		/*
1107		 * Use rcu_barrier() to wait for all pending callbacks to
1108		 * ensure that all in-flight wb switches are in the workqueue.
1109		 */
1110		rcu_barrier();
1111		flush_workqueue(isw_wq);
1112	}
1113}
1114
1115static int __init cgroup_writeback_init(void)
1116{
1117	isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
1118	if (!isw_wq)
1119		return -ENOMEM;
1120	return 0;
1121}
1122fs_initcall(cgroup_writeback_init);
1123
1124#else	/* CONFIG_CGROUP_WRITEBACK */
1125
1126static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1127static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1128
1129static void inode_cgwb_move_to_attached(struct inode *inode,
1130					struct bdi_writeback *wb)
1131{
1132	assert_spin_locked(&wb->list_lock);
1133	assert_spin_locked(&inode->i_lock);
1134	WARN_ON_ONCE(inode->i_state & I_FREEING);
1135
1136	inode->i_state &= ~I_SYNC_QUEUED;
1137	list_del_init(&inode->i_io_list);
1138	wb_io_lists_depopulated(wb);
1139}
1140
1141static struct bdi_writeback *
1142locked_inode_to_wb_and_lock_list(struct inode *inode)
1143	__releases(&inode->i_lock)
1144	__acquires(&wb->list_lock)
1145{
1146	struct bdi_writeback *wb = inode_to_wb(inode);
1147
1148	spin_unlock(&inode->i_lock);
1149	spin_lock(&wb->list_lock);
1150	return wb;
1151}
1152
1153static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
1154	__acquires(&wb->list_lock)
1155{
1156	struct bdi_writeback *wb = inode_to_wb(inode);
1157
1158	spin_lock(&wb->list_lock);
1159	return wb;
1160}
1161
1162static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
1163{
1164	return nr_pages;
1165}
1166
1167static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
1168				  struct wb_writeback_work *base_work,
1169				  bool skip_if_busy)
1170{
1171	might_sleep();
1172
1173	if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1174		base_work->auto_free = 0;
1175		wb_queue_work(&bdi->wb, base_work);
1176	}
1177}
1178
1179#endif	/* CONFIG_CGROUP_WRITEBACK */
1180
1181/*
1182 * Add in the number of potentially dirty inodes, because each inode
1183 * write can dirty pagecache in the underlying blockdev.
1184 */
1185static unsigned long get_nr_dirty_pages(void)
1186{
1187	return global_node_page_state(NR_FILE_DIRTY) +
1188		get_nr_dirty_inodes();
1189}
1190
1191static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
1192{
1193	if (!wb_has_dirty_io(wb))
1194		return;
1195
1196	/*
1197	 * All callers of this function want to start writeback of all
1198	 * dirty pages. Places like vmscan can call this at a very
1199	 * high frequency, causing pointless allocations of tons of
1200	 * work items and keeping the flusher threads busy retrieving
1201	 * that work. Ensure that we only allow one of them pending and
1202	 * inflight at the time.
1203	 */
1204	if (test_bit(WB_start_all, &wb->state) ||
1205	    test_and_set_bit(WB_start_all, &wb->state))
1206		return;
1207
1208	wb->start_all_reason = reason;
1209	wb_wakeup(wb);
1210}
1211
1212/**
1213 * wb_start_background_writeback - start background writeback
1214 * @wb: bdi_writback to write from
1215 *
1216 * Description:
1217 *   This makes sure WB_SYNC_NONE background writeback happens. When
1218 *   this function returns, it is only guaranteed that for given wb
1219 *   some IO is happening if we are over background dirty threshold.
1220 *   Caller need not hold sb s_umount semaphore.
1221 */
1222void wb_start_background_writeback(struct bdi_writeback *wb)
1223{
1224	/*
1225	 * We just wake up the flusher thread. It will perform background
1226	 * writeback as soon as there is no other work to do.
1227	 */
1228	trace_writeback_wake_background(wb);
1229	wb_wakeup(wb);
1230}
1231
1232/*
1233 * Remove the inode from the writeback list it is on.
1234 */
1235void inode_io_list_del(struct inode *inode)
1236{
1237	struct bdi_writeback *wb;
1238
1239	wb = inode_to_wb_and_lock_list(inode);
1240	spin_lock(&inode->i_lock);
1241
1242	inode->i_state &= ~I_SYNC_QUEUED;
1243	list_del_init(&inode->i_io_list);
1244	wb_io_lists_depopulated(wb);
1245
1246	spin_unlock(&inode->i_lock);
1247	spin_unlock(&wb->list_lock);
1248}
1249EXPORT_SYMBOL(inode_io_list_del);
1250
1251/*
1252 * mark an inode as under writeback on the sb
1253 */
1254void sb_mark_inode_writeback(struct inode *inode)
1255{
1256	struct super_block *sb = inode->i_sb;
1257	unsigned long flags;
1258
1259	if (list_empty(&inode->i_wb_list)) {
1260		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1261		if (list_empty(&inode->i_wb_list)) {
1262			list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
1263			trace_sb_mark_inode_writeback(inode);
1264		}
1265		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1266	}
1267}
1268
1269/*
1270 * clear an inode as under writeback on the sb
1271 */
1272void sb_clear_inode_writeback(struct inode *inode)
1273{
1274	struct super_block *sb = inode->i_sb;
1275	unsigned long flags;
1276
1277	if (!list_empty(&inode->i_wb_list)) {
1278		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1279		if (!list_empty(&inode->i_wb_list)) {
1280			list_del_init(&inode->i_wb_list);
1281			trace_sb_clear_inode_writeback(inode);
1282		}
1283		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1284	}
1285}
1286
1287/*
1288 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
1289 * furthest end of its superblock's dirty-inode list.
1290 *
1291 * Before stamping the inode's ->dirtied_when, we check to see whether it is
1292 * already the most-recently-dirtied inode on the b_dirty list.  If that is
1293 * the case then the inode must have been redirtied while it was being written
1294 * out and we don't reset its dirtied_when.
1295 */
1296static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
1297{
1298	assert_spin_locked(&inode->i_lock);
1299
1300	inode->i_state &= ~I_SYNC_QUEUED;
1301	/*
1302	 * When the inode is being freed just don't bother with dirty list
1303	 * tracking. Flush worker will ignore this inode anyway and it will
1304	 * trigger assertions in inode_io_list_move_locked().
1305	 */
1306	if (inode->i_state & I_FREEING) {
1307		list_del_init(&inode->i_io_list);
1308		wb_io_lists_depopulated(wb);
1309		return;
1310	}
1311	if (!list_empty(&wb->b_dirty)) {
1312		struct inode *tail;
1313
1314		tail = wb_inode(wb->b_dirty.next);
1315		if (time_before(inode->dirtied_when, tail->dirtied_when))
1316			inode->dirtied_when = jiffies;
1317	}
1318	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
 
1319}
1320
1321static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1322{
1323	spin_lock(&inode->i_lock);
1324	redirty_tail_locked(inode, wb);
1325	spin_unlock(&inode->i_lock);
1326}
1327
1328/*
1329 * requeue inode for re-scanning after bdi->b_io list is exhausted.
1330 */
1331static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1332{
1333	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
1334}
1335
1336static void inode_sync_complete(struct inode *inode)
1337{
1338	inode->i_state &= ~I_SYNC;
1339	/* If inode is clean an unused, put it into LRU now... */
1340	inode_add_lru(inode);
1341	/* Waiters must see I_SYNC cleared before being woken up */
1342	smp_mb();
1343	wake_up_bit(&inode->i_state, __I_SYNC);
1344}
1345
1346static bool inode_dirtied_after(struct inode *inode, unsigned long t)
1347{
1348	bool ret = time_after(inode->dirtied_when, t);
1349#ifndef CONFIG_64BIT
1350	/*
1351	 * For inodes being constantly redirtied, dirtied_when can get stuck.
1352	 * It _appears_ to be in the future, but is actually in distant past.
1353	 * This test is necessary to prevent such wrapped-around relative times
1354	 * from permanently stopping the whole bdi writeback.
1355	 */
1356	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
1357#endif
1358	return ret;
1359}
1360
 
 
1361/*
1362 * Move expired (dirtied before dirtied_before) dirty inodes from
1363 * @delaying_queue to @dispatch_queue.
1364 */
1365static int move_expired_inodes(struct list_head *delaying_queue,
1366			       struct list_head *dispatch_queue,
1367			       unsigned long dirtied_before)
1368{
1369	LIST_HEAD(tmp);
1370	struct list_head *pos, *node;
1371	struct super_block *sb = NULL;
1372	struct inode *inode;
1373	int do_sb_sort = 0;
1374	int moved = 0;
1375
1376	while (!list_empty(delaying_queue)) {
1377		inode = wb_inode(delaying_queue->prev);
1378		if (inode_dirtied_after(inode, dirtied_before))
1379			break;
1380		spin_lock(&inode->i_lock);
1381		list_move(&inode->i_io_list, &tmp);
1382		moved++;
 
1383		inode->i_state |= I_SYNC_QUEUED;
1384		spin_unlock(&inode->i_lock);
1385		if (sb_is_blkdev_sb(inode->i_sb))
1386			continue;
1387		if (sb && sb != inode->i_sb)
1388			do_sb_sort = 1;
1389		sb = inode->i_sb;
1390	}
1391
1392	/* just one sb in list, splice to dispatch_queue and we're done */
1393	if (!do_sb_sort) {
1394		list_splice(&tmp, dispatch_queue);
1395		goto out;
1396	}
1397
1398	/*
1399	 * Although inode's i_io_list is moved from 'tmp' to 'dispatch_queue',
1400	 * we don't take inode->i_lock here because it is just a pointless overhead.
1401	 * Inode is already marked as I_SYNC_QUEUED so writeback list handling is
1402	 * fully under our control.
1403	 */
1404	while (!list_empty(&tmp)) {
1405		sb = wb_inode(tmp.prev)->i_sb;
1406		list_for_each_prev_safe(pos, node, &tmp) {
1407			inode = wb_inode(pos);
1408			if (inode->i_sb == sb)
1409				list_move(&inode->i_io_list, dispatch_queue);
1410		}
1411	}
1412out:
1413	return moved;
1414}
1415
1416/*
1417 * Queue all expired dirty inodes for io, eldest first.
1418 * Before
1419 *         newly dirtied     b_dirty    b_io    b_more_io
1420 *         =============>    gf         edc     BA
1421 * After
1422 *         newly dirtied     b_dirty    b_io    b_more_io
1423 *         =============>    g          fBAedc
1424 *                                           |
1425 *                                           +--> dequeue for IO
1426 */
1427static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
1428		     unsigned long dirtied_before)
1429{
1430	int moved;
1431	unsigned long time_expire_jif = dirtied_before;
1432
1433	assert_spin_locked(&wb->list_lock);
1434	list_splice_init(&wb->b_more_io, &wb->b_io);
1435	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
1436	if (!work->for_sync)
1437		time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
1438	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
1439				     time_expire_jif);
1440	if (moved)
1441		wb_io_lists_populated(wb);
1442	trace_writeback_queue_io(wb, work, dirtied_before, moved);
1443}
1444
1445static int write_inode(struct inode *inode, struct writeback_control *wbc)
1446{
1447	int ret;
1448
1449	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
1450		trace_writeback_write_inode_start(inode, wbc);
1451		ret = inode->i_sb->s_op->write_inode(inode, wbc);
1452		trace_writeback_write_inode(inode, wbc);
1453		return ret;
1454	}
1455	return 0;
1456}
1457
1458/*
1459 * Wait for writeback on an inode to complete. Called with i_lock held.
1460 * Caller must make sure inode cannot go away when we drop i_lock.
1461 */
1462static void __inode_wait_for_writeback(struct inode *inode)
1463	__releases(inode->i_lock)
1464	__acquires(inode->i_lock)
1465{
1466	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
1467	wait_queue_head_t *wqh;
1468
1469	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1470	while (inode->i_state & I_SYNC) {
1471		spin_unlock(&inode->i_lock);
1472		__wait_on_bit(wqh, &wq, bit_wait,
1473			      TASK_UNINTERRUPTIBLE);
1474		spin_lock(&inode->i_lock);
1475	}
1476}
1477
1478/*
1479 * Wait for writeback on an inode to complete. Caller must have inode pinned.
1480 */
1481void inode_wait_for_writeback(struct inode *inode)
1482{
1483	spin_lock(&inode->i_lock);
1484	__inode_wait_for_writeback(inode);
1485	spin_unlock(&inode->i_lock);
1486}
1487
1488/*
1489 * Sleep until I_SYNC is cleared. This function must be called with i_lock
1490 * held and drops it. It is aimed for callers not holding any inode reference
1491 * so once i_lock is dropped, inode can go away.
1492 */
1493static void inode_sleep_on_writeback(struct inode *inode)
1494	__releases(inode->i_lock)
1495{
1496	DEFINE_WAIT(wait);
1497	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1498	int sleep;
1499
1500	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1501	sleep = inode->i_state & I_SYNC;
1502	spin_unlock(&inode->i_lock);
1503	if (sleep)
1504		schedule();
1505	finish_wait(wqh, &wait);
1506}
1507
1508/*
1509 * Find proper writeback list for the inode depending on its current state and
1510 * possibly also change of its state while we were doing writeback.  Here we
1511 * handle things such as livelock prevention or fairness of writeback among
1512 * inodes. This function can be called only by flusher thread - noone else
1513 * processes all inodes in writeback lists and requeueing inodes behind flusher
1514 * thread's back can have unexpected consequences.
1515 */
1516static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1517			  struct writeback_control *wbc)
1518{
1519	if (inode->i_state & I_FREEING)
1520		return;
1521
1522	/*
1523	 * Sync livelock prevention. Each inode is tagged and synced in one
1524	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
1525	 * the dirty time to prevent enqueue and sync it again.
1526	 */
1527	if ((inode->i_state & I_DIRTY) &&
1528	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
1529		inode->dirtied_when = jiffies;
1530
1531	if (wbc->pages_skipped) {
1532		/*
1533		 * writeback is not making progress due to locked
1534		 * buffers. Skip this inode for now.
1535		 */
1536		redirty_tail_locked(inode, wb);
1537		return;
1538	}
1539
1540	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
1541		/*
1542		 * We didn't write back all the pages.  nfs_writepages()
1543		 * sometimes bales out without doing anything.
1544		 */
1545		if (wbc->nr_to_write <= 0) {
1546			/* Slice used up. Queue for next turn. */
1547			requeue_io(inode, wb);
1548		} else {
1549			/*
1550			 * Writeback blocked by something other than
1551			 * congestion. Delay the inode for some time to
1552			 * avoid spinning on the CPU (100% iowait)
1553			 * retrying writeback of the dirty page/inode
1554			 * that cannot be performed immediately.
1555			 */
1556			redirty_tail_locked(inode, wb);
1557		}
1558	} else if (inode->i_state & I_DIRTY) {
1559		/*
1560		 * Filesystems can dirty the inode during writeback operations,
1561		 * such as delayed allocation during submission or metadata
1562		 * updates after data IO completion.
1563		 */
1564		redirty_tail_locked(inode, wb);
1565	} else if (inode->i_state & I_DIRTY_TIME) {
1566		inode->dirtied_when = jiffies;
1567		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1568		inode->i_state &= ~I_SYNC_QUEUED;
1569	} else {
1570		/* The inode is clean. Remove from writeback lists. */
1571		inode_cgwb_move_to_attached(inode, wb);
1572	}
1573}
1574
1575/*
1576 * Write out an inode and its dirty pages (or some of its dirty pages, depending
1577 * on @wbc->nr_to_write), and clear the relevant dirty flags from i_state.
1578 *
1579 * This doesn't remove the inode from the writeback list it is on, except
1580 * potentially to move it from b_dirty_time to b_dirty due to timestamp
1581 * expiration.  The caller is otherwise responsible for writeback list handling.
1582 *
1583 * The caller is also responsible for setting the I_SYNC flag beforehand and
1584 * calling inode_sync_complete() to clear it afterwards.
1585 */
1586static int
1587__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1588{
1589	struct address_space *mapping = inode->i_mapping;
1590	long nr_to_write = wbc->nr_to_write;
1591	unsigned dirty;
1592	int ret;
1593
1594	WARN_ON(!(inode->i_state & I_SYNC));
1595
1596	trace_writeback_single_inode_start(inode, wbc, nr_to_write);
1597
1598	ret = do_writepages(mapping, wbc);
1599
1600	/*
1601	 * Make sure to wait on the data before writing out the metadata.
1602	 * This is important for filesystems that modify metadata on data
1603	 * I/O completion. We don't do it for sync(2) writeback because it has a
1604	 * separate, external IO completion path and ->sync_fs for guaranteeing
1605	 * inode metadata is written back correctly.
1606	 */
1607	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
1608		int err = filemap_fdatawait(mapping);
1609		if (ret == 0)
1610			ret = err;
1611	}
1612
1613	/*
1614	 * If the inode has dirty timestamps and we need to write them, call
1615	 * mark_inode_dirty_sync() to notify the filesystem about it and to
1616	 * change I_DIRTY_TIME into I_DIRTY_SYNC.
1617	 */
1618	if ((inode->i_state & I_DIRTY_TIME) &&
1619	    (wbc->sync_mode == WB_SYNC_ALL ||
1620	     time_after(jiffies, inode->dirtied_time_when +
1621			dirtytime_expire_interval * HZ))) {
1622		trace_writeback_lazytime(inode);
1623		mark_inode_dirty_sync(inode);
1624	}
1625
1626	/*
1627	 * Get and clear the dirty flags from i_state.  This needs to be done
1628	 * after calling writepages because some filesystems may redirty the
1629	 * inode during writepages due to delalloc.  It also needs to be done
1630	 * after handling timestamp expiration, as that may dirty the inode too.
1631	 */
1632	spin_lock(&inode->i_lock);
1633	dirty = inode->i_state & I_DIRTY;
1634	inode->i_state &= ~dirty;
1635
1636	/*
1637	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
1638	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
1639	 * either they see the I_DIRTY bits cleared or we see the dirtied
1640	 * inode.
1641	 *
1642	 * I_DIRTY_PAGES is always cleared together above even if @mapping
1643	 * still has dirty pages.  The flag is reinstated after smp_mb() if
1644	 * necessary.  This guarantees that either __mark_inode_dirty()
1645	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
1646	 */
1647	smp_mb();
1648
1649	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
1650		inode->i_state |= I_DIRTY_PAGES;
1651	else if (unlikely(inode->i_state & I_PINNING_FSCACHE_WB)) {
1652		if (!(inode->i_state & I_DIRTY_PAGES)) {
1653			inode->i_state &= ~I_PINNING_FSCACHE_WB;
1654			wbc->unpinned_fscache_wb = true;
1655			dirty |= I_PINNING_FSCACHE_WB; /* Cause write_inode */
1656		}
1657	}
1658
1659	spin_unlock(&inode->i_lock);
1660
1661	/* Don't write the inode if only I_DIRTY_PAGES was set */
1662	if (dirty & ~I_DIRTY_PAGES) {
1663		int err = write_inode(inode, wbc);
1664		if (ret == 0)
1665			ret = err;
1666	}
1667	wbc->unpinned_fscache_wb = false;
1668	trace_writeback_single_inode(inode, wbc, nr_to_write);
1669	return ret;
1670}
1671
1672/*
1673 * Write out an inode's dirty data and metadata on-demand, i.e. separately from
1674 * the regular batched writeback done by the flusher threads in
1675 * writeback_sb_inodes().  @wbc controls various aspects of the write, such as
1676 * whether it is a data-integrity sync (%WB_SYNC_ALL) or not (%WB_SYNC_NONE).
1677 *
1678 * To prevent the inode from going away, either the caller must have a reference
1679 * to the inode, or the inode must have I_WILL_FREE or I_FREEING set.
1680 */
1681static int writeback_single_inode(struct inode *inode,
1682				  struct writeback_control *wbc)
1683{
1684	struct bdi_writeback *wb;
1685	int ret = 0;
1686
1687	spin_lock(&inode->i_lock);
1688	if (!atomic_read(&inode->i_count))
1689		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
1690	else
1691		WARN_ON(inode->i_state & I_WILL_FREE);
1692
1693	if (inode->i_state & I_SYNC) {
1694		/*
1695		 * Writeback is already running on the inode.  For WB_SYNC_NONE,
1696		 * that's enough and we can just return.  For WB_SYNC_ALL, we
1697		 * must wait for the existing writeback to complete, then do
1698		 * writeback again if there's anything left.
1699		 */
1700		if (wbc->sync_mode != WB_SYNC_ALL)
1701			goto out;
1702		__inode_wait_for_writeback(inode);
1703	}
1704	WARN_ON(inode->i_state & I_SYNC);
1705	/*
1706	 * If the inode is already fully clean, then there's nothing to do.
1707	 *
1708	 * For data-integrity syncs we also need to check whether any pages are
1709	 * still under writeback, e.g. due to prior WB_SYNC_NONE writeback.  If
1710	 * there are any such pages, we'll need to wait for them.
1711	 */
1712	if (!(inode->i_state & I_DIRTY_ALL) &&
1713	    (wbc->sync_mode != WB_SYNC_ALL ||
1714	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
1715		goto out;
1716	inode->i_state |= I_SYNC;
1717	wbc_attach_and_unlock_inode(wbc, inode);
1718
1719	ret = __writeback_single_inode(inode, wbc);
1720
1721	wbc_detach_inode(wbc);
1722
1723	wb = inode_to_wb_and_lock_list(inode);
1724	spin_lock(&inode->i_lock);
1725	/*
1726	 * If the inode is freeing, its i_io_list shoudn't be updated
1727	 * as it can be finally deleted at this moment.
 
1728	 */
1729	if (!(inode->i_state & I_FREEING)) {
1730		/*
1731		 * If the inode is now fully clean, then it can be safely
1732		 * removed from its writeback list (if any). Otherwise the
1733		 * flusher threads are responsible for the writeback lists.
1734		 */
1735		if (!(inode->i_state & I_DIRTY_ALL))
1736			inode_cgwb_move_to_attached(inode, wb);
1737		else if (!(inode->i_state & I_SYNC_QUEUED)) {
1738			if ((inode->i_state & I_DIRTY))
1739				redirty_tail_locked(inode, wb);
1740			else if (inode->i_state & I_DIRTY_TIME) {
1741				inode->dirtied_when = jiffies;
1742				inode_io_list_move_locked(inode,
1743							  wb,
1744							  &wb->b_dirty_time);
1745			}
1746		}
1747	}
1748
1749	spin_unlock(&wb->list_lock);
1750	inode_sync_complete(inode);
1751out:
1752	spin_unlock(&inode->i_lock);
1753	return ret;
1754}
1755
1756static long writeback_chunk_size(struct bdi_writeback *wb,
1757				 struct wb_writeback_work *work)
1758{
1759	long pages;
1760
1761	/*
1762	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
1763	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
1764	 * here avoids calling into writeback_inodes_wb() more than once.
1765	 *
1766	 * The intended call sequence for WB_SYNC_ALL writeback is:
1767	 *
1768	 *      wb_writeback()
1769	 *          writeback_sb_inodes()       <== called only once
1770	 *              write_cache_pages()     <== called once for each inode
1771	 *                   (quickly) tag currently dirty pages
1772	 *                   (maybe slowly) sync all tagged pages
1773	 */
1774	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
1775		pages = LONG_MAX;
1776	else {
1777		pages = min(wb->avg_write_bandwidth / 2,
1778			    global_wb_domain.dirty_limit / DIRTY_SCOPE);
1779		pages = min(pages, work->nr_pages);
1780		pages = round_down(pages + MIN_WRITEBACK_PAGES,
1781				   MIN_WRITEBACK_PAGES);
1782	}
1783
1784	return pages;
1785}
1786
1787/*
1788 * Write a portion of b_io inodes which belong to @sb.
1789 *
1790 * Return the number of pages and/or inodes written.
1791 *
1792 * NOTE! This is called with wb->list_lock held, and will
1793 * unlock and relock that for each inode it ends up doing
1794 * IO for.
1795 */
1796static long writeback_sb_inodes(struct super_block *sb,
1797				struct bdi_writeback *wb,
1798				struct wb_writeback_work *work)
1799{
1800	struct writeback_control wbc = {
1801		.sync_mode		= work->sync_mode,
1802		.tagged_writepages	= work->tagged_writepages,
1803		.for_kupdate		= work->for_kupdate,
1804		.for_background		= work->for_background,
1805		.for_sync		= work->for_sync,
1806		.range_cyclic		= work->range_cyclic,
1807		.range_start		= 0,
1808		.range_end		= LLONG_MAX,
1809	};
1810	unsigned long start_time = jiffies;
1811	long write_chunk;
1812	long total_wrote = 0;  /* count both pages and inodes */
1813
1814	while (!list_empty(&wb->b_io)) {
1815		struct inode *inode = wb_inode(wb->b_io.prev);
1816		struct bdi_writeback *tmp_wb;
1817		long wrote;
1818
1819		if (inode->i_sb != sb) {
1820			if (work->sb) {
1821				/*
1822				 * We only want to write back data for this
1823				 * superblock, move all inodes not belonging
1824				 * to it back onto the dirty list.
1825				 */
1826				redirty_tail(inode, wb);
1827				continue;
1828			}
1829
1830			/*
1831			 * The inode belongs to a different superblock.
1832			 * Bounce back to the caller to unpin this and
1833			 * pin the next superblock.
1834			 */
1835			break;
1836		}
1837
1838		/*
1839		 * Don't bother with new inodes or inodes being freed, first
1840		 * kind does not need periodic writeout yet, and for the latter
1841		 * kind writeout is handled by the freer.
1842		 */
1843		spin_lock(&inode->i_lock);
1844		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
1845			redirty_tail_locked(inode, wb);
1846			spin_unlock(&inode->i_lock);
1847			continue;
1848		}
1849		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
1850			/*
1851			 * If this inode is locked for writeback and we are not
1852			 * doing writeback-for-data-integrity, move it to
1853			 * b_more_io so that writeback can proceed with the
1854			 * other inodes on s_io.
1855			 *
1856			 * We'll have another go at writing back this inode
1857			 * when we completed a full scan of b_io.
1858			 */
 
1859			requeue_io(inode, wb);
1860			spin_unlock(&inode->i_lock);
1861			trace_writeback_sb_inodes_requeue(inode);
1862			continue;
1863		}
1864		spin_unlock(&wb->list_lock);
1865
1866		/*
1867		 * We already requeued the inode if it had I_SYNC set and we
1868		 * are doing WB_SYNC_NONE writeback. So this catches only the
1869		 * WB_SYNC_ALL case.
1870		 */
1871		if (inode->i_state & I_SYNC) {
1872			/* Wait for I_SYNC. This function drops i_lock... */
1873			inode_sleep_on_writeback(inode);
1874			/* Inode may be gone, start again */
1875			spin_lock(&wb->list_lock);
1876			continue;
1877		}
1878		inode->i_state |= I_SYNC;
1879		wbc_attach_and_unlock_inode(&wbc, inode);
1880
1881		write_chunk = writeback_chunk_size(wb, work);
1882		wbc.nr_to_write = write_chunk;
1883		wbc.pages_skipped = 0;
1884
1885		/*
1886		 * We use I_SYNC to pin the inode in memory. While it is set
1887		 * evict_inode() will wait so the inode cannot be freed.
1888		 */
1889		__writeback_single_inode(inode, &wbc);
1890
1891		wbc_detach_inode(&wbc);
1892		work->nr_pages -= write_chunk - wbc.nr_to_write;
1893		wrote = write_chunk - wbc.nr_to_write - wbc.pages_skipped;
1894		wrote = wrote < 0 ? 0 : wrote;
1895		total_wrote += wrote;
1896
1897		if (need_resched()) {
1898			/*
1899			 * We're trying to balance between building up a nice
1900			 * long list of IOs to improve our merge rate, and
1901			 * getting those IOs out quickly for anyone throttling
1902			 * in balance_dirty_pages().  cond_resched() doesn't
1903			 * unplug, so get our IOs out the door before we
1904			 * give up the CPU.
1905			 */
1906			blk_flush_plug(current->plug, false);
1907			cond_resched();
1908		}
1909
1910		/*
1911		 * Requeue @inode if still dirty.  Be careful as @inode may
1912		 * have been switched to another wb in the meantime.
1913		 */
1914		tmp_wb = inode_to_wb_and_lock_list(inode);
1915		spin_lock(&inode->i_lock);
1916		if (!(inode->i_state & I_DIRTY_ALL))
1917			total_wrote++;
1918		requeue_inode(inode, tmp_wb, &wbc);
1919		inode_sync_complete(inode);
1920		spin_unlock(&inode->i_lock);
1921
1922		if (unlikely(tmp_wb != wb)) {
1923			spin_unlock(&tmp_wb->list_lock);
1924			spin_lock(&wb->list_lock);
1925		}
1926
1927		/*
1928		 * bail out to wb_writeback() often enough to check
1929		 * background threshold and other termination conditions.
1930		 */
1931		if (total_wrote) {
1932			if (time_is_before_jiffies(start_time + HZ / 10UL))
1933				break;
1934			if (work->nr_pages <= 0)
1935				break;
1936		}
1937	}
1938	return total_wrote;
1939}
1940
1941static long __writeback_inodes_wb(struct bdi_writeback *wb,
1942				  struct wb_writeback_work *work)
1943{
1944	unsigned long start_time = jiffies;
1945	long wrote = 0;
1946
1947	while (!list_empty(&wb->b_io)) {
1948		struct inode *inode = wb_inode(wb->b_io.prev);
1949		struct super_block *sb = inode->i_sb;
1950
1951		if (!trylock_super(sb)) {
1952			/*
1953			 * trylock_super() may fail consistently due to
1954			 * s_umount being grabbed by someone else. Don't use
1955			 * requeue_io() to avoid busy retrying the inode/sb.
1956			 */
1957			redirty_tail(inode, wb);
1958			continue;
1959		}
1960		wrote += writeback_sb_inodes(sb, wb, work);
1961		up_read(&sb->s_umount);
1962
1963		/* refer to the same tests at the end of writeback_sb_inodes */
1964		if (wrote) {
1965			if (time_is_before_jiffies(start_time + HZ / 10UL))
1966				break;
1967			if (work->nr_pages <= 0)
1968				break;
1969		}
1970	}
1971	/* Leave any unwritten inodes on b_io */
1972	return wrote;
1973}
1974
1975static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
1976				enum wb_reason reason)
1977{
1978	struct wb_writeback_work work = {
1979		.nr_pages	= nr_pages,
1980		.sync_mode	= WB_SYNC_NONE,
1981		.range_cyclic	= 1,
1982		.reason		= reason,
1983	};
1984	struct blk_plug plug;
1985
1986	blk_start_plug(&plug);
1987	spin_lock(&wb->list_lock);
1988	if (list_empty(&wb->b_io))
1989		queue_io(wb, &work, jiffies);
1990	__writeback_inodes_wb(wb, &work);
1991	spin_unlock(&wb->list_lock);
1992	blk_finish_plug(&plug);
1993
1994	return nr_pages - work.nr_pages;
1995}
1996
1997/*
1998 * Explicit flushing or periodic writeback of "old" data.
1999 *
2000 * Define "old": the first time one of an inode's pages is dirtied, we mark the
2001 * dirtying-time in the inode's address_space.  So this periodic writeback code
2002 * just walks the superblock inode list, writing back any inodes which are
2003 * older than a specific point in time.
2004 *
2005 * Try to run once per dirty_writeback_interval.  But if a writeback event
2006 * takes longer than a dirty_writeback_interval interval, then leave a
2007 * one-second gap.
2008 *
2009 * dirtied_before takes precedence over nr_to_write.  So we'll only write back
2010 * all dirty pages if they are all attached to "old" mappings.
2011 */
2012static long wb_writeback(struct bdi_writeback *wb,
2013			 struct wb_writeback_work *work)
2014{
 
2015	long nr_pages = work->nr_pages;
2016	unsigned long dirtied_before = jiffies;
2017	struct inode *inode;
2018	long progress;
2019	struct blk_plug plug;
2020
2021	blk_start_plug(&plug);
2022	spin_lock(&wb->list_lock);
2023	for (;;) {
2024		/*
2025		 * Stop writeback when nr_pages has been consumed
2026		 */
2027		if (work->nr_pages <= 0)
2028			break;
2029
2030		/*
2031		 * Background writeout and kupdate-style writeback may
2032		 * run forever. Stop them if there is other work to do
2033		 * so that e.g. sync can proceed. They'll be restarted
2034		 * after the other works are all done.
2035		 */
2036		if ((work->for_background || work->for_kupdate) &&
2037		    !list_empty(&wb->work_list))
2038			break;
2039
2040		/*
2041		 * For background writeout, stop when we are below the
2042		 * background dirty threshold
2043		 */
2044		if (work->for_background && !wb_over_bg_thresh(wb))
2045			break;
2046
2047		/*
2048		 * Kupdate and background works are special and we want to
2049		 * include all inodes that need writing. Livelock avoidance is
2050		 * handled by these works yielding to any other work so we are
2051		 * safe.
2052		 */
2053		if (work->for_kupdate) {
2054			dirtied_before = jiffies -
2055				msecs_to_jiffies(dirty_expire_interval * 10);
2056		} else if (work->for_background)
2057			dirtied_before = jiffies;
2058
2059		trace_writeback_start(wb, work);
2060		if (list_empty(&wb->b_io))
2061			queue_io(wb, work, dirtied_before);
2062		if (work->sb)
2063			progress = writeback_sb_inodes(work->sb, wb, work);
2064		else
2065			progress = __writeback_inodes_wb(wb, work);
2066		trace_writeback_written(wb, work);
2067
 
 
2068		/*
2069		 * Did we write something? Try for more
2070		 *
2071		 * Dirty inodes are moved to b_io for writeback in batches.
2072		 * The completion of the current batch does not necessarily
2073		 * mean the overall work is done. So we keep looping as long
2074		 * as made some progress on cleaning pages or inodes.
2075		 */
2076		if (progress)
2077			continue;
2078		/*
2079		 * No more inodes for IO, bail
2080		 */
2081		if (list_empty(&wb->b_more_io))
2082			break;
2083		/*
2084		 * Nothing written. Wait for some inode to
2085		 * become available for writeback. Otherwise
2086		 * we'll just busyloop.
2087		 */
2088		trace_writeback_wait(wb, work);
2089		inode = wb_inode(wb->b_more_io.prev);
2090		spin_lock(&inode->i_lock);
2091		spin_unlock(&wb->list_lock);
2092		/* This function drops i_lock... */
2093		inode_sleep_on_writeback(inode);
2094		spin_lock(&wb->list_lock);
2095	}
2096	spin_unlock(&wb->list_lock);
2097	blk_finish_plug(&plug);
2098
2099	return nr_pages - work->nr_pages;
2100}
2101
2102/*
2103 * Return the next wb_writeback_work struct that hasn't been processed yet.
2104 */
2105static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
2106{
2107	struct wb_writeback_work *work = NULL;
2108
2109	spin_lock_irq(&wb->work_lock);
2110	if (!list_empty(&wb->work_list)) {
2111		work = list_entry(wb->work_list.next,
2112				  struct wb_writeback_work, list);
2113		list_del_init(&work->list);
2114	}
2115	spin_unlock_irq(&wb->work_lock);
2116	return work;
2117}
2118
2119static long wb_check_background_flush(struct bdi_writeback *wb)
2120{
2121	if (wb_over_bg_thresh(wb)) {
2122
2123		struct wb_writeback_work work = {
2124			.nr_pages	= LONG_MAX,
2125			.sync_mode	= WB_SYNC_NONE,
2126			.for_background	= 1,
2127			.range_cyclic	= 1,
2128			.reason		= WB_REASON_BACKGROUND,
2129		};
2130
2131		return wb_writeback(wb, &work);
2132	}
2133
2134	return 0;
2135}
2136
2137static long wb_check_old_data_flush(struct bdi_writeback *wb)
2138{
2139	unsigned long expired;
2140	long nr_pages;
2141
2142	/*
2143	 * When set to zero, disable periodic writeback
2144	 */
2145	if (!dirty_writeback_interval)
2146		return 0;
2147
2148	expired = wb->last_old_flush +
2149			msecs_to_jiffies(dirty_writeback_interval * 10);
2150	if (time_before(jiffies, expired))
2151		return 0;
2152
2153	wb->last_old_flush = jiffies;
2154	nr_pages = get_nr_dirty_pages();
2155
2156	if (nr_pages) {
2157		struct wb_writeback_work work = {
2158			.nr_pages	= nr_pages,
2159			.sync_mode	= WB_SYNC_NONE,
2160			.for_kupdate	= 1,
2161			.range_cyclic	= 1,
2162			.reason		= WB_REASON_PERIODIC,
2163		};
2164
2165		return wb_writeback(wb, &work);
2166	}
2167
2168	return 0;
2169}
2170
2171static long wb_check_start_all(struct bdi_writeback *wb)
2172{
2173	long nr_pages;
2174
2175	if (!test_bit(WB_start_all, &wb->state))
2176		return 0;
2177
2178	nr_pages = get_nr_dirty_pages();
2179	if (nr_pages) {
2180		struct wb_writeback_work work = {
2181			.nr_pages	= wb_split_bdi_pages(wb, nr_pages),
2182			.sync_mode	= WB_SYNC_NONE,
2183			.range_cyclic	= 1,
2184			.reason		= wb->start_all_reason,
2185		};
2186
2187		nr_pages = wb_writeback(wb, &work);
2188	}
2189
2190	clear_bit(WB_start_all, &wb->state);
2191	return nr_pages;
2192}
2193
2194
2195/*
2196 * Retrieve work items and do the writeback they describe
2197 */
2198static long wb_do_writeback(struct bdi_writeback *wb)
2199{
2200	struct wb_writeback_work *work;
2201	long wrote = 0;
2202
2203	set_bit(WB_writeback_running, &wb->state);
2204	while ((work = get_next_work_item(wb)) != NULL) {
2205		trace_writeback_exec(wb, work);
2206		wrote += wb_writeback(wb, work);
2207		finish_writeback_work(wb, work);
2208	}
2209
2210	/*
2211	 * Check for a flush-everything request
2212	 */
2213	wrote += wb_check_start_all(wb);
2214
2215	/*
2216	 * Check for periodic writeback, kupdated() style
2217	 */
2218	wrote += wb_check_old_data_flush(wb);
2219	wrote += wb_check_background_flush(wb);
2220	clear_bit(WB_writeback_running, &wb->state);
2221
2222	return wrote;
2223}
2224
2225/*
2226 * Handle writeback of dirty data for the device backed by this bdi. Also
2227 * reschedules periodically and does kupdated style flushing.
2228 */
2229void wb_workfn(struct work_struct *work)
2230{
2231	struct bdi_writeback *wb = container_of(to_delayed_work(work),
2232						struct bdi_writeback, dwork);
2233	long pages_written;
2234
2235	set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
 
2236
2237	if (likely(!current_is_workqueue_rescuer() ||
2238		   !test_bit(WB_registered, &wb->state))) {
2239		/*
2240		 * The normal path.  Keep writing back @wb until its
2241		 * work_list is empty.  Note that this path is also taken
2242		 * if @wb is shutting down even when we're running off the
2243		 * rescuer as work_list needs to be drained.
2244		 */
2245		do {
2246			pages_written = wb_do_writeback(wb);
2247			trace_writeback_pages_written(pages_written);
2248		} while (!list_empty(&wb->work_list));
2249	} else {
2250		/*
2251		 * bdi_wq can't get enough workers and we're running off
2252		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
2253		 * enough for efficient IO.
2254		 */
2255		pages_written = writeback_inodes_wb(wb, 1024,
2256						    WB_REASON_FORKER_THREAD);
2257		trace_writeback_pages_written(pages_written);
2258	}
2259
2260	if (!list_empty(&wb->work_list))
2261		wb_wakeup(wb);
2262	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2263		wb_wakeup_delayed(wb);
 
 
2264}
2265
2266/*
2267 * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
2268 * write back the whole world.
2269 */
2270static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2271					 enum wb_reason reason)
2272{
2273	struct bdi_writeback *wb;
2274
2275	if (!bdi_has_dirty_io(bdi))
2276		return;
2277
2278	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2279		wb_start_writeback(wb, reason);
2280}
2281
2282void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2283				enum wb_reason reason)
2284{
2285	rcu_read_lock();
2286	__wakeup_flusher_threads_bdi(bdi, reason);
2287	rcu_read_unlock();
2288}
2289
2290/*
2291 * Wakeup the flusher threads to start writeback of all currently dirty pages
2292 */
2293void wakeup_flusher_threads(enum wb_reason reason)
2294{
2295	struct backing_dev_info *bdi;
2296
2297	/*
2298	 * If we are expecting writeback progress we must submit plugged IO.
2299	 */
2300	blk_flush_plug(current->plug, true);
 
2301
2302	rcu_read_lock();
2303	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2304		__wakeup_flusher_threads_bdi(bdi, reason);
2305	rcu_read_unlock();
2306}
2307
2308/*
2309 * Wake up bdi's periodically to make sure dirtytime inodes gets
2310 * written back periodically.  We deliberately do *not* check the
2311 * b_dirtytime list in wb_has_dirty_io(), since this would cause the
2312 * kernel to be constantly waking up once there are any dirtytime
2313 * inodes on the system.  So instead we define a separate delayed work
2314 * function which gets called much more rarely.  (By default, only
2315 * once every 12 hours.)
2316 *
2317 * If there is any other write activity going on in the file system,
2318 * this function won't be necessary.  But if the only thing that has
2319 * happened on the file system is a dirtytime inode caused by an atime
2320 * update, we need this infrastructure below to make sure that inode
2321 * eventually gets pushed out to disk.
2322 */
2323static void wakeup_dirtytime_writeback(struct work_struct *w);
2324static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
2325
2326static void wakeup_dirtytime_writeback(struct work_struct *w)
2327{
2328	struct backing_dev_info *bdi;
2329
2330	rcu_read_lock();
2331	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
2332		struct bdi_writeback *wb;
2333
2334		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2335			if (!list_empty(&wb->b_dirty_time))
2336				wb_wakeup(wb);
2337	}
2338	rcu_read_unlock();
2339	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2340}
2341
2342static int __init start_dirtytime_writeback(void)
2343{
2344	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2345	return 0;
2346}
2347__initcall(start_dirtytime_writeback);
2348
2349int dirtytime_interval_handler(struct ctl_table *table, int write,
2350			       void *buffer, size_t *lenp, loff_t *ppos)
2351{
2352	int ret;
2353
2354	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2355	if (ret == 0 && write)
2356		mod_delayed_work(system_wq, &dirtytime_work, 0);
2357	return ret;
2358}
2359
2360/**
2361 * __mark_inode_dirty -	internal function to mark an inode dirty
2362 *
2363 * @inode: inode to mark
2364 * @flags: what kind of dirty, e.g. I_DIRTY_SYNC.  This can be a combination of
2365 *	   multiple I_DIRTY_* flags, except that I_DIRTY_TIME can't be combined
2366 *	   with I_DIRTY_PAGES.
2367 *
2368 * Mark an inode as dirty.  We notify the filesystem, then update the inode's
2369 * dirty flags.  Then, if needed we add the inode to the appropriate dirty list.
2370 *
2371 * Most callers should use mark_inode_dirty() or mark_inode_dirty_sync()
2372 * instead of calling this directly.
2373 *
2374 * CAREFUL!  We only add the inode to the dirty list if it is hashed or if it
2375 * refers to a blockdev.  Unhashed inodes will never be added to the dirty list
2376 * even if they are later hashed, as they will have been marked dirty already.
2377 *
2378 * In short, ensure you hash any inodes _before_ you start marking them dirty.
2379 *
2380 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
2381 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
2382 * the kernel-internal blockdev inode represents the dirtying time of the
2383 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
2384 * page->mapping->host, so the page-dirtying time is recorded in the internal
2385 * blockdev inode.
2386 */
2387void __mark_inode_dirty(struct inode *inode, int flags)
2388{
2389	struct super_block *sb = inode->i_sb;
2390	int dirtytime = 0;
2391	struct bdi_writeback *wb = NULL;
2392
2393	trace_writeback_mark_inode_dirty(inode, flags);
2394
2395	if (flags & I_DIRTY_INODE) {
2396		/*
2397		 * Inode timestamp update will piggback on this dirtying.
2398		 * We tell ->dirty_inode callback that timestamps need to
2399		 * be updated by setting I_DIRTY_TIME in flags.
2400		 */
2401		if (inode->i_state & I_DIRTY_TIME) {
2402			spin_lock(&inode->i_lock);
2403			if (inode->i_state & I_DIRTY_TIME) {
2404				inode->i_state &= ~I_DIRTY_TIME;
2405				flags |= I_DIRTY_TIME;
2406			}
2407			spin_unlock(&inode->i_lock);
2408		}
2409
2410		/*
2411		 * Notify the filesystem about the inode being dirtied, so that
2412		 * (if needed) it can update on-disk fields and journal the
2413		 * inode.  This is only needed when the inode itself is being
2414		 * dirtied now.  I.e. it's only needed for I_DIRTY_INODE, not
2415		 * for just I_DIRTY_PAGES or I_DIRTY_TIME.
2416		 */
2417		trace_writeback_dirty_inode_start(inode, flags);
2418		if (sb->s_op->dirty_inode)
2419			sb->s_op->dirty_inode(inode,
2420				flags & (I_DIRTY_INODE | I_DIRTY_TIME));
2421		trace_writeback_dirty_inode(inode, flags);
2422
2423		/* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
2424		flags &= ~I_DIRTY_TIME;
2425	} else {
2426		/*
2427		 * Else it's either I_DIRTY_PAGES, I_DIRTY_TIME, or nothing.
2428		 * (We don't support setting both I_DIRTY_PAGES and I_DIRTY_TIME
2429		 * in one call to __mark_inode_dirty().)
2430		 */
2431		dirtytime = flags & I_DIRTY_TIME;
2432		WARN_ON_ONCE(dirtytime && flags != I_DIRTY_TIME);
2433	}
2434
2435	/*
2436	 * Paired with smp_mb() in __writeback_single_inode() for the
2437	 * following lockless i_state test.  See there for details.
2438	 */
2439	smp_mb();
2440
2441	if ((inode->i_state & flags) == flags)
 
2442		return;
2443
2444	spin_lock(&inode->i_lock);
 
 
2445	if ((inode->i_state & flags) != flags) {
2446		const int was_dirty = inode->i_state & I_DIRTY;
2447
2448		inode_attach_wb(inode, NULL);
2449
 
 
 
2450		inode->i_state |= flags;
2451
2452		/*
2453		 * Grab inode's wb early because it requires dropping i_lock and we
2454		 * need to make sure following checks happen atomically with dirty
2455		 * list handling so that we don't move inodes under flush worker's
2456		 * hands.
2457		 */
2458		if (!was_dirty) {
2459			wb = locked_inode_to_wb_and_lock_list(inode);
2460			spin_lock(&inode->i_lock);
2461		}
2462
2463		/*
2464		 * If the inode is queued for writeback by flush worker, just
2465		 * update its dirty state. Once the flush worker is done with
2466		 * the inode it will place it on the appropriate superblock
2467		 * list, based upon its state.
2468		 */
2469		if (inode->i_state & I_SYNC_QUEUED)
2470			goto out_unlock;
2471
2472		/*
2473		 * Only add valid (hashed) inodes to the superblock's
2474		 * dirty list.  Add blockdev inodes as well.
2475		 */
2476		if (!S_ISBLK(inode->i_mode)) {
2477			if (inode_unhashed(inode))
2478				goto out_unlock;
2479		}
2480		if (inode->i_state & I_FREEING)
2481			goto out_unlock;
2482
2483		/*
2484		 * If the inode was already on b_dirty/b_io/b_more_io, don't
2485		 * reposition it (that would break b_dirty time-ordering).
2486		 */
2487		if (!was_dirty) {
 
2488			struct list_head *dirty_list;
2489			bool wakeup_bdi = false;
2490
 
 
2491			inode->dirtied_when = jiffies;
2492			if (dirtytime)
2493				inode->dirtied_time_when = jiffies;
2494
2495			if (inode->i_state & I_DIRTY)
2496				dirty_list = &wb->b_dirty;
2497			else
2498				dirty_list = &wb->b_dirty_time;
2499
2500			wakeup_bdi = inode_io_list_move_locked(inode, wb,
2501							       dirty_list);
2502
2503			spin_unlock(&wb->list_lock);
2504			spin_unlock(&inode->i_lock);
2505			trace_writeback_dirty_inode_enqueue(inode);
2506
2507			/*
2508			 * If this is the first dirty inode for this bdi,
2509			 * we have to wake-up the corresponding bdi thread
2510			 * to make sure background write-back happens
2511			 * later.
2512			 */
2513			if (wakeup_bdi &&
2514			    (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
2515				wb_wakeup_delayed(wb);
2516			return;
2517		}
2518	}
2519out_unlock:
2520	if (wb)
2521		spin_unlock(&wb->list_lock);
2522	spin_unlock(&inode->i_lock);
2523}
2524EXPORT_SYMBOL(__mark_inode_dirty);
2525
2526/*
2527 * The @s_sync_lock is used to serialise concurrent sync operations
2528 * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
2529 * Concurrent callers will block on the s_sync_lock rather than doing contending
2530 * walks. The queueing maintains sync(2) required behaviour as all the IO that
2531 * has been issued up to the time this function is enter is guaranteed to be
2532 * completed by the time we have gained the lock and waited for all IO that is
2533 * in progress regardless of the order callers are granted the lock.
2534 */
2535static void wait_sb_inodes(struct super_block *sb)
2536{
2537	LIST_HEAD(sync_list);
2538
2539	/*
2540	 * We need to be protected against the filesystem going from
2541	 * r/o to r/w or vice versa.
2542	 */
2543	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2544
2545	mutex_lock(&sb->s_sync_lock);
2546
2547	/*
2548	 * Splice the writeback list onto a temporary list to avoid waiting on
2549	 * inodes that have started writeback after this point.
2550	 *
2551	 * Use rcu_read_lock() to keep the inodes around until we have a
2552	 * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
2553	 * the local list because inodes can be dropped from either by writeback
2554	 * completion.
2555	 */
2556	rcu_read_lock();
2557	spin_lock_irq(&sb->s_inode_wblist_lock);
2558	list_splice_init(&sb->s_inodes_wb, &sync_list);
2559
2560	/*
2561	 * Data integrity sync. Must wait for all pages under writeback, because
2562	 * there may have been pages dirtied before our sync call, but which had
2563	 * writeout started before we write it out.  In which case, the inode
2564	 * may not be on the dirty list, but we still have to wait for that
2565	 * writeout.
2566	 */
2567	while (!list_empty(&sync_list)) {
2568		struct inode *inode = list_first_entry(&sync_list, struct inode,
2569						       i_wb_list);
2570		struct address_space *mapping = inode->i_mapping;
2571
2572		/*
2573		 * Move each inode back to the wb list before we drop the lock
2574		 * to preserve consistency between i_wb_list and the mapping
2575		 * writeback tag. Writeback completion is responsible to remove
2576		 * the inode from either list once the writeback tag is cleared.
2577		 */
2578		list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2579
2580		/*
2581		 * The mapping can appear untagged while still on-list since we
2582		 * do not have the mapping lock. Skip it here, wb completion
2583		 * will remove it.
2584		 */
2585		if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2586			continue;
2587
2588		spin_unlock_irq(&sb->s_inode_wblist_lock);
2589
2590		spin_lock(&inode->i_lock);
2591		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2592			spin_unlock(&inode->i_lock);
2593
2594			spin_lock_irq(&sb->s_inode_wblist_lock);
2595			continue;
2596		}
2597		__iget(inode);
2598		spin_unlock(&inode->i_lock);
2599		rcu_read_unlock();
2600
2601		/*
2602		 * We keep the error status of individual mapping so that
2603		 * applications can catch the writeback error using fsync(2).
2604		 * See filemap_fdatawait_keep_errors() for details.
2605		 */
2606		filemap_fdatawait_keep_errors(mapping);
2607
2608		cond_resched();
2609
2610		iput(inode);
2611
2612		rcu_read_lock();
2613		spin_lock_irq(&sb->s_inode_wblist_lock);
2614	}
2615	spin_unlock_irq(&sb->s_inode_wblist_lock);
2616	rcu_read_unlock();
2617	mutex_unlock(&sb->s_sync_lock);
2618}
2619
2620static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
2621				     enum wb_reason reason, bool skip_if_busy)
2622{
2623	struct backing_dev_info *bdi = sb->s_bdi;
2624	DEFINE_WB_COMPLETION(done, bdi);
2625	struct wb_writeback_work work = {
2626		.sb			= sb,
2627		.sync_mode		= WB_SYNC_NONE,
2628		.tagged_writepages	= 1,
2629		.done			= &done,
2630		.nr_pages		= nr,
2631		.reason			= reason,
2632	};
2633
2634	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
2635		return;
2636	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2637
2638	bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
2639	wb_wait_for_completion(&done);
2640}
2641
2642/**
2643 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
2644 * @sb: the superblock
2645 * @nr: the number of pages to write
2646 * @reason: reason why some writeback work initiated
2647 *
2648 * Start writeback on some inodes on this super_block. No guarantees are made
2649 * on how many (if any) will be written, and this function does not wait
2650 * for IO completion of submitted IO.
2651 */
2652void writeback_inodes_sb_nr(struct super_block *sb,
2653			    unsigned long nr,
2654			    enum wb_reason reason)
2655{
2656	__writeback_inodes_sb_nr(sb, nr, reason, false);
2657}
2658EXPORT_SYMBOL(writeback_inodes_sb_nr);
2659
2660/**
2661 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
2662 * @sb: the superblock
2663 * @reason: reason why some writeback work was initiated
2664 *
2665 * Start writeback on some inodes on this super_block. No guarantees are made
2666 * on how many (if any) will be written, and this function does not wait
2667 * for IO completion of submitted IO.
2668 */
2669void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2670{
2671	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
2672}
2673EXPORT_SYMBOL(writeback_inodes_sb);
2674
2675/**
2676 * try_to_writeback_inodes_sb - try to start writeback if none underway
2677 * @sb: the superblock
2678 * @reason: reason why some writeback work was initiated
2679 *
2680 * Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
2681 */
2682void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2683{
2684	if (!down_read_trylock(&sb->s_umount))
2685		return;
2686
2687	__writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
2688	up_read(&sb->s_umount);
2689}
2690EXPORT_SYMBOL(try_to_writeback_inodes_sb);
2691
2692/**
2693 * sync_inodes_sb	-	sync sb inode pages
2694 * @sb: the superblock
2695 *
2696 * This function writes and waits on any dirty inode belonging to this
2697 * super_block.
2698 */
2699void sync_inodes_sb(struct super_block *sb)
2700{
2701	struct backing_dev_info *bdi = sb->s_bdi;
2702	DEFINE_WB_COMPLETION(done, bdi);
2703	struct wb_writeback_work work = {
2704		.sb		= sb,
2705		.sync_mode	= WB_SYNC_ALL,
2706		.nr_pages	= LONG_MAX,
2707		.range_cyclic	= 0,
2708		.done		= &done,
2709		.reason		= WB_REASON_SYNC,
2710		.for_sync	= 1,
2711	};
2712
2713	/*
2714	 * Can't skip on !bdi_has_dirty() because we should wait for !dirty
2715	 * inodes under writeback and I_DIRTY_TIME inodes ignored by
2716	 * bdi_has_dirty() need to be written out too.
2717	 */
2718	if (bdi == &noop_backing_dev_info)
2719		return;
2720	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2721
2722	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
2723	bdi_down_write_wb_switch_rwsem(bdi);
2724	bdi_split_work_to_wbs(bdi, &work, false);
2725	wb_wait_for_completion(&done);
2726	bdi_up_write_wb_switch_rwsem(bdi);
2727
2728	wait_sb_inodes(sb);
2729}
2730EXPORT_SYMBOL(sync_inodes_sb);
2731
2732/**
2733 * write_inode_now	-	write an inode to disk
2734 * @inode: inode to write to disk
2735 * @sync: whether the write should be synchronous or not
2736 *
2737 * This function commits an inode to disk immediately if it is dirty. This is
2738 * primarily needed by knfsd.
2739 *
2740 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
2741 */
2742int write_inode_now(struct inode *inode, int sync)
2743{
2744	struct writeback_control wbc = {
2745		.nr_to_write = LONG_MAX,
2746		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
2747		.range_start = 0,
2748		.range_end = LLONG_MAX,
2749	};
2750
2751	if (!mapping_can_writeback(inode->i_mapping))
2752		wbc.nr_to_write = 0;
2753
2754	might_sleep();
2755	return writeback_single_inode(inode, &wbc);
2756}
2757EXPORT_SYMBOL(write_inode_now);
2758
2759/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2760 * sync_inode_metadata - write an inode to disk
2761 * @inode: the inode to sync
2762 * @wait: wait for I/O to complete.
2763 *
2764 * Write an inode to disk and adjust its dirty state after completion.
2765 *
2766 * Note: only writes the actual inode, no associated data or other metadata.
2767 */
2768int sync_inode_metadata(struct inode *inode, int wait)
2769{
2770	struct writeback_control wbc = {
2771		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
2772		.nr_to_write = 0, /* metadata-only */
2773	};
2774
2775	return writeback_single_inode(inode, &wbc);
2776}
2777EXPORT_SYMBOL(sync_inode_metadata);