v6.2
// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include "internal.h"

static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next,
				 bool max_one_loop);

static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
			       loff_t i_size, bool caching);

#ifdef CONFIG_AFS_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	return fscache_dirty_folio(mapping, folio,
				afs_vnode_cache(AFS_FS_I(mapping->host)));
}
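
/*
 * Illustrative note (assumption, not taken from this file): when
 * CONFIG_AFS_FSCACHE is not set, the address_space_operations are expected
 * to fall back to the generic filemap_dirty_folio(), so only the
 * cache-backed build needs to pin the cache object when a folio is dirtied.
 */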
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
	if (caching)
		folio_start_fscache(folio);
}
#else
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif

/*
 * Flush out a conflicting write.  This may extend the write to the
 * surrounding pages if they are also dirty and contiguous with the
 * conflicting region.
 */
static int afs_flush_conflicting_write(struct address_space *mapping,
				       struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= folio_pos(folio),
		.range_end	= LLONG_MAX,
	};
	loff_t next;

	return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX,
				     &next, true);
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct folio *folio;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);
	if (ret < 0)
		return ret;

	index = folio_index(folio);
	from = pos - index * PAGE_SIZE;
	to = from + len;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		ASSERTCMP(f, <=, t);

		if (folio_test_writeback(folio)) {
			trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
			folio_unlock(folio);
			goto wait_for_writeback;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = folio_file_page(folio, pos / PAGE_SIZE);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	trace_afs_folio_dirty(vnode, tracepoint_string("confl"), folio);
	folio_unlock(folio);

	ret = afs_flush_conflicting_write(mapping, folio);
	if (ret < 0)
		goto error;

wait_for_writeback:
	ret = folio_wait_writeback_killable(folio);
	if (ret < 0)
		goto error;

	ret = folio_lock_killable(folio);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}
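
/*
 * Worked example for the merge test in afs_write_begin() (illustrative):
 * if the folio already carries a dirty interval [f = 1024, t = 2048) and a
 * new write covers [from = 2048, to = 3000), the ranges touch, so the
 * write is merged.  A write starting at from = 3500 satisfies "from > t",
 * so unless AFS_VNODE_NEW_CONTENT is set it takes the
 * flush_conflicting_write path first.
 */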

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = offset_in_folio(folio, pos);
	unsigned int t, to = from + copied;
	loff_t i_size, write_end_pos;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	if (!folio_test_uptodate(folio)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	if (copied == 0)
		goto out;

	write_end_pos = pos + copied;

	i_size = i_size_read(&vnode->netfs.inode);
	if (write_end_pos > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->netfs.inode);
		if (write_end_pos > i_size)
			afs_set_i_size(vnode, write_end_pos);
		write_sequnlock(&vnode->cb_lock);
		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
	}

	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_folio_dirty(folio, f, t);
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
	} else {
		priv = afs_folio_dirty(folio, from, to);
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
	}

	if (folio_mark_dirty(folio))
		_debug("dirtied %lx", folio_index(folio));

out:
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}
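
/*
 * Sketch (assumption about helpers defined elsewhere in kAFS): the
 * afs_folio_dirty*() helpers pack the dirty byte range into the single
 * folio->private word, roughly as two shifted offsets:
 *
 *	priv = afs_folio_dirty(folio, from, to);	encode [from, to)
 *	f = afs_folio_dirty_from(folio, priv);		decode lower bound
 *	t = afs_folio_dirty_to(folio, priv);		decode upper bound
 *
 * so afs_write_end() above only ever widens the recorded interval.
 */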

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

		folio_clear_uptodate(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_page(mapping, &folio->page);
		folio_unlock(folio);
		folio_put(folio);

	} while (index = next, index <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = index + folio_nr_pages(folio);
		folio_redirty_for_writepage(wbc, folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->netfs.inode.i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (!folio_test_writeback(folio)) {
			kdebug("bad %x @%llx page %lx %lx",
			       len, start, folio_index(folio), end);
			ASSERT(folio_test_writeback(folio));
		}

		trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
		folio_detach_private(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cache the keys used to author
 * writes on the vnode.  *_wbk will contain the last writeback key used or
 * NULL, and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}
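
/*
 * Usage sketch (illustrative): callers cycle through the cached keys by
 * handing the previous key back in:
 *
 *	struct afs_wb_key *wbk = NULL;
 *
 *	ret = afs_get_writeback_key(vnode, &wbk);	first valid key
 *	...store, hit a permission error...
 *	ret = afs_get_writeback_key(vnode, &wbk);	next valid key
 *
 * afs_store_data() below uses exactly this pattern to retry a store with
 * another key after a permission failure.
 */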

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter);
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->netfs.inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 bool caching,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct folio *folio;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio_index(folio) != index)
				break;

			if (!folio_try_get_rcu(folio)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(folio != xas_reload(&xas))) {
				folio_put(folio);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			psize = folio_size(folio);
			priv = (unsigned long)folio_get_private(folio);
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
			if (f != 0 && !new_content) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += folio_nr_pages(folio);
			if (!pagevec_add(&pvec, &folio->page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			folio = page_folio(pvec.pages[i]);
			trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			if (folio_start_writeback(folio))
				BUG();
			afs_folio_start_fscache(caching, folio);

			*_count -= folio_nr_pages(folio);
			folio_unlock(folio);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}
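
/*
 * Worked example for the arithmetic above (illustrative, 4KiB folios): a
 * folio dirty over [f = 0, t = 4096) adds filler + 4096 bytes to len and
 * leaves filler = 0, so extension can continue.  A folio dirty over
 * [f = 0, t = 1000) adds filler + 1000 bytes and records filler = 3096;
 * extension then stops unless new_content is set, in which case the filler
 * gap may later be written out as part of the contiguous region.
 */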

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
						struct writeback_control *wbc,
						struct folio *folio,
						loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->netfs.inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", folio_index(folio), start, end);

	if (folio_start_writeback(folio))
		BUG();
	afs_folio_start_fscache(caching, folio);

	count -= folio_nr_pages(folio);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = (unsigned long)folio_get_private(folio);
	offset = afs_folio_dirty_from(folio, priv);
	to = afs_folio_dirty_to(folio, priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == folio_size(folio) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content,
					     caching, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		/* Speculatively write to the cache.  We have to fix this up
		 * later if the store fails.
		 */
		afs_write_to_cache(vnode, start, len, i_size, caching);

		iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		fscache_clear_page_bits(mapping, start, len, caching);
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
	case -ENETRESET:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
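
/*
 * Summary of the error disposition above: permission and key errors (and
 * -ENETRESET) redirty the pages so the store can be retried with another
 * key or connection; -EDQUOT and -ENOSPC redirty and flag the mapping with
 * -ENOSPC; hard failures such as -EIO or -EROFS kill the pages so the
 * write is not retried indefinitely.
 */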

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next,
				 bool max_one_loop)
{
	struct folio *folio;
	struct page *head_page;
	ssize_t ret;
	int n, skips = 0;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &head_page);
		if (!n)
			break;

		folio = page_folio(head_page);
		start = folio_pos(folio); /* May regress with THPs */

		_debug("wback %lx", folio_index(folio));

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = folio_lock_killable(folio);
			if (ret < 0) {
				folio_put(folio);
				return ret;
			}
		} else {
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return 0;
			}
		}

		if (folio_mapping(folio) != mapping ||
		    !folio_test_dirty(folio)) {
			start += folio_size(folio);
			folio_unlock(folio);
			folio_put(folio);
			continue;
		}

		if (folio_test_writeback(folio) ||
		    folio_test_fscache(folio)) {
			folio_unlock(folio);
			if (wbc->sync_mode != WB_SYNC_NONE) {
				folio_wait_writeback(folio);
#ifdef CONFIG_AFS_FSCACHE
				folio_wait_fscache(folio);
#endif
			} else {
				start += folio_size(folio);
			}
			folio_put(folio);
			if (wbc->sync_mode == WB_SYNC_NONE) {
				if (skips >= 5 || need_resched())
					break;
				skips++;
			}
			continue;
		}

		if (!folio_clear_dirty_for_io(folio))
			BUG();
		ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
		folio_put(folio);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret;

		if (max_one_loop)
			break;

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX,
					    &next, false);
		if (ret == 0) {
			mapping->writeback_index = next / PAGE_SIZE;
			if (start > 0 && wbc->nr_to_write > 0) {
				ret = afs_writepages_region(mapping, wbc, 0,
							    start, &next, false);
				if (ret == 0)
					mapping->writeback_index =
						next / PAGE_SIZE;
			}
		}
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX,
					    &next, false);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = next / PAGE_SIZE;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end,
					    &next, false);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}
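
/*
 * Worked example for the range_cyclic case (illustrative): if
 * writeback_index is 100, the first pass covers byte offset
 * 100 * PAGE_SIZE up to LLONG_MAX; if that succeeds and write quota
 * (nr_to_write) remains, a second pass wraps around and covers offset 0 up
 * to the original starting point.
 */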

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->netfs.inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = afs_validate(vnode, af->key);
	if (result < 0)
		return result;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	afs_validate(vnode, af->key);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		goto out;
#endif

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;

	/* We mustn't change folio->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (folio_wait_writeback_killable(folio) < 0) {
		folio_unlock(folio);
		goto out;
	}

	priv = afs_folio_dirty(folio, 0, folio_size(folio));
	priv = afs_folio_dirty_mmapped(priv);
	if (folio_test_private(folio)) {
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
	} else {
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Prune the keys cached for writeback.  Unused keys are unlinked under
 * vnode->wb_lock and then released.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_folio(struct folio *folio)
{
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", folio->index);

	priv = (unsigned long)folio_get_private(folio);
	if (folio_clear_dirty_for_io(folio)) {
		f = 0;
		t = folio_size(folio);
		if (folio_test_private(folio)) {
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
		}

		bv[0].bv_page = &folio->page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, ITER_SOURCE, bv, 1, bv[0].bv_len);

		trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
		ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
	}

	trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
	folio_detach_private(folio);
	folio_wait_fscache(folio);
	return ret;
}

/*
 * Deal with the completion of writing the data to the cache.
 */
static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				    bool was_async)
{
	struct afs_vnode *vnode = priv;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS)
		afs_invalidate_cache(vnode, 0);
}

/*
 * Save the write to the cache also.
 */
static void afs_write_to_cache(struct afs_vnode *vnode,
			       loff_t start, size_t len, loff_t i_size,
			       bool caching)
{
	fscache_write_to_cache(afs_vnode_cache(vnode),
			       vnode->netfs.inode.i_mapping, start, len, i_size,
			       afs_write_to_cache_done, vnode, caching);
}
v3.5.6
 
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *page);

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * unlink a writeback record because its usage has reached zero
 * - must be called with the wb->vnode->writeback_lock held
 */
static void afs_unlink_writeback(struct afs_writeback *wb)
{
	struct afs_writeback *front;
	struct afs_vnode *vnode = wb->vnode;

	list_del_init(&wb->link);
	if (!list_empty(&vnode->writebacks)) {
		/* if an fsync rises to the front of the queue then wake it
		 * up */
		front = list_entry(vnode->writebacks.next,
				   struct afs_writeback, link);
		if (front->state == AFS_WBACK_SYNCING) {
			_debug("wake up sync");
			front->state = AFS_WBACK_COMPLETE;
			wake_up(&front->waitq);
		}
	}
}

/*
 * free a writeback record
 */
static void afs_free_writeback(struct afs_writeback *wb)
{
	_enter("");
	key_put(wb->key);
	kfree(wb);
}

/*
 * dispose of a reference to a writeback record
 */
void afs_put_writeback(struct afs_writeback *wb)
{
	struct afs_vnode *vnode = wb->vnode;

	_enter("{%d}", wb->usage);

	spin_lock(&vnode->writeback_lock);
	if (--wb->usage == 0)
		afs_unlink_writeback(wb);
	else
		wb = NULL;
	spin_unlock(&vnode->writeback_lock);
	if (wb)
		afs_free_writeback(wb);
}
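
/*
 * Illustrative note: in this version each dirty page holds a reference on
 * its afs_writeback record via page_private(), so wb->usage roughly counts
 * the pages still bound to the record, and the record is only freed once
 * the last such page has been written back or discarded.
 */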

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, struct page *page)
{
	loff_t i_size;
	int ret;
	int len;

	_enter(",,%llu", (unsigned long long)pos);

	i_size = i_size_read(&vnode->vfs_inode);
	if (pos + PAGE_CACHE_SIZE > i_size)
		len = i_size - pos;
	else
		len = PAGE_CACHE_SIZE;

	ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_writeback *candidate, *wb;
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
	struct page *page;
	struct key *key = file->private_data;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
	if (!candidate)
		return -ENOMEM;
	candidate->vnode = vnode;
	candidate->first = candidate->last = index;
	candidate->offset_first = from;
	candidate->to_last = to;
	INIT_LIST_HEAD(&candidate->link);
	candidate->usage = 1;
	candidate->state = AFS_WBACK_PENDING;
	init_waitqueue_head(&candidate->waitq);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		kfree(candidate);
		return -ENOMEM;
	}
	*pagep = page;
	/* page won't leak in error case: it eventually gets cleaned off LRU */

	if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
		ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
		if (ret < 0) {
			kfree(candidate);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

try_again:
	spin_lock(&vnode->writeback_lock);

	/* see if this page is already pending a writeback under a suitable key
	 * - if so we can just join onto that one */
	wb = (struct afs_writeback *) page_private(page);
	if (wb) {
		if (wb->key == key && wb->state == AFS_WBACK_PENDING)
			goto subsume_in_current_wb;
		goto flush_conflicting_wb;
	}

	if (index > 0) {
		/* see if we can find an already pending writeback that we can
		 * append this page to */
		list_for_each_entry(wb, &vnode->writebacks, link) {
			if (wb->last == index - 1 && wb->key == key &&
			    wb->state == AFS_WBACK_PENDING)
				goto append_to_previous_wb;
		}
	}

	list_add_tail(&candidate->link, &vnode->writebacks);
	candidate->key = key_get(key);
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) candidate);
	_leave(" = 0 [new]");
	return 0;

subsume_in_current_wb:
	_debug("subsume");
	ASSERTRANGE(wb->first, <=, index, <=, wb->last);
	if (index == wb->first && from < wb->offset_first)
		wb->offset_first = from;
	if (index == wb->last && to > wb->to_last)
		wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	kfree(candidate);
	_leave(" = 0 [sub]");
	return 0;

append_to_previous_wb:
	_debug("append into %lx-%lx", wb->first, wb->last);
	wb->usage++;
	wb->last++;
	wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) wb);
	kfree(candidate);
	_leave(" = 0 [app]");
	return 0;

	/* the page is currently bound to another context, so if it's dirty we
	 * need to flush it before we can use the new context */
flush_conflicting_wb:
	_debug("flush conflict");
	if (wb->state == AFS_WBACK_PENDING)
		wb->state = AFS_WBACK_CONFLICTING;
	spin_unlock(&vnode->writeback_lock);
	if (PageDirty(page)) {
		ret = afs_write_back_from_locked_page(wb, page);
		if (ret < 0) {
			afs_put_writeback(candidate);
			_leave(" = %d", ret);
			return ret;
		}
	}

	/* the page holds a ref on the writeback record */
	afs_put_writeback(wb);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	goto try_again;
}
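
/*
 * Note on the three success exits above: "[new]" starts a fresh writeback
 * record for the page, "[sub]" widens the record already attached to the
 * page, and "[app]" appends the page to the record covering the pages
 * immediately before it; a conflicting record is flushed and detached
 * first, then the lookup is retried.
 */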

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
	loff_t i_size, maybe_i_size;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->writeback_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->writeback_lock);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
			   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv, 0);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			ClearPageUptodate(pv.pages[loop]);
			if (error)
				SetPageError(pv.pages[loop]);
			end_page_writeback(pv.pages[loop]);
		}

		first += count;
		__pagevec_release(&pv);
	} while (first < last);

	_leave("");
}

/*
 * synchronously write back the locked page and any subsequent non-locked dirty
 * pages also covered by the same writeback record
 */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *primary_page)
{
	struct page *pages[8], *page;
	unsigned long count;
	unsigned n, offset, to;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (!clear_page_dirty_for_io(primary_page))
		BUG();
	if (test_set_page_writeback(primary_page))
		BUG();

	/* find all consecutive lockable dirty pages, stopping when we find a
	 * page that is not immediately lockable, is not dirty or is missing,
	 * or we reach the end of the range */
	start = primary_page->index;
	if (start >= wb->last)
		goto no_more;
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = wb->last - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
					  start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (page->index > wb->last)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) ||
			    page_private(page) != (unsigned long) wb) {
				unlock_page(page);
				break;
			}
			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= wb->last && count < 65536);

no_more:
	/* we now have a contiguous set of dirty pages, each with writeback set
	 * and the dirty mark cleared; the first page is locked and must remain
	 * so, all the rest are unlocked */
	first = primary_page->index;
	last = first + count - 1;

	offset = (first == wb->first) ? wb->offset_first : 0;
	to = (last == wb->last) ? wb->to_last : PAGE_SIZE;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_vnode_store_data(wb, first, last, offset, to);
	if (ret < 0) {
		switch (ret) {
		case -EDQUOT:
		case -ENOSPC:
			set_bit(AS_ENOSPC,
				&wb->vnode->vfs_inode.i_mapping->flags);
			break;
		case -EROFS:
		case -EIO:
		case -EREMOTEIO:
		case -EFBIG:
		case -ENOENT:
		case -ENOMEDIUM:
		case -ENXIO:
			afs_kill_pages(wb->vnode, true, first, last);
			set_bit(AS_EIO, &wb->vnode->vfs_inode.i_mapping->flags);
			break;
		case -EACCES:
		case -EPERM:
		case -ENOKEY:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EKEYREVOKED:
			afs_kill_pages(wb->vnode, false, first, last);
			break;
		default:
			break;
		}
	} else {
		ret = count;
	}

	_leave(" = %d", ret);
	return ret;
}
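
/*
 * Worked example (illustrative): with the 8-entry pages[] batch above, a
 * writeback record spanning pages 3..20 is gathered as page 3 (the
 * primary), then batches 4..11 and 12..19, then 20, stopping early if any
 * page is not immediately lockable, is clean, or belongs to a different
 * writeback record.
 */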

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct afs_writeback *wb;
	int ret;

	_enter("{%lx},", page->index);

	wb = (struct afs_writeback *) page_private(page);
	ASSERT(wb != NULL);

	ret = afs_write_back_from_locked_page(wb, page);
	unlock_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct afs_writeback *wb;
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
				       1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		if (page->index > end) {
			*_next = index;
			page_cache_release(page);
			_leave(" = 0 [%lx]", *_next);
			return 0;
		}

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		lock_page(page);

		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) || !PageDirty(page)) {
			unlock_page(page);
			continue;
		}

		wb = (struct afs_writeback *) page_private(page);
		ASSERT(wb != NULL);

		spin_lock(&wb->vnode->writeback_lock);
		wb->state = AFS_WBACK_WRITING;
		spin_unlock(&wb->vnode->writeback_lock);

		ret = afs_write_back_from_locked_page(wb, page);
		unlock_page(page);
		page_cache_release(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct afs_writeback *wb = call->wb;
	struct pagevec pv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;
	bool free_wb;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	ASSERT(wb != NULL);

	pagevec_init(&pv, 0);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(call->mapping, first, count,
					      pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		spin_lock(&vnode->writeback_lock);
		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			end_page_writeback(page);
			if (page_private(page) == (unsigned long) wb) {
				set_page_private(page, 0);
				ClearPagePrivate(page);
				wb->usage--;
			}
		}
		free_wb = false;
		if (wb->usage == 0) {
			afs_unlink_writeback(wb);
			free_wb = true;
		}
		spin_unlock(&vnode->writeback_lock);
		first += count;
		if (free_wb) {
			afs_free_writeback(wb);
			wb = NULL;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
		       unsigned long nr_segs, loff_t pos)
{
	struct dentry *dentry = iocb->ki_filp->f_path.dentry;
	struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
	ssize_t result;
	size_t count = iov_length(iov, nr_segs);

	_enter("{%x.%u},{%zu},%lu,",
	       vnode->fid.vid, vnode->fid.vnode, count, nr_segs);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (IS_ERR_VALUE(result)) {
		_leave(" = %zd", result);
		return result;
	}

	_leave(" = %zd", result);
	return result;
}

/*
 * flush the vnode to the fileserver
 */
int afs_writeback_all(struct afs_vnode *vnode)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_cyclic	= 1,
	};
	int ret;

	_enter("");

	ret = mapping->a_ops->writepages(mapping, &wbc);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	_leave(" = %d", ret);
	return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = file->f_mapping->host;
	struct afs_writeback *wb, *xwb;
	struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
	int ret;

	_enter("{%x:%u},{n=%s},%d",
	       vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name,
	       datasync);

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	/* use a writeback record as a marker in the queue - when this reaches
	 * the front of the queue, all the outstanding writes are either
	 * completed or rejected */
	wb = kzalloc(sizeof(*wb), GFP_KERNEL);
	if (!wb) {
		ret = -ENOMEM;
		goto out;
	}
	wb->vnode = vnode;
	wb->first = 0;
	wb->last = -1;
	wb->offset_first = 0;
	wb->to_last = PAGE_SIZE;
	wb->usage = 1;
	wb->state = AFS_WBACK_SYNCING;
	init_waitqueue_head(&wb->waitq);

	spin_lock(&vnode->writeback_lock);
	list_for_each_entry(xwb, &vnode->writebacks, link) {
		if (xwb->state == AFS_WBACK_PENDING)
			xwb->state = AFS_WBACK_CONFLICTING;
	}
	list_add_tail(&wb->link, &vnode->writebacks);
	spin_unlock(&vnode->writeback_lock);

	/* push all the outstanding writebacks to the server */
	ret = afs_writeback_all(vnode);
	if (ret < 0) {
		afs_put_writeback(wb);
		_leave(" = %d [wb]", ret);
		goto out;
	}

	/* wait for the preceding writes to actually complete */
	ret = wait_event_interruptible(wb->waitq,
				       wb->state == AFS_WBACK_COMPLETE ||
				       vnode->writebacks.next == &wb->link);
	afs_put_writeback(wb);
	_leave(" = %d", ret);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
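
/*
 * Note: the empty writeback record created above acts as a sentinel in the
 * queue; once everything queued ahead of it has completed or been
 * rejected, it reaches the front and the waiter is woken (see
 * afs_unlink_writeback()), at which point fsync can return.
 */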

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	struct afs_vnode *vnode = AFS_FS_I(vma->vm_file->f_mapping->host);

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	/* wait for the page to be written to the cache before we allow it to
	 * be modified */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, page);
#endif

	_leave(" = 0");
	return 0;
}