// SPDX-License-Identifier: GPL-2.0-only
/* Miscellaneous routines.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/swap.h>
#include "internal.h"

/*
 * Attach a folio to the buffer and maybe set marks on it to say that we need
 * to put the folio later and twiddle the pagecache flags.
 */
int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
			    struct folio *folio, unsigned int flags,
			    gfp_t gfp_mask)
{
	XA_STATE_ORDER(xas, xa, index, folio_order(folio));

retry:
	xas_lock(&xas);
	for (;;) {
		xas_store(&xas, folio);
		if (!xas_error(&xas))
			break;
		xas_unlock(&xas);
		if (!xas_nomem(&xas, gfp_mask))
			return xas_error(&xas);
		goto retry;
	}

	if (flags & NETFS_FLAG_PUT_MARK)
		xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
	if (flags & NETFS_FLAG_PAGECACHE_MARK)
		xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
	xas_unlock(&xas);
	return xas_error(&xas);
}
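
/* Example (sketch, not part of the original file): a caller that wants the
 * folio both put on cleanup and noted as being in the pagecache can combine
 * the two mark flags.  The function name is hypothetical; the xarray, index
 * and folio are assumed to have been set up by the caller.
 */
static int example_store_folio(struct xarray *buffer, unsigned long index,
			       struct folio *folio)
{
	return netfs_xa_store_and_mark(buffer, index, folio,
				       NETFS_FLAG_PUT_MARK |
				       NETFS_FLAG_PAGECACHE_MARK,
				       GFP_KERNEL);
}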

/*
 * Create the specified range of folios in the buffer attached to the read
 * request.  The folios are marked with NETFS_BUF_PUT_MARK so that we know that
 * these need freeing later.
 */
int netfs_add_folios_to_buffer(struct xarray *buffer,
			       struct address_space *mapping,
			       pgoff_t index, pgoff_t to, gfp_t gfp_mask)
{
	struct folio *folio;
	int ret;

	if (to + 1 == index) /* Page range is inclusive */
		return 0;

	do {
		/* TODO: Figure out what order folio can be allocated here */
		folio = filemap_alloc_folio(readahead_gfp_mask(mapping), 0);
		if (!folio)
			return -ENOMEM;
		folio->index = index;
		ret = netfs_xa_store_and_mark(buffer, index, folio,
					      NETFS_FLAG_PUT_MARK, gfp_mask);
		if (ret < 0) {
			folio_put(folio);
			return ret;
		}

		index += folio_nr_pages(folio);
	} while (index <= to && index != 0);

	return 0;
}

/*
 * Clear an xarray buffer, putting a ref on the folios that have
 * NETFS_BUF_PUT_MARK set.
 */
void netfs_clear_buffer(struct xarray *buffer)
{
	struct folio *folio;
	XA_STATE(xas, buffer, 0);

	rcu_read_lock();
	xas_for_each_marked(&xas, folio, ULONG_MAX, NETFS_BUF_PUT_MARK) {
		folio_put(folio);
	}
	rcu_read_unlock();
	xa_destroy(buffer);
}
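
/* Example (sketch, not part of the original file): the intended lifecycle of
 * such a buffer - initialise the xarray, populate an inclusive page range,
 * use it for I/O, then put the marked folios and destroy it.  The function
 * name is hypothetical.
 */
static int example_buffer_lifecycle(struct address_space *mapping)
{
	struct xarray buffer;
	int ret;

	xa_init(&buffer);

	/* Add folios covering pages 0-7 inclusive, each marked with
	 * NETFS_BUF_PUT_MARK so that netfs_clear_buffer() will put them.
	 */
	ret = netfs_add_folios_to_buffer(&buffer, mapping, 0, 7, GFP_KERNEL);
	if (ret == 0) {
		/* ... perform I/O on the buffer here ... */
	}

	netfs_clear_buffer(&buffer);
	return ret;
}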

/**
 * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
 * @mapping: The mapping the folio belongs to.
 * @folio: The folio being dirtied.
 *
 * Set the dirty flag on a folio and pin an in-use cache object in memory so
 * that writeback can later write to it.  This is intended to be called from
 * the filesystem's ->dirty_folio() method.
 *
 * Return: true if the dirty flag was set on the folio, false otherwise.
 */
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	struct fscache_cookie *cookie = netfs_i_cookie(ictx);
	bool need_use = false;

	_enter("");

	if (!filemap_dirty_folio(mapping, folio))
		return false;
	if (!fscache_cookie_valid(cookie))
		return true;

	if (!(inode->i_state & I_PINNING_NETFS_WB)) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_NETFS_WB)) {
			inode->i_state |= I_PINNING_NETFS_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		if (need_use)
			fscache_use_cookie(cookie, true);
	}
	return true;
}
EXPORT_SYMBOL(netfs_dirty_folio);
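
/* Example (sketch, not part of the original file): a netfs can point its
 * ->dirty_folio() op directly at netfs_dirty_folio() or wrap it when it has
 * private bookkeeping to do first.  "myfs" is a hypothetical filesystem.
 */
static bool myfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	/* Filesystem-private accounting could go here. */
	return netfs_dirty_folio(mapping, folio);
}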

/**
 * netfs_unpin_writeback - Unpin writeback resources
 * @inode: The inode on which the cookie resides
 * @wbc: The writeback control
 *
 * Unpin the writeback resources pinned by netfs_dirty_folio().  This is
 * intended to be used as, or called from, the netfs's ->write_inode() method.
 */
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (wbc->unpinned_netfs_wb)
		fscache_unuse_cookie(cookie, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL(netfs_unpin_writeback);
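
/* Example (sketch, not part of the original file): the unpin is typically
 * wired into the superblock's ->write_inode() method, either directly or via
 * a trivial wrapper like this hypothetical one.
 */
static int myfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}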

/**
 * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
 * @inode: The inode to clean up
 * @aux: Auxiliary data to apply to the inode
 *
 * Clear any writeback resources held by an inode when the inode is evicted.
 * This must be called before clear_inode() is called.
 */
void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (inode->i_state & I_PINNING_NETFS_WB) {
		loff_t i_size = i_size_read(inode);
		fscache_unuse_cookie(cookie, aux, &i_size);
	}
}
EXPORT_SYMBOL(netfs_clear_inode_writeback);
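
/* Example (sketch, not part of the original file): a hypothetical
 * ->evict_inode() method making the required call before clear_inode().
 */
static void myfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	netfs_clear_inode_writeback(inode, NULL);
	clear_inode(inode);
}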

/**
 * netfs_invalidate_folio - Invalidate or partially invalidate a folio
 * @folio: Folio proposed for release
 * @offset: Offset of the invalidated region
 * @length: Length of the invalidated region
 *
 * Invalidate part or all of a folio for a network filesystem.  The folio will
 * be removed afterwards if the invalidated region covers the entire folio.
 */
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct netfs_folio *finfo = NULL;
	size_t flen = folio_size(folio);

	_enter("{%lx},%zx,%zx", folio->index, offset, length);

	folio_wait_fscache(folio);

	if (!folio_test_private(folio))
		return;

	finfo = netfs_folio_info(folio);

	if (offset == 0 && length >= flen)
		goto erase_completely;

	if (finfo) {
		/* We have a partially uptodate page from a streaming write. */
		unsigned int fstart = finfo->dirty_offset;
		unsigned int fend = fstart + finfo->dirty_len;
		unsigned int end = offset + length;

		if (offset >= fend)
			return;
		if (end <= fstart)
			return;
		if (offset <= fstart && end >= fend)
			goto erase_completely;
		if (offset <= fstart && end > fstart)
			goto move_start;
		if (offset > fstart && end >= fend)
			goto reduce_len;
		/* A partial write was split.  The caller has already zeroed
		 * it, so just absorb the hole.
		 */
	}
	return;

erase_completely:
	netfs_put_group(netfs_folio_group(folio));
	folio_detach_private(folio);
	folio_clear_uptodate(folio);
	kfree(finfo);
	return;
reduce_len:
	/* The invalidation covers the tail of the dirty region, so truncate
	 * the dirty region at the start of the invalidation.
	 */
	finfo->dirty_len = offset - finfo->dirty_offset;
	return;
move_start:
	/* The invalidation covers the front of the dirty region, so advance
	 * the start of the dirty region to the end of the invalidation.
	 */
	finfo->dirty_len -= offset + length - finfo->dirty_offset;
	finfo->dirty_offset = offset + length;
}
EXPORT_SYMBOL(netfs_invalidate_folio);
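
/* Worked example (illustrative only): suppose a streaming write left
 * dirty_offset = 1024 and dirty_len = 2048, i.e. dirty bytes [1024, 3072):
 *
 *  - invalidating offset=0, length=4096 covers the whole dirty region,
 *    so it is erased completely;
 *  - invalidating offset=0, length=2048 covers the front, so the dirty
 *    region becomes [2048, 3072) (move_start);
 *  - invalidating offset=2048, length=2048 covers the tail, so the dirty
 *    region becomes [1024, 2048) (reduce_len).
 */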

/**
 * netfs_release_folio - Try to release a folio
 * @folio: Folio proposed for release
 * @gfp: Flags qualifying the release
 *
 * Request release of a folio and clean up its private state if it's not busy.
 * Returns true if the folio can now be released, false if not.
 */
bool netfs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	unsigned long long end;

	end = folio_pos(folio) + folio_size(folio);
	if (end > ctx->zero_point)
		ctx->zero_point = end;

	if (folio_test_private(folio))
		return false;
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
	}

	fscache_note_page_release(netfs_i_cookie(ctx));
	return true;
}
EXPORT_SYMBOL(netfs_release_folio);
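
/* Example (sketch, not part of the original file): putting it together, a
 * hypothetical netfs would wire the helpers above into its
 * address_space_operations, reusing the myfs_dirty_folio() wrapper sketched
 * earlier.
 */
static const struct address_space_operations myfs_aops = {
	.dirty_folio		= myfs_dirty_folio,
	.invalidate_folio	= netfs_invalidate_folio,
	.release_folio		= netfs_release_folio,
	/* ... read and write ops elided ... */
};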