// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2.  The
 * third mark in the folio queue is used to indicate that this folio needs
 * writing.
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
				      struct netfs_io_request *rreq,
				      struct folio_queue *folioq,
				      int slot)
{
	struct folio *folio = folioq_folio(folioq, slot);

	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	folioq_mark3(folioq, slot);
}
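
/*
 * Illustrative sketch (not part of the original file): one way a network
 * filesystem's read-completion path might invoke the helper above for each
 * folio it wants copied to the cache.  folioq_count() is the real
 * folio_queue accessor; example_should_cache() is a hypothetical predicate
 * standing in for whatever policy the filesystem applies.
 */
static void example_mark_segment(struct netfs_io_subrequest *subreq,
				 struct netfs_io_request *rreq,
				 struct folio_queue *folioq)
{
	for (unsigned int slot = 0; slot < folioq_count(folioq); slot++)
		if (example_should_cache(folioq_folio(folioq, slot)))
			netfs_pgpriv2_mark_copy_to_cache(subreq, rreq,
							 folioq, slot);
}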

/*
 * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
 * unrecoverable error.
 */
static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
{
	struct folio *folio;
	int slot;

	while (folioq) {
		if (!folioq->marks3) {
			folioq = folioq->next;
			continue;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);

		trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
		folio_end_private_2(folio);
		folioq_unmark3(folioq, slot);
	}
}
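
/*
 * Sketch (not part of the original file) of the bitmask walk used above, in
 * isolation: __ffs() returns the lowest set bit of a non-zero word, so
 * repeatedly taking it and clearing that bit (which is what
 * folioq_unmark3() does) visits every marked slot exactly once before the
 * loop moves on to the next queue segment.
 */
static void example_drain_marks(unsigned long marks3)
{
	while (marks3) {
		int slot = __ffs(marks3);	/* lowest marked slot */

		/* ... handle the folio in @slot here ... */
		marks3 &= ~(1UL << slot);	/* as folioq_unmark3() */
	}
}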

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
{
	struct netfs_io_stream *cache  = &wreq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (netfs_buffer_append_folio(wreq, folio, false) < 0)
		return -ENOMEM;

	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	do {
		ssize_t part;

		wreq->io_iter.iov_offset = cache->submit_off;

		atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

	wreq->io_iter.iov_offset = 0;
	iov_iter_advance(&wreq->io_iter, fsize);
	atomic64_set(&wreq->issued_to, fpos + fsize);

	if (flen < fsize)
		netfs_issue_write(wreq, cache);

	_leave(" = 0");
	return 0;
}
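
/*
 * Sketch (not part of the original file) of the slicing arithmetic in the
 * do/while loop above, decoupled from the I/O machinery.  EXAMPLE_WSIZE and
 * example_advance() are hypothetical stand-ins for the stream's wsize and
 * netfs_advance_write(); the real function can return more than submit_len
 * when it extends the write to a block boundary at EOF, which is why the
 * loop clamps rather than blindly subtracting.
 */
#define EXAMPLE_WSIZE	4096

static size_t example_advance(size_t submit_off, size_t submit_len)
{
	return umin(submit_len, EXAMPLE_WSIZE);
}

static void example_slice_folio(size_t flen)
{
	size_t submit_off = 0, submit_len = flen;

	do {
		size_t part = example_advance(submit_off, submit_len);

		submit_off += part;
		if (part > submit_len)		/* overshoot at EOF */
			submit_len = 0;
		else
			submit_len -= part;
	} while (submit_len > 0);
}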

/*
 * [DEPRECATED] Go through the buffer and write any folios that are marked with
 * the third mark to the cache.
 */
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *wreq;
	struct folio_queue *folioq;
	struct folio *folio;
	int error = 0;
	int slot = 0;

	_enter("");

	if (!fscache_resources_valid(&rreq->cache_resources))
		goto couldnt_start;

	/* Need the first folio to be able to set up the op. */
	for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
		if (folioq->marks3) {
			slot = __ffs(folioq->marks3);
			break;
		}
	}
	if (!folioq)
		return;
	folio = folioq_folio(folioq, slot);

	wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(wreq)) {
		kleave(" [create %ld]", PTR_ERR(wreq));
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);
	if (!wreq->io_streams[1].avail) {
		netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
		goto couldnt_start;
	}

	for (;;) {
		error = netfs_pgpriv2_copy_folio(wreq, folio);
		if (error < 0)
			break;

		folioq_unmark3(folioq, slot);
		while (!folioq->marks3) {
			folioq = folioq->next;
			if (!folioq)
				goto end_of_queue;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);
	}

end_of_queue:
	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
couldnt_start:
	netfs_pgpriv2_cancel(rreq->buffer);
}
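
/*
 * Sketch (not part of the original file): the scan for the starting folio
 * above, lifted out as a free-standing helper.  The first segment with a
 * non-zero marks3 word holds the first marked folio, and __ffs() picks the
 * lowest marked slot within it.
 */
static struct folio *example_first_marked(struct folio_queue *folioq,
					  unsigned int *slot)
{
	for (; folioq; folioq = folioq->next) {
		if (folioq->marks3) {
			*slot = __ffs(folioq->marks3);
			return folioq_folio(folioq, *slot);
		}
	}
	return NULL;
}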

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;
	bool made_progress = false;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}

	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		wreq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
	return made_progress;
}
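
/*
 * Sketch (not part of the original file) of the collector's window test
 * above: a folio may only have PG_private_2 dropped once everything up to
 * its end (clamped to i_size, as the min_t() does) has been collected.
 */
static bool example_fully_collected(unsigned long long fpos, size_t fsize,
				    unsigned long long i_size,
				    unsigned long long collected_to)
{
	unsigned long long fend = umin(fpos + fsize, i_size);

	return collected_to >= fend;
}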