// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include "internal.h"

/*
 * Allocate an I/O request and initialise it.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin)
{
	static atomic_t debug_ids;
	struct inode *inode = file ? file_inode(file) : mapping->host;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct netfs_io_request *rreq;
	bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
			      origin == NETFS_DIO_READ ||
			      origin == NETFS_DIO_WRITE);
	bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
	int ret;

	rreq = kzalloc(ctx->ops->io_request_size ?: sizeof(struct netfs_io_request),
		       GFP_KERNEL);
	if (!rreq)
		return ERR_PTR(-ENOMEM);

	rreq->start	= start;
	rreq->len	= len;
	rreq->upper_len	= len;
	rreq->origin	= origin;
	rreq->netfs_ops	= ctx->ops;
	rreq->mapping	= mapping;
	rreq->inode	= inode;
	rreq->i_size	= i_size_read(inode);
	rreq->debug_id	= atomic_inc_return(&debug_ids);
	INIT_LIST_HEAD(&rreq->subrequests);
	INIT_WORK(&rreq->work, NULL);
	refcount_set(&rreq->ref, 1);

	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	if (cached)
		__set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
	if (file && file->f_flags & O_NONBLOCK)
		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
	if (rreq->netfs_ops->init_request) {
		ret = rreq->netfs_ops->init_request(rreq, file);
		if (ret < 0) {
			kfree(rreq);
			return ERR_PTR(ret);
		}
	}

	trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
	netfs_proc_add_rreq(rreq);
	netfs_stat(&netfs_n_rh_rreq);
	return rreq;
}

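/*
 * Get an extra reference on a request and trace the new refcount.
 */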
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	int r;

	__refcount_inc(&rreq->ref, &r);
	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}

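/*
 * Unlink all of the subrequests from a request and put the references that
 * the request's subrequest list held on them.
 */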
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
{
	struct netfs_io_subrequest *subreq;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
					  struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		netfs_put_subrequest(subreq, was_async,
				     netfs_sreq_trace_put_clear);
	}
}

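/*
 * Tear down and free a request once the last reference has been put.  This
 * takes the embedded work_struct so that it can also run from a workqueue
 * when the final put happens in an async context.
 */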
static void netfs_free_request(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	unsigned int i;

	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
	netfs_proc_del_rreq(rreq);
	netfs_clear_subrequests(rreq, false);
	if (rreq->netfs_ops->free_request)
		rreq->netfs_ops->free_request(rreq);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	if (rreq->direct_bv) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			if (rreq->direct_bv[i].bv_page) {
				if (rreq->direct_bv_unpin)
					unpin_user_page(rreq->direct_bv[i].bv_page);
			}
		}
		kvfree(rreq->direct_bv);
	}
	kfree_rcu(rreq, rcu);
	netfs_stat_d(&netfs_n_rh_rreq);
}

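/*
 * Put a reference on a request.  When the last reference is put, the request
 * is freed - directly if the caller is in a synchronous context, or via the
 * system_unbound_wq workqueue if was_async is set.
 */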
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (rreq) {
		debug_id = rreq->debug_id;
		dead = __refcount_dec_and_test(&rreq->ref, &r);
		trace_netfs_rreq_ref(debug_id, r - 1, what);
		if (dead) {
			if (was_async) {
				rreq->work.func = netfs_free_request;
				if (!queue_work(system_unbound_wq, &rreq->work))
					BUG();
			} else {
				netfs_free_request(&rreq->work);
			}
		}
	}
}

/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	subreq = kzalloc(rreq->netfs_ops->io_subrequest_size ?:
			 sizeof(struct netfs_io_subrequest),
			 GFP_KERNEL);
	if (subreq) {
		INIT_WORK(&subreq->work, NULL);
		INIT_LIST_HEAD(&subreq->rreq_link);
		refcount_set(&subreq->ref, 2);
		subreq->rreq = rreq;
		netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
		netfs_stat(&netfs_n_rh_sreq);
	}

	return subreq;
}

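/*
 * Get an extra reference on a subrequest and trace the new refcount.
 */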
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	int r;

	__refcount_inc(&subreq->ref, &r);
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
			     what);
}

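/*
 * Free a subrequest and put the reference it holds on its parent request.
 */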
static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
				  bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	if (rreq->netfs_ops->free_subrequest)
		rreq->netfs_ops->free_subrequest(subreq);
	kfree(subreq);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
}

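/*
 * Put a reference on a subrequest, freeing it when the last reference is put.
 */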
void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
			  enum netfs_sreq_ref_trace what)
{
	unsigned int debug_index = subreq->debug_index;
	unsigned int debug_id = subreq->rreq->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&subreq->ref, &r);
	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
	if (dead)
		netfs_free_subrequest(subreq, was_async);
}