/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA
	 * transmit, which is the XDP_TX use-case.
	 */
	if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
	    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
		return -EINVAL;

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* The driver that calls page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);

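/* Usage sketch (illustrative, not part of this file): a driver would
 * typically create one pool per RX queue.  The names priv and pdev are
 * hypothetical; the params fields are the ones validated above.
 *
 *	struct page_pool_params pp_params = {
 *		.flags     = PP_FLAG_DMA_MAP,
 *		.order     = 0,
 *		.pool_size = 1024,
 *		.nid       = NUMA_NO_NODE,
 *		.dev       = &priv->pdev->dev,
 *		.dma_dir   = DMA_FROM_DEVICE,
 *	};
 *	struct page_pool *pp = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pp))
 *		return PTR_ERR(pp);
 */
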
/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	bool refill = false;
	struct page *page;

	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
			/* Fast-path */
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
		refill = true;
	}

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Slow-path: Get page from locked ring queue,
	 * refill alloc array if requested.
	 */
	spin_lock(&r->consumer_lock);
	page = __ptr_ring_consume(r);
	if (refill)
		pool->alloc.count = __ptr_ring_consume_batched(r,
							pool->alloc.cache,
							PP_ALLOC_CACHE_REFILL);
	spin_unlock(&r->consumer_lock);
	return page;
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	struct page *page;
	gfp_t gfp = _gfp;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* FUTURE development:
	 *
	 * The current slow-path essentially falls back to single-page
	 * allocations, which doesn't improve performance.  This code
	 * needs bulk allocation support from the page allocator.
	 */

	/* Cache was empty, do real allocation */
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use the 'struct page' area for storing the
	 * DMA address, since dma_addr_t can be either 32 or 64 bits and
	 * does not always fit into the page private data (i.e. a 32-bit
	 * CPU with 64-bit DMA capabilities).
	 * This mapping is kept for the lifetime of the page, until it
	 * leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	page->dma_addr = dma;

skip_dma_map:
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;

	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* A page that was just allocated should/must have refcnt 1. */
	return page;
}

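/* Illustrative sketch (not part of this file): the mapping above is
 * created with DMA_ATTR_SKIP_CPU_SYNC, so a driver consuming the page
 * is expected to do its own CPU sync before touching packet data.
 * The names pp and len are hypothetical.
 *
 *	dma_addr_t dma = page->dma_addr;
 *
 *	dma_sync_single_for_cpu(pp->p.dev, dma, len, pp->p.dma_dir);
 */
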
/* Use page_pool to replace alloc_pages() API calls, but with a
 * synchronization guarantee on the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);

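/* Usage sketch (illustrative, not part of this file): refilling an RX
 * ring from NAPI/softirq context, where sleeping is not allowed, so
 * GFP_ATOMIC is used.  The helper name mydrv_refill_rx is hypothetical.
 *
 *	static struct page *mydrv_refill_rx(struct page_pool *pp)
 *	{
 *		return page_pool_alloc_pages(pp, GFP_ATOMIC);
 *	}
 */
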
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))

static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 distance;

	distance = _distance(hold_cnt, release_cnt);

	trace_page_pool_inflight(pool, distance, hold_cnt, release_cnt);
	return distance;
}

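/* Worked example (added for illustration): serial number arithmetic
 * stays correct across u32 wraparound.  If hold_cnt has wrapped to 3
 * (i.e. 2^32 + 3 allocations) while release_cnt = 0xfffffffe
 * (2^32 - 2 releases), then (s32)(3 - 0xfffffffe) == 5 pages in-flight,
 * even though hold_cnt < release_cnt as plain unsigned values.
 */
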
static bool __page_pool_safe_to_destroy(struct page_pool *pool)
{
	s32 inflight = page_pool_inflight(pool);

	/* The distance should not be able to become negative */
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return (inflight == 0);
}

/* Cleanup page_pool state from page */
static void __page_pool_clean_page(struct page_pool *pool,
				   struct page *page)
{
	dma_addr_t dma;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_unmap;

	dma = page->dma_addr;
	/* DMA unmap */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page->dma_addr = 0;
skip_dma_unmap:
	atomic_inc(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page,
			      atomic_read(&pool->pages_state_release_cnt));
}

/* unmap the page and clean our state */
void page_pool_unmap_page(struct page_pool *pool, struct page *page)
{
	/* When the page is unmapped, this implies that it will not be
	 * returned to the page_pool.
	 */
	__page_pool_clean_page(pool, page);
}
EXPORT_SYMBOL(page_pool_unmap_page);

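/* Usage sketch (illustrative, not part of this file): a driver calls
 * page_pool_unmap_page() when a page leaves the pool for good, e.g.
 * before handing it to another subsystem that will eventually free it
 * with put_page().  mydrv_hand_off() is a hypothetical stand-in for
 * that hand-off.
 *
 *	page_pool_unmap_page(pp, page);
 *	mydrv_hand_off(page);
 */
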
/* Return a page to the page allocator, cleaning up our state */
static void __page_pool_return_page(struct page_pool *pool, struct page *page)
{
	__page_pool_clean_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool __page_pool_recycle_into_ring(struct page_pool *pool,
				   struct page *page)
{
	int ret;
	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return ret == 0;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool __page_pool_recycle_direct(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

void __page_pool_put_page(struct page_pool *pool,
			  struct page *page, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one frame per page, but it has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 */
	if (likely(page_ref_count(page) == 1)) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (allow_direct && in_serving_softirq())
			if (__page_pool_recycle_direct(page, pool))
				return;

		if (!__page_pool_recycle_into_ring(pool, page)) {
			/* Cache full, fallback to freeing the page */
			__page_pool_return_page(pool, page);
		}
		return;
	}
	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt-based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once.  In this elevated-refcnt case,
	 * the DMA mapping is unmapped/released here, as the driver is
	 * likely doing refcnt-based recycle tricks, meaning another
	 * process will be invoking put_page().
	 */
	__page_pool_clean_page(pool, page);
	put_page(page);
}
EXPORT_SYMBOL(__page_pool_put_page);

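/* Usage sketch (illustrative, not part of this file): recycling on
 * XDP_DROP directly from the NAPI poll loop, where the driver knows it
 * runs in softirq context and holds the only page reference, so
 * allow_direct can be true.  The variable act is hypothetical.
 *
 *	if (act == XDP_DROP)
 *		__page_pool_put_page(pp, page, true);
 */
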
static void __page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (page_ref_count(page) != 1)
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		__page_pool_return_page(pool, page);
	}
}

static void __warn_in_flight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 distance;

	distance = _distance(hold_cnt, release_cnt);

	/* Drivers should fix this, but it is only problematic when DMA is used */
	WARN(1, "Still in-flight pages:%d hold:%u released:%u",
	     distance, hold_cnt, release_cnt);
}

void __page_pool_free(struct page_pool *pool)
{
	/* Only the last user actually frees/releases resources */
	if (!page_pool_put(pool))
		return;

	WARN(pool->alloc.count, "API usage violation");
	WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");

	/* Can happen due to forced shutdown */
	if (!__page_pool_safe_to_destroy(pool))
		__warn_in_flight(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}
EXPORT_SYMBOL(__page_pool_free);

/* Request to shutdown: release pages cached by the page_pool, and check
 * for in-flight pages
 */
bool __page_pool_request_shutdown(struct page_pool *pool)
{
	struct page *page;

	/* Empty the alloc cache; assume the caller made sure it is
	 * no longer in use, and that page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		__page_pool_return_page(pool, page);
	}

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	__page_pool_empty_ring(pool);

	return __page_pool_safe_to_destroy(pool);
}
EXPORT_SYMBOL(__page_pool_request_shutdown);
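
/* Teardown sketch (illustrative, not part of this file): the shutdown
 * request and the final free are separate steps, so the caller (in
 * practice the XDP memory-model code) can wait for in-flight pages in
 * between; retry handling is left out for brevity.
 *
 *	if (__page_pool_request_shutdown(pp))
 *		__page_pool_free(pp);
 */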