/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one frame per page, but it has fallbacks that act like the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should likely use
 * page_pool_dev_alloc_pages() in place of dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users
 * know when it is safe to deallocate a page_pool object.  Thus, to
 * maintain correct accounting, API users must make sure to call
 * page_pool_release_page() when a page "leaves" the page_pool, or
 * call page_pool_put_page() where appropriate.
 *
 * API users must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or, in case of an elevated refcnt,
 * release the DMA mapping and in-flight state accounting.  We
 * hope to lift this requirement in the future.
 */
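
/* Illustrative sketch (not part of this header) of the basic use the
 * DOC block above describes; the sizes and driver context here are
 * hypothetical.
 *
 *	struct page_pool_params pp_params = {
 *		.order		= 0,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *	};
 *	struct page_pool *pool;
 *	struct page *page;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	// Was: page = dev_alloc_pages(0);
 *	page = page_pool_dev_alloc_pages(pool);
 *
 *	// When done with a page that never left the pool's accounting
 *	// (e.g. it was never attached to an skb):
 *	page_pool_put_full_page(pool, page, false);
 */
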
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set, all pages that the driver
					* gets from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is
					* still the device driver's
					* responsibility.
					*/
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)

/*
 * Fast allocation-side cache array/stack
 *
 * The cache size and refill watermark are related to the networking
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled, and the maximum number of elements consumed
 * is 64, which is thus a natural maximum for the number of objects
 * needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case, as
 * XDP_DROP provides the opportunity to recycle objects directly into
 * this array, since it shares the same softirq/NAPI protection.  If the
 * cache is already full (or partly full), those XDP_DROP recycles
 * would have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;  /* NUMA node id to allocate pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;  /* DMA addr offset */
};
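
/* Illustrative sketch (not part of this header): parameters for a pool
 * that also handles DMA mapping and device syncs.  ring_size, hdroom,
 * rx_buf_len and rq->ndev are hypothetical driver values.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= ring_size,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= rq->ndev->dev.parent,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= rx_buf_len,	// sync-for-device length
 *		.offset		= hdroom,	// where HW starts writing
 *	};
 */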

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;

	/*
	 * Data structure for the allocation side
	 *
	 * A driver's allocation side usually already performs some kind
	 * of resource protection.  Piggyback on this protection, and
	 * require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by
	 * softirq/BH scheduling and napi_schedule.  NAPI scheduling
	 * guarantees that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization-wise,
	 * because frees can happen on remote CPUs, with no association
	 * with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache.  The
	 * purpose of this refcnt is to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}
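
/* Illustrative sketch (not part of this header): refilling an RX ring
 * from NAPI context.  rxq and its descriptor layout are hypothetical.
 *
 *	for (i = 0; i < missing; i++) {
 *		struct page *page = page_pool_dev_alloc_pages(rxq->pool);
 *
 *		if (unlikely(!page))
 *			break;	// out of memory, retry on a later poll
 *
 *		rxq->desc[i].addr = page_pool_get_dma_addr(page) +
 *				    rxq->pool->p.offset;
 *		rxq->pages[i] = page;
 *	}
 */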

/* Get the stored DMA direction.  A driver might decide to handle this
 * locally and avoid the extra cache-line access into the page_pool just
 * to determine the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}
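
/* Illustrative sketch (not part of this header): caching the direction
 * once at setup time, as suggested above, so the hot path never touches
 * the page_pool cache line for it.  rq is a hypothetical per-queue
 * driver structure.
 *
 *	rq->dma_dir = page_pool_get_dma_dir(rq->page_pool);
 */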

bool page_pool_return_skb_page(struct page *page);

struct page_pool *page_pool_create(const struct page_pool_params *params);

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
void page_pool_release_page(struct page_pool *pool, struct page *page);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *))
{
}

static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct);

/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but we still need this
	 * #ifdef to shield the linker from the undefined symbol.
	 */
#ifdef CONFIG_PAGE_POOL
	page_pool_put_page(pool, page, -1, allow_direct);
#endif
}
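
/* Illustrative sketch (not part of this header): returning pages still
 * held by a hypothetical RX ring at queue teardown.  This runs outside
 * NAPI, so allow_direct must be false.
 *
 *	for (i = 0; i < rxq->count; i++) {
 *		if (rxq->pages[i])
 *			page_pool_put_full_page(rxq->pool, rxq->pages[i],
 *						false);
 *	}
 */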

/* Same as above but the caller must guarantee safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}
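
/* Illustrative sketch (not part of this header): the two common paths a
 * page takes after an XDP verdict.  The surrounding driver code is
 * hypothetical.
 *
 *	case XDP_DROP:
 *		// Still inside NAPI: recycle straight into the pool's
 *		// lockless cache.
 *		page_pool_recycle_direct(rxq->pool, page);
 *		break;
 *	case XDP_PASS:
 *		// Page leaves the pool as part of an skb: unmap it and
 *		// drop it from the in-flight accounting.
 *		page_pool_release_page(rxq->pool, page);
 *		skb = build_skb(page_address(page), PAGE_SIZE);
 *		break;
 */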

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr[0];

	/* The upper 32 bits live in dma_addr[1] when dma_addr_t is wider
	 * than unsigned long; shifting twice by 16 avoids a shift-by-32
	 * warning in configs where this branch is dead code.
	 */
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
	return ret;
}
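
/* Illustrative sketch (not part of this header): as the
 * PP_FLAG_DMA_SYNC_DEV comment notes, syncing for the CPU before
 * reading packet data remains the driver's job.  dev, pkt_len and rxq
 * are hypothetical.
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *	dma_sync_single_for_cpu(dev, dma + rxq->pool->p.offset, pkt_len,
 *				page_pool_get_dma_dir(rxq->pool));
 */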

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr[0] = addr;
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		page->dma_addr[1] = upper_32_bits(addr);
}

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

/* Drop a user reference; returns true when the last reference is gone. */
static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}
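
/* Illustrative sketch (not part of this header): as the user_cnt comment
 * in struct page_pool indicates, the refcount exists to keep driver
 * error unwinding simple; a setup failure after page_pool_create() is
 * handled with a single page_pool_destroy().  rxq is hypothetical.
 *
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, pool);
 *	if (err) {
 *		page_pool_destroy(pool);
 *		return err;
 *	}
 */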

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
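
/* Illustrative sketch (not part of this header): keeping allocations on
 * the local node after the RX interrupt/NAPI has migrated to another
 * CPU.  Typically called from the hypothetical driver's NAPI poll.
 *
 *	page_pool_nid_changed(rxq->pool, numa_mem_id());
 */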

static inline void page_pool_ring_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);
}

static inline void page_pool_ring_unlock(struct page_pool *pool)
	__releases(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

/* Store mem_info on struct page and use it while recycling skb frags */
static inline
void page_pool_store_mem_info(struct page *page, struct page_pool *pp)
{
	page->pp = pp;
}

#endif /* _NET_PAGE_POOL_H */