/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one-frame-per-page, but has fallbacks that act like the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should likely use
 * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users know
 * when it is safe to deallocate the page_pool object.  Thus, API users
 * must make sure to call page_pool_release_page() when a page is
 * "leaving" the page_pool, or call page_pool_put_page() where
 * appropriate, in order to maintain correct accounting.
 *
 * API users must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in case of an elevated refcnt, it
 * will release the DMA mapping and in-flight state accounting.  We
 * hope to lift this requirement in the future.
 */
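
/* A minimal usage sketch of the life cycle described above (not part of the
 * upstream API documentation): "rxq" and the frame handling below are
 * hypothetical driver-side names, shown only to illustrate where the calls
 * declared in this header fit in.
 *
 *	struct page *page = page_pool_dev_alloc_pages(rxq->page_pool);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	// ... post the page to HW and later receive a frame into it ...
 *
 *	if (drop) {
 *		// Still in softirq/NAPI context: recycle into the pool.
 *		page_pool_recycle_direct(rxq->page_pool, page);
 *	} else {
 *		// Page "leaves" the pool, e.g. attached to an SKB: unmap DMA
 *		// and drop the in-flight accounting, then let the normal
 *		// put_page() path free it later.
 *		page_pool_release_page(rxq->page_pool, page);
 *	}
 */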
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* device driver responsibility
					*/
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case, as
 * XDP_DROP allows the opportunity to recycle objects directly into
 * this array, since it shares the same softirq/NAPI protection.  If
 * the cache is already full (or partly full) then the XDP_DROP
 * recycles would have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	void *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;  /* NUMA node id to allocate pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;  /* DMA addr offset */
};

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;

	/*
	 * Data structure for allocation side
	 *
	 * The driver's allocation side usually already performs some kind
	 * of resource protection.  Piggyback on this protection, and
	 * require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by
	 * Softirq/BH scheduling and napi_schedule.  NAPI scheduling
	 * guarantees that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization-wise,
	 * because frees can happen on remote CPUs, with no association
	 * with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue, being
	 * protected by NAPI, due to the above pp_alloc_cache.  This
	 * refcnt's purpose is to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}
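
/* A hedged sketch (not from the upstream header): in non-atomic context,
 * e.g. when pre-filling the RX ring from a (hypothetical) driver open
 * routine, page_pool_alloc_pages() can be called with sleeping gfp flags
 * instead of the GFP_ATOMIC wrapper above.
 *
 *	struct page *page;
 *
 *	page = page_pool_alloc_pages(pool, GFP_KERNEL | __GFP_NOWARN);
 *	if (!page)
 *		return -ENOMEM;
 */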

/* Get the stored DMA direction.  A driver might decide to store this locally
 * and avoid the extra cache line from page_pool when determining the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}
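
/* A minimal sketch (illustrative only): the direction can be read once at
 * ring-setup time and kept in the driver's own RX-queue structure, so the RX
 * hot path never touches the page_pool's cache line.  "rxq" is a hypothetical
 * driver-side structure.
 *
 *	rxq->dma_dir = page_pool_get_dma_dir(rxq->page_pool);
 */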

struct page_pool *page_pool_create(const struct page_pool_params *params);
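
/* A hedged setup sketch (not part of the upstream documentation): filling in
 * struct page_pool_params and creating one pool per RX-queue.  The ring size,
 * headroom and pdev names are driver-side assumptions.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= rx_ring_size,
 *		.nid		= dev_to_node(&pdev->dev),
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE - headroom,
 *		.offset		= headroom,
 *	};
 *	struct page_pool *pool;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */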

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
void page_pool_release_page(struct page_pool *pool, struct page *page);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *))
{
}
static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}
#endif
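
/* An illustrative sketch (hypothetical driver-side names): a page "leaving"
 * the pool when the received buffer is handed to the network stack.  After
 * page_pool_release_page() the page is DMA-unmapped and no longer counted as
 * in-flight, so the normal put_page() path frees it later.
 *
 *	skb = build_skb(page_address(page), PAGE_SIZE);
 *	if (skb) {
 *		page_pool_release_page(rxq->page_pool, page);
 *		skb_reserve(skb, headroom);
 *		skb_put(skb, len);
 *		napi_gro_receive(napi, skb);
 *	}
 */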

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct);

/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	page_pool_put_page(pool, page, -1, allow_direct);
#endif
}
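
/* A hedged sketch of returning pages from a completion path: "len" (a
 * hypothetical driver variable) bounds the DMA-sync-for-device done on
 * recycle when PP_FLAG_DMA_SYNC_DEV is set; the full-page helper is for when
 * the device-written length is unknown.
 *
 *	page_pool_put_page(rxq->page_pool, page, len, false);
 *	// or, when the written length is not known:
 *	page_pool_put_full_page(rxq->page_pool, page, false);
 */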

/* Same as above, but the caller must guarantee a safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}
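
/* An illustrative sketch (hypothetical driver code): recycling directly from
 * the XDP verdict handling, which runs in the same softirq/NAPI context as
 * the allocation side, so the lockless alloc cache may be used.
 *
 *	switch (act) {
 *	case XDP_DROP:
 *		page_pool_recycle_direct(rxq->page_pool, page);
 *		break;
 *	}
 */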

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	return page->dma_addr;
}
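
/* A minimal sketch, assuming PP_FLAG_DMA_MAP was set: using the stored DMA
 * address to sync the received data for the CPU before parsing it.  The
 * offset and len values come from a hypothetical RX descriptor.
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *	dma_sync_single_range_for_cpu(pool->p.dev, dma, offset, len,
 *				      page_pool_get_dma_dir(pool));
 */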

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
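
/* A hedged sketch: calling the helper above from the driver's NAPI poll
 * function keeps new allocations on the node the queue is currently being
 * processed on, e.g. after IRQ affinity has moved.  drv_napi_poll() and
 * struct drv_rx_queue are hypothetical driver names.
 *
 *	static int drv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct drv_rx_queue *rxq =
 *			container_of(napi, struct drv_rx_queue, napi);
 *
 *		page_pool_nid_changed(rxq->page_pool, numa_mem_id());
 *		// ... process up to "budget" packets ...
 *	}
 */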
#endif /* _NET_PAGE_POOL_H */