/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _NET_PAGE_POOL_TYPES_H
#define _NET_PAGE_POOL_TYPES_H

#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* the device driver's responsibility.
					*/
#define PP_FLAG_SYSTEM_POOL	BIT(2) /* Global system page_pool */
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
				 PP_FLAG_SYSTEM_POOL)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case.
 * XDP_DROP provides the opportunity to recycle objects directly into
 * this array, as it shares the same softirq/NAPI protection. If the
 * cache is already full (or partly full) then the XDP_DROP recycles
 * would have to take a slower code path.
 */
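/*
 * Illustrative sketch, not part of this header: one assumed shape of the
 * XDP_DROP recycle path described above. The function and variable names
 * are hypothetical; page_pool_put_unrefed_page() is declared further down
 * in this file, and passing dma_sync_size == -1 with allow_direct == true
 * is taken here to mean "sync up to max_len and recycle directly into the
 * softirq-protected pp_alloc_cache".
 *
 *	static void hypothetical_xdp_drop(struct page_pool *pool,
 *					  struct page *page)
 *	{
 *		page_pool_put_unrefed_page(pool, page, -1, true);
 *	}
 *
 * Because this would run in the driver's NAPI poll loop, it shares the
 * softirq protection of the allocation side and may bypass the ptr_ring.
 */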
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};

/**
 * struct page_pool_params - page pool parameters
 * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
 * @order:	2^order pages on allocation
 * @pool_size:	size of the ptr_ring
 * @nid:	NUMA node id to allocate pages from
 * @dev:	device, for DMA pre-mapping purposes
 * @netdev:	netdev this pool will serve (leave as NULL if none or multiple)
 * @napi:	NAPI which is the sole consumer of pages, otherwise NULL
 * @dma_dir:	DMA mapping direction
 * @max_len:	max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
 * @offset:	DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
 */
struct page_pool_params {
	struct_group_tagged(page_pool_params_fast, fast,
		unsigned int	flags;
		unsigned int	order;
		unsigned int	pool_size;
		int		nid;
		struct device	*dev;
		struct napi_struct *napi;
		enum dma_data_direction dma_dir;
		unsigned int	max_len;
		unsigned int	offset;
	);
	struct_group_tagged(page_pool_params_slow, slow,
		struct net_device *netdev;
/* private: used by test code only */
		void (*init_callback)(struct page *page, void *arg);
		void *init_arg;
	);
};
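/*
 * Illustrative sketch, not part of this header: one way a driver could
 * fill in these parameters for a per-RX-queue pool. The function name,
 * ring size and sync length are hypothetical; page_pool_create() is
 * declared further down in this file and is assumed to return an
 * ERR_PTR() value on failure.
 *
 *	static struct page_pool *
 *	hypothetical_rxq_pool_create(struct device *dev,
 *				     struct napi_struct *napi)
 *	{
 *		struct page_pool_params pp_params = {
 *			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *			.order		= 0,
 *			.pool_size	= 1024,
 *			.nid		= NUMA_NO_NODE,
 *			.dev		= dev,
 *			.napi		= napi,
 *			.dma_dir	= DMA_FROM_DEVICE,
 *			.max_len	= PAGE_SIZE,
 *			.offset		= 0,
 *		};
 *
 *		return page_pool_create(&pp_params);
 *	}
 */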
#ifdef CONFIG_PAGE_POOL_STATS
/**
 * struct page_pool_alloc_stats - allocation statistics
 * @fast:	successful fast path allocations
 * @slow:	slow path order-0 allocations
 * @slow_high_order: slow path high order allocations
 * @empty:	ptr ring is empty, so a slow path allocation was forced
 * @refill:	an allocation which triggered a refill of the cache
 * @waive:	pages obtained from the ptr ring that cannot be added to
 *		the cache due to a NUMA mismatch
 */
struct page_pool_alloc_stats {
	u64 fast;
	u64 slow;
	u64 slow_high_order;
	u64 empty;
	u64 refill;
	u64 waive;
};

/**
 * struct page_pool_recycle_stats - recycling (freeing) statistics
 * @cached:	recycling placed page in the page pool cache
 * @cache_full:	page pool cache was full
 * @ring:	page placed into the ptr ring
 * @ring_full:	page released from page pool because the ptr ring was full
 * @released_refcnt: page released (and not recycled) because refcnt > 1
 */
struct page_pool_recycle_stats {
	u64 cached;
	u64 cache_full;
	u64 ring;
	u64 ring_full;
	u64 released_refcnt;
};

/**
 * struct page_pool_stats - combined page pool use statistics
 * @alloc_stats:	see struct page_pool_alloc_stats
 * @recycle_stats:	see struct page_pool_recycle_stats
 *
 * Wrapper struct for combining page pool stats with different storage
 * requirements.
 */
struct page_pool_stats {
	struct page_pool_alloc_stats alloc_stats;
	struct page_pool_recycle_stats recycle_stats;
};
#endif

struct page_pool {
	struct page_pool_params_fast p;

	int cpuid;
	bool has_init_callback;

	long frag_users;
	struct page *frag_page;
	unsigned int frag_offset;
	u32 pages_state_hold_cnt;

	struct delayed_work release_dw;
	void (*disconnect)(void *pool);
	unsigned long defer_start;
	unsigned long defer_warn;

#ifdef CONFIG_PAGE_POOL_STATS
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
#endif
	u32 xdp_mem_id;

	/*
	 * Data structure for allocation side
	 *
	 * The driver's allocation side usually already performs some kind
	 * of resource protection. Piggyback on this protection, and
	 * require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per RX-queue,
	 * as the RX-queue is already protected by softirq/BH scheduling
	 * and napi_schedule. NAPI scheduling guarantees that a single
	 * napi_struct will only be scheduled on a single CPU
	 * (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization-wise,
	 * because frees can happen on remote CPUs, with no association
	 * with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer efficiently,
	 * in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

#ifdef CONFIG_PAGE_POOL_STATS
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache. The purpose
	 * of this refcnt is to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;

	/* Slow/Control-path information follows */
	struct page_pool_params_slow slow;

	/* User-facing fields, protected by page_pools_lock */
	struct {
		struct hlist_node list;
		u64 detach_time;
		u32 napi_id;
		u32 id;
	} user;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
					  int cpuid);

struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *),
					 struct xdp_mem_info *mem)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size,
				bool allow_direct);

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);

#endif /* _NET_PAGE_POOL_TYPES_H */
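/*
 * Illustrative sketch, not part of this header: a hypothetical RX refill
 * helper built on the fragment API declared above, plus teardown. The
 * buffer size, GFP flags and function names are assumptions; only
 * page_pool_alloc_frag() and page_pool_destroy() come from this file, and
 * page_pool_get_dma_addr() is assumed to be available from
 * <net/page_pool/helpers.h>.
 *
 *	static int hypothetical_rx_refill_one(struct page_pool *pool,
 *					      dma_addr_t *dma,
 *					      unsigned int *off)
 *	{
 *		unsigned int offset;
 *		struct page *page;
 *
 *		page = page_pool_alloc_frag(pool, &offset, 2048, GFP_ATOMIC);
 *		if (!page)
 *			return -ENOMEM;
 *
 *		*dma = page_pool_get_dma_addr(page) + offset;
 *		*off = offset;
 *		return 0;
 *	}
 *
 *	static void hypothetical_rxq_pool_destroy(struct page_pool *pool)
 *	{
 *		page_pool_destroy(pool);
 *	}
 */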