1// SPDX-License-Identifier: GPL-2.0
2/*
3 * fs/ext4/mballoc.h
4 *
5 * Written by: Alex Tomas <alex@clusterfs.com>
6 *
7 */
8#ifndef _EXT4_MBALLOC_H
9#define _EXT4_MBALLOC_H
10
11#include <linux/time.h>
12#include <linux/fs.h>
13#include <linux/namei.h>
14#include <linux/quotaops.h>
15#include <linux/buffer_head.h>
16#include <linux/module.h>
17#include <linux/swap.h>
18#include <linux/proc_fs.h>
19#include <linux/pagemap.h>
20#include <linux/seq_file.h>
21#include <linux/blkdev.h>
22#include <linux/mutex.h>
23#include "ext4_jbd2.h"
24#include "ext4.h"
25
26/*
27 * mb_debug() dynamic printk msgs could be used to debug mballoc code.
28 */
29#ifdef CONFIG_EXT4_DEBUG
30#define mb_debug(sb, fmt, ...) \
31 pr_debug("[%s/%d] EXT4-fs (%s): (%s, %d): %s: " fmt, \
32 current->comm, task_pid_nr(current), sb->s_id, \
33 __FILE__, __LINE__, __func__, ##__VA_ARGS__)
34#else
35#define mb_debug(sb, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
36#endif
37
38#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
39#define EXT4_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */
40
41/*
42 * How long mballoc can look for a best extent (in found extents)
43 */
44#define MB_DEFAULT_MAX_TO_SCAN 200
45
46/*
47 * How long mballoc must look for a best extent
48 */
49#define MB_DEFAULT_MIN_TO_SCAN 10
50
51/*
52 * with 's_mb_stats' allocator will collect stats that will be
53 * shown at umount. The collecting costs though!
54 */
55#define MB_DEFAULT_STATS 0
56
57/*
58 * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
59 * by the stream allocator, which purpose is to pack requests
60 * as close each to other as possible to produce smooth I/O traffic
61 * We use locality group prealloc space for stream request.
62 * We can tune the same via /proc/fs/ext4/<partition>/stream_req
63 */
64#define MB_DEFAULT_STREAM_THRESHOLD 16 /* 64K */
65
66/*
67 * for which requests use 2^N search using buddies
68 */
69#define MB_DEFAULT_ORDER2_REQS 2
70
71/*
72 * default group prealloc size 512 blocks
73 */
74#define MB_DEFAULT_GROUP_PREALLOC 512
75
76/*
77 * Number of groups to search linearly before performing group scanning
78 * optimization.
79 */
80#define MB_DEFAULT_LINEAR_LIMIT 4
81
82/*
83 * Minimum number of groups that should be present in the file system to perform
84 * group scanning optimizations.
85 */
86#define MB_DEFAULT_LINEAR_SCAN_THRESHOLD 16
87
88/*
89 * The maximum order upto which CR_BEST_AVAIL_LEN can trim a particular
90 * allocation request. Example, if we have an order 7 request and max trim order
91 * of 3, we can trim this request upto order 4.
92 */
93#define MB_DEFAULT_BEST_AVAIL_TRIM_ORDER 3
94
95/*
96 * Number of valid buddy orders
97 */
98#define MB_NUM_ORDERS(sb) ((sb)->s_blocksize_bits + 2)
99
/*
 * Describes one freed extent of clusters.  The record stays linked from
 * sb_info (efd_list) and from its group's info (efd_node) and carries the
 * id of the transaction that freed it (efd_tid), tying the extent's fate
 * to that transaction's commit.
 */
struct ext4_free_data {
	/* this links the free block information from sb_info */
	struct list_head efd_list;

	/* this links the free block information from group_info */
	struct rb_node efd_node;

	/* group which free block extent belongs */
	ext4_group_t efd_group;

	/* free block extent */
	ext4_grpblk_t efd_start_cluster;
	ext4_grpblk_t efd_count;

	/* transaction which freed this extent */
	tid_t efd_tid;
};
117
/*
 * One preallocated space (PA): a reserved range of physical blocks
 * (pa_pstart/pa_len) mapped at logical block pa_lstart.  pa_type selects
 * the flavour (MB_INODE_PA vs MB_GROUP_PA), which also decides which arm
 * of the pa_node and pa_node_lock unions is in use.
 */
struct ext4_prealloc_space {
	union {
		struct rb_node inode_node;	/* for inode PA rbtree */
		struct list_head lg_list;	/* for lg PAs */
	} pa_node;
	struct list_head pa_group_list;
	union {
		struct list_head pa_tmp_list;
		struct rcu_head pa_rcu;		/* for RCU-deferred freeing of the PA */
	} u;
	spinlock_t pa_lock;
	atomic_t pa_count;			/* reference count */
	unsigned pa_deleted;			/* NOTE(review): looks like a deletion flag — confirm in mballoc.c */
	ext4_fsblk_t pa_pstart;		/* phys. block */
	ext4_lblk_t pa_lstart;		/* log. block */
	ext4_grpblk_t pa_len;		/* len of preallocated chunk */
	ext4_grpblk_t pa_free;		/* how many blocks are free */
	unsigned short pa_type;		/* pa type. inode or group */
	union {
		rwlock_t *inode_lock;	/* locks the rbtree holding this PA */
		spinlock_t *lg_lock;	/* locks the lg list holding this PA */
	} pa_node_lock;
	struct inode *pa_inode;		/* used to get the inode during group discard */
};
142
/* Values for ext4_prealloc_space.pa_type */
enum {
	MB_INODE_PA = 0,	/* preallocation tied to a single inode */
	MB_GROUP_PA = 1		/* preallocation shared via a locality group */
};
147
/*
 * A single extent of free clusters, addressed as (group, start, len) in
 * cluster units, together with the logical file block it would map to.
 */
struct ext4_free_extent {
	ext4_lblk_t fe_logical;		/* logical block in the file */
	ext4_grpblk_t fe_start;		/* In cluster units */
	ext4_group_t fe_group;		/* block group holding the extent */
	ext4_grpblk_t fe_len;		/* In cluster units */
};
154
155/*
156 * Locality group:
157 * we try to group all related changes together
158 * so that writeback can flush/allocate them together as well
159 * Size of lg_prealloc_list hash is determined by MB_DEFAULT_GROUP_PREALLOC
160 * (512). We store prealloc space into the hash based on the pa_free blocks
161 * order value.ie, fls(pa_free)-1;
162 */
/* Number of hash buckets in lg_prealloc_list (indexed by fls(pa_free) - 1) */
#define PREALLOC_TB_SIZE 10
struct ext4_locality_group {
	/* for allocator */
	/* to serialize allocates */
	struct mutex lg_mutex;
	/* lists of preallocations, hashed by pa_free order (fls(pa_free) - 1) */
	struct list_head lg_prealloc_list[PREALLOC_TB_SIZE];
	/* protects lg_prealloc_list */
	spinlock_t lg_prealloc_lock;
};
172
/*
 * Per-request allocation state: the original request (ac_o_ex), its
 * normalized goal (ac_g_ex), and the best extent found so far (ac_b_ex),
 * plus scan statistics and the preallocation / locality-group context
 * used to satisfy the request.
 */
struct ext4_allocation_context {
	struct inode *ac_inode;
	struct super_block *ac_sb;

	/* original request */
	struct ext4_free_extent ac_o_ex;

	/* goal request (normalized ac_o_ex) */
	struct ext4_free_extent ac_g_ex;

	/* the best found extent */
	struct ext4_free_extent ac_b_ex;

	/* copy of the best found extent taken before preallocation efforts */
	struct ext4_free_extent ac_f_ex;

	/*
	 * goal len can change in CR1.5, so save the original len. This is
	 * used while adjusting the PA window and for accounting.
	 */
	ext4_grpblk_t	ac_orig_goal_len;

	__u32 ac_flags;		/* allocation hints */
	__u16 ac_groups_scanned;
	__u16 ac_groups_linear_remaining;	/* groups left for linear search (cf. MB_DEFAULT_LINEAR_LIMIT) */
	__u16 ac_found;				/* total extents found */
	__u16 ac_cX_found[EXT4_MB_NUM_CRS];	/* extents found per allocation criterion */
	__u16 ac_tail;
	__u16 ac_buddy;
	__u8 ac_status;		/* AC_STATUS_* */
	__u8 ac_criteria;	/* current allocation criterion (index < EXT4_MB_NUM_CRS) */
	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
				 * N > 0, the field stores N, otherwise 0 */
	__u8 ac_op;		/* operation, for history only */
	struct page *ac_bitmap_page;
	struct page *ac_buddy_page;
	struct ext4_prealloc_space *ac_pa;
	struct ext4_locality_group *ac_lg;
};
212
213#define AC_STATUS_CONTINUE 1
214#define AC_STATUS_FOUND 2
215#define AC_STATUS_BREAK 3
216
/*
 * Handle on one block group's buddy data: the pages caching the group's
 * block bitmap and buddy information, with kmapped pointers into them,
 * plus the group/superblock they belong to.
 */
struct ext4_buddy {
	struct page *bd_buddy_page;	/* page backing the buddy data */
	void *bd_buddy;			/* pointer into bd_buddy_page */
	struct page *bd_bitmap_page;	/* page backing the block bitmap */
	void *bd_bitmap;		/* pointer into bd_bitmap_page */
	struct ext4_group_info *bd_info;
	struct super_block *bd_sb;
	__u16 bd_blkbits;		/* log2 of block size */
	ext4_group_t bd_group;		/* group this buddy describes */
};
227
228static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
229 struct ext4_free_extent *fex)
230{
231 return ext4_group_first_block_no(sb, fex->fe_group) +
232 (fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
233}
234
235static inline loff_t extent_logical_end(struct ext4_sb_info *sbi,
236 struct ext4_free_extent *fex)
237{
238 /* Use loff_t to avoid end exceeding ext4_lblk_t max. */
239 return (loff_t)fex->fe_logical + EXT4_C2B(sbi, fex->fe_len);
240}
241
242static inline loff_t pa_logical_end(struct ext4_sb_info *sbi,
243 struct ext4_prealloc_space *pa)
244{
245 /* Use loff_t to avoid end exceeding ext4_lblk_t max. */
246 return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
247}
248
/*
 * Callback type used by ext4_mballoc_query_range() for each free extent
 * reported; priv is the caller's opaque cookie.
 */
typedef int (*ext4_mballoc_query_range_fn)(
	struct super_block		*sb,
	ext4_group_t			agno,
	ext4_grpblk_t			start,
	ext4_grpblk_t			len,
	void				*priv);

/*
 * Walk free extents of group agno within [start, end], invoking
 * formatter(sb, agno, start, len, priv) for each one found.
 */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			agno,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv);
264
265#endif
/*
 * fs/ext4/mballoc.h
 *
 * Written by: Alex Tomas <alex@clusterfs.com>
 *
 * NOTE(review): everything from here to the final #endif is a stale
 * duplicate of this header from an older kernel release.  The
 * _EXT4_MBALLOC_H include guard above compiles it out, so it is dead
 * text; it should be deleted rather than kept around.
 */
7#ifndef _EXT4_MBALLOC_H
8#define _EXT4_MBALLOC_H
9
10#include <linux/time.h>
11#include <linux/fs.h>
12#include <linux/namei.h>
13#include <linux/quotaops.h>
14#include <linux/buffer_head.h>
15#include <linux/module.h>
16#include <linux/swap.h>
17#include <linux/proc_fs.h>
18#include <linux/pagemap.h>
19#include <linux/seq_file.h>
20#include <linux/blkdev.h>
21#include <linux/mutex.h>
22#include "ext4_jbd2.h"
23#include "ext4.h"
24
25/*
26 * with AGGRESSIVE_CHECK allocator runs consistency checks over
27 * structures. these checks slow things down a lot
28 */
29#define AGGRESSIVE_CHECK__
30
31/*
32 * with DOUBLE_CHECK defined mballoc creates persistent in-core
33 * bitmaps, maintains and uses them to check for double allocations
34 */
35#define DOUBLE_CHECK__
36
37/*
38 */
39#ifdef CONFIG_EXT4_DEBUG
40extern ushort ext4_mballoc_debug;
41
42#define mb_debug(n, fmt, a...) \
43 do { \
44 if ((n) <= ext4_mballoc_debug) { \
45 printk(KERN_DEBUG "(%s, %d): %s: ", \
46 __FILE__, __LINE__, __func__); \
47 printk(fmt, ## a); \
48 } \
49 } while (0)
50#else
51#define mb_debug(n, fmt, a...) no_printk(fmt, ## a)
52#endif
53
54#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
55#define EXT4_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */
56
57/*
58 * How long mballoc can look for a best extent (in found extents)
59 */
60#define MB_DEFAULT_MAX_TO_SCAN 200
61
62/*
63 * How long mballoc must look for a best extent
64 */
65#define MB_DEFAULT_MIN_TO_SCAN 10
66
67/*
68 * with 'ext4_mb_stats' allocator will collect stats that will be
69 * shown at umount. The collecting costs though!
70 */
71#define MB_DEFAULT_STATS 0
72
73/*
74 * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
75 * by the stream allocator, which purpose is to pack requests
76 * as close each to other as possible to produce smooth I/O traffic
77 * We use locality group prealloc space for stream request.
 * We can tune the same via /proc/fs/ext4/<partition>/stream_req
79 */
80#define MB_DEFAULT_STREAM_THRESHOLD 16 /* 64K */
81
82/*
83 * for which requests use 2^N search using buddies
84 */
85#define MB_DEFAULT_ORDER2_REQS 2
86
87/*
88 * default group prealloc size 512 blocks
89 */
90#define MB_DEFAULT_GROUP_PREALLOC 512
91
92
/*
 * (Stale duplicate from an older kernel version; compiled out by the
 * include guard.)  Describes one freed extent of clusters, tied via
 * efd_tid to the transaction that freed it.
 */
struct ext4_free_data {
	/* MUST be the first member — the journal callback layer presumably
	 * recovers this struct from efd_jce; confirm against the
	 * ext4_journal_callback users before relying on it */
	struct ext4_journal_cb_entry efd_jce;

	/* ext4_free_data private data starts from here */

	/* this links the free block information from group_info */
	struct rb_node efd_node;

	/* group which free block extent belongs */
	ext4_group_t efd_group;

	/* free block extent */
	ext4_grpblk_t efd_start_cluster;
	ext4_grpblk_t efd_count;

	/* transaction which freed this extent */
	tid_t efd_tid;
};
112
/*
 * (Stale duplicate from an older kernel version; compiled out by the
 * include guard.)  One preallocated space: a reserved physical range
 * (pa_pstart/pa_len) at logical block pa_lstart; pa_type distinguishes
 * inode PAs from locality-group PAs.
 */
struct ext4_prealloc_space {
	struct list_head	pa_inode_list;
	struct list_head	pa_group_list;
	union {
		struct list_head pa_tmp_list;
		struct rcu_head	pa_rcu;		/* for RCU-deferred freeing */
	} u;
	spinlock_t		pa_lock;
	atomic_t		pa_count;	/* reference count */
	unsigned		pa_deleted;
	ext4_fsblk_t		pa_pstart;	/* phys. block */
	ext4_lblk_t		pa_lstart;	/* log. block */
	ext4_grpblk_t		pa_len;		/* len of preallocated chunk */
	ext4_grpblk_t		pa_free;	/* how many blocks are free */
	unsigned short		pa_type;	/* pa type. inode or group */
	spinlock_t		*pa_obj_lock;	/* lock of the list holding this PA */
	struct inode		*pa_inode;	/* hack, for history only */
};
131
/* Values for ext4_prealloc_space.pa_type */
enum {
	MB_INODE_PA = 0,	/* preallocation tied to a single inode */
	MB_GROUP_PA = 1		/* preallocation shared via a locality group */
};
136
/*
 * A single extent of free clusters, addressed as (group, start, len) in
 * cluster units, together with the logical file block it would map to.
 */
struct ext4_free_extent {
	ext4_lblk_t fe_logical;		/* logical block in the file */
	ext4_grpblk_t fe_start;		/* In cluster units */
	ext4_group_t fe_group;		/* block group holding the extent */
	ext4_grpblk_t fe_len;		/* In cluster units */
};
143
144/*
145 * Locality group:
146 * we try to group all related changes together
147 * so that writeback can flush/allocate them together as well
148 * Size of lg_prealloc_list hash is determined by MB_DEFAULT_GROUP_PREALLOC
149 * (512). We store prealloc space into the hash based on the pa_free blocks
150 * order value.ie, fls(pa_free)-1;
151 */
/* Number of hash buckets in lg_prealloc_list (indexed by fls(pa_free) - 1) */
#define PREALLOC_TB_SIZE 10
struct ext4_locality_group {
	/* for allocator */
	/* to serialize allocates */
	struct mutex lg_mutex;
	/* lists of preallocations, hashed by pa_free order (fls(pa_free) - 1) */
	struct list_head lg_prealloc_list[PREALLOC_TB_SIZE];
	/* protects lg_prealloc_list */
	spinlock_t lg_prealloc_lock;
};
161
/*
 * (Stale duplicate from an older kernel version; compiled out by the
 * include guard.)  Per-request allocation state: original request
 * (ac_o_ex), normalized goal (ac_g_ex), best extent found (ac_b_ex),
 * plus scan statistics and PA / locality-group context.
 */
struct ext4_allocation_context {
	struct inode *ac_inode;
	struct super_block *ac_sb;

	/* original request */
	struct ext4_free_extent ac_o_ex;

	/* goal request (normalized ac_o_ex) */
	struct ext4_free_extent ac_g_ex;

	/* the best found extent */
	struct ext4_free_extent ac_b_ex;

	/* copy of the best found extent taken before preallocation efforts */
	struct ext4_free_extent ac_f_ex;

	__u16 ac_groups_scanned;
	__u16 ac_found;		/* extents found so far */
	__u16 ac_tail;
	__u16 ac_buddy;
	__u16 ac_flags;		/* allocation hints */
	__u8 ac_status;		/* AC_STATUS_* */
	__u8 ac_criteria;	/* current allocation criterion */
	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
				 * N > 0, the field stores N, otherwise 0 */
	__u8 ac_op;		/* operation, for history only */
	struct page *ac_bitmap_page;
	struct page *ac_buddy_page;
	struct ext4_prealloc_space *ac_pa;
	struct ext4_locality_group *ac_lg;
};
193
194#define AC_STATUS_CONTINUE 1
195#define AC_STATUS_FOUND 2
196#define AC_STATUS_BREAK 3
197
/*
 * Handle on one block group's buddy data: the pages caching the group's
 * block bitmap and buddy information, with kmapped pointers into them,
 * plus the group/superblock they belong to.
 */
struct ext4_buddy {
	struct page *bd_buddy_page;	/* page backing the buddy data */
	void *bd_buddy;			/* pointer into bd_buddy_page */
	struct page *bd_bitmap_page;	/* page backing the block bitmap */
	void *bd_bitmap;		/* pointer into bd_bitmap_page */
	struct ext4_group_info *bd_info;
	struct super_block *bd_sb;
	__u16 bd_blkbits;		/* log2 of block size */
	ext4_group_t bd_group;		/* group this buddy describes */
};
208
/*
 * Translate a free extent's (group, cluster offset) pair into an
 * absolute filesystem block number.
 */
static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
			struct ext4_free_extent *fex)
{
	return ext4_group_first_block_no(sb, fex->fe_group) +
		(fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
}
215#endif