/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_POLICY_H
#define DM_CACHE_POLICY_H

#include "dm-cache-block-types.h"

#include <linux/device-mapper.h>

/*----------------------------------------------------------------*/

/*
 * The cache policy makes the important decisions about which blocks get to
 * live on the faster cache device.
 */
enum policy_operation {
	POLICY_PROMOTE,
	POLICY_DEMOTE,
	POLICY_WRITEBACK
};

/*
 * This is the instruction passed back to the core target.
 */
struct policy_work {
	enum policy_operation op;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
};
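
/*
 * Illustrative sketch only, not part of this interface: one way the core
 * target might act on a policy_work instruction.  The function name and the
 * comments in each branch are hypothetical; the real handling lives in
 * dm-cache-target.c.  Guarded out so it is never compiled.
 */
#if 0
static void example_handle_policy_work(struct policy_work *work)
{
	switch (work->op) {
	case POLICY_PROMOTE:
		/* copy work->oblock from the origin into cache block work->cblock */
		break;
	case POLICY_DEMOTE:
		/* drop the mapping held by work->cblock, copying back if needed */
		break;
	case POLICY_WRITEBACK:
		/* write the dirty contents of work->cblock back to work->oblock */
		break;
	}
}
#endif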

/*
 * The cache policy object.  It is envisaged that this structure will be
 * embedded in a bigger, policy-specific structure (ie. use container_of()).
 */
struct dm_cache_policy {
	/*
	 * Destroys this object.
	 */
	void (*destroy)(struct dm_cache_policy *p);

	/*
	 * Find the location of a block.
	 *
	 * Must not block.
	 *
	 * Returns 0 if in cache (cblock will be set), -ENOENT if not, < 0 for
	 * other errors (-EWOULDBLOCK would be typical).  data_dir should be
	 * READ or WRITE.  fast_copy should be set if migrating this block would
	 * be 'cheap' somehow (eg, discarded data).  background_queued will be
	 * set if a migration has just been queued.
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
		      int data_dir, bool fast_copy, bool *background_queued);

	/*
	 * Sometimes the core target can optimise a migration, eg, the
	 * block may be discarded, or the bio may cover an entire block.
	 * In order to optimise, though, it needs the migration immediately
	 * so it knows to do something different with the bio.
	 *
	 * This method is optional (policy-internal will fall back to using
	 * lookup).
	 */
	int (*lookup_with_work)(struct dm_cache_policy *p,
				dm_oblock_t oblock, dm_cblock_t *cblock,
				int data_dir, bool fast_copy,
				struct policy_work **work);

	/*
	 * Retrieves background work.  Returns -ENODATA when there's no
	 * background work.
	 */
	int (*get_background_work)(struct dm_cache_policy *p, bool idle,
				   struct policy_work **result);

	/*
	 * You must pass in the same work pointer that you were given, not
	 * a copy.
	 */
	void (*complete_background_work)(struct dm_cache_policy *p,
					 struct policy_work *work,
					 bool success);

	void (*set_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);
	void (*clear_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Called when a cache target is first created.  Used to load a
	 * mapping from the metadata device into the policy.
	 */
	int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
			    dm_cblock_t cblock, bool dirty,
			    uint32_t hint, bool hint_valid);

	/*
	 * Drops the mapping, irrespective of whether it's clean or dirty.
	 * Returns -ENODATA if cblock is not mapped.
	 */
	int (*invalidate_mapping)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Gets the hint for a given cblock.  Called in a single-threaded
	 * context, so no locking is required.
	 */
	uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * How full is the cache?
	 */
	dm_cblock_t (*residency)(struct dm_cache_policy *p);

	/*
	 * Because of where we sit in the block layer, we can be asked to
	 * map a lot of little bios that are all in the same block (no
	 * queue merging has occurred).  To stop the policy being fooled by
	 * these, the core target sends regular tick() calls to the policy.
	 * The policy should only count an entry as hit once per tick.
	 *
	 * This method is optional.
	 */
	void (*tick)(struct dm_cache_policy *p, bool can_block);

	/*
	 * Configuration.
	 */
	int (*emit_config_values)(struct dm_cache_policy *p, char *result,
				  unsigned int maxlen, ssize_t *sz_ptr);
	int (*set_config_value)(struct dm_cache_policy *p,
				const char *key, const char *value);

	void (*allow_migrations)(struct dm_cache_policy *p, bool allow);

	/*
	 * Bookkeeping ptr for the policy register, not for general use.
	 */
	void *private;
};
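
/*
 * Illustrative sketch only, assuming the method semantics documented above:
 * how a caller such as the core target might combine lookup() with the
 * background-work methods.  All names below are hypothetical and the block
 * is guarded out so it is never compiled.
 */
#if 0
static int example_drive_policy(struct dm_cache_policy *p, dm_oblock_t oblock,
				int data_dir, bool idle)
{
	dm_cblock_t cblock;
	bool background_queued = false;
	struct policy_work *work = NULL;
	int r;

	/* Ask the policy where the block lives; this must not block. */
	r = p->lookup(p, oblock, &cblock, data_dir, false, &background_queued);
	if (r == 0) {
		/* hit: remap the bio to cblock on the cache device */
	} else if (r == -ENOENT) {
		/* miss: remap the bio to the origin device */
	} else
		return r;

	/* Pull any queued migration and hand it back once it completes. */
	if (!p->get_background_work(p, idle, &work)) {
		/* ... perform the promote/demote/writeback described by work ... */
		p->complete_background_work(p, work, true);
	}

	return 0;
}
#endif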

/*----------------------------------------------------------------*/

/*
 * We maintain a little register of the different policy types.
 */
#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3

struct dm_cache_policy_type {
	/* For use by the register code only. */
	struct list_head list;

	/*
	 * Policy writers should fill in these fields.  The name field is
	 * what gets passed on the target line to select your policy.
	 */
	char name[CACHE_POLICY_NAME_SIZE];
	unsigned int version[CACHE_POLICY_VERSION_SIZE];

	/*
	 * For use by an alias dm_cache_policy_type to point to the
	 * real dm_cache_policy_type.
	 */
	struct dm_cache_policy_type *real;

	/*
	 * Policies may store a hint for each cache block.
	 * Currently the size of this hint must be 0 or 4 bytes but we
	 * expect to relax this in future.
	 */
	size_t hint_size;

	struct module *owner;
	struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t block_size);
};

int dm_cache_policy_register(struct dm_cache_policy_type *type);
void dm_cache_policy_unregister(struct dm_cache_policy_type *type);
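
/*
 * Illustrative sketch only: how a policy module might fill in a
 * dm_cache_policy_type and register itself, in the same spirit as the
 * shipped policies.  The "example" name, version and functions are
 * hypothetical, and linux/module.h would be needed for THIS_MODULE and
 * module_init/module_exit; the block is guarded out so it is never compiled.
 */
#if 0
static struct dm_cache_policy *example_create(dm_cblock_t cache_size,
					      sector_t origin_size,
					      sector_t block_size)
{
	/*
	 * Allocate a policy-specific object embedding a dm_cache_policy,
	 * fill in its method pointers and return a pointer to the embedded
	 * dm_cache_policy (NULL on failure).
	 */
	return NULL;
}

static struct dm_cache_policy_type example_policy_type = {
	.name = "example",
	.version = {1, 0, 0},
	.hint_size = 4,		/* currently must be 0 or 4 bytes */
	.owner = THIS_MODULE,
	.create = example_create,
};

static int __init example_init(void)
{
	return dm_cache_policy_register(&example_policy_type);
}

static void __exit example_exit(void)
{
	dm_cache_policy_unregister(&example_policy_type);
}

module_init(example_init);
module_exit(example_exit);
#endif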

/*----------------------------------------------------------------*/

#endif /* DM_CACHE_POLICY_H */