dm-cache-policy.h at v6.2:
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_POLICY_H
#define DM_CACHE_POLICY_H

#include "dm-cache-block-types.h"

#include <linux/device-mapper.h>

/*----------------------------------------------------------------*/

/*
 * The cache policy makes the important decisions about which blocks get to
 * live on the faster cache device.
 */
enum policy_operation {
	POLICY_PROMOTE,
	POLICY_DEMOTE,
	POLICY_WRITEBACK
};

/*
 * This is the instruction passed back to the core target.
 */
struct policy_work {
	enum policy_operation op;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
};

/*
 * The cache policy object.  It is envisaged that this structure will be
 * embedded in a bigger, policy specific structure (ie. use container_of()).
 */
struct dm_cache_policy {
	/*
	 * Destroys this object.
	 */
	void (*destroy)(struct dm_cache_policy *p);

	/*
	 * Find the location of a block.
	 *
	 * Must not block.
	 *
	 * Returns 0 if in cache (cblock will be set), -ENOENT if not, < 0 for
	 * other errors (-EWOULDBLOCK would be typical).  data_dir should be
	 * READ or WRITE. fast_copy should be set if migrating this block would
	 * be 'cheap' somehow (eg, discarded data). background_queued will be set
	 * if a migration has just been queued.
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
		      int data_dir, bool fast_copy, bool *background_queued);

	/*
	 * Sometimes the core target can optimise a migration, eg, the
	 * block may be discarded, or the bio may cover an entire block.
	 * In order to optimise it needs the migration immediately though
	 * so it knows to do something different with the bio.
	 *
	 * This method is optional (policy-internal will fallback to using
	 * lookup).
	 */
	int (*lookup_with_work)(struct dm_cache_policy *p,
				dm_oblock_t oblock, dm_cblock_t *cblock,
				int data_dir, bool fast_copy,
				struct policy_work **work);

	/*
	 * Retrieves background work.  Returns -ENODATA when there's no
	 * background work.
	 */
	int (*get_background_work)(struct dm_cache_policy *p, bool idle,
			           struct policy_work **result);

	/*
	 * You must pass in the same work pointer that you were given, not
	 * a copy.
	 */
	void (*complete_background_work)(struct dm_cache_policy *p,
					 struct policy_work *work,
					 bool success);

	void (*set_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);
	void (*clear_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Called when a cache target is first created.  Used to load a
	 * mapping from the metadata device into the policy.
	 */
	int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
			    dm_cblock_t cblock, bool dirty,
			    uint32_t hint, bool hint_valid);

	/*
	 * Drops the mapping, irrespective of whether it's clean or dirty.
	 * Returns -ENODATA if cblock is not mapped.
	 */
	int (*invalidate_mapping)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Gets the hint for a given cblock.  Called in a single threaded
	 * context.  So no locking required.
	 */
	uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * How full is the cache?
	 */
	dm_cblock_t (*residency)(struct dm_cache_policy *p);

	/*
	 * Because of where we sit in the block layer, we can be asked to
	 * map a lot of little bios that are all in the same block (no
	 * queue merging has occurred).  To stop the policy being fooled by
	 * these, the core target sends regular tick() calls to the policy.
	 * The policy should only count an entry as hit once per tick.
	 *
	 * This method is optional.
	 */
	void (*tick)(struct dm_cache_policy *p, bool can_block);

	/*
	 * Configuration.
	 */
	int (*emit_config_values)(struct dm_cache_policy *p, char *result,
				  unsigned maxlen, ssize_t *sz_ptr);
	int (*set_config_value)(struct dm_cache_policy *p,
				const char *key, const char *value);

	void (*allow_migrations)(struct dm_cache_policy *p, bool allow);

	/*
	 * Book keeping ptr for the policy register, not for general use.
	 */
	void *private;
};

/*----------------------------------------------------------------*/

/*
 * We maintain a little register of the different policy types.
 */
#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3

struct dm_cache_policy_type {
	/* For use by the register code only. */
	struct list_head list;

	/*
	 * Policy writers should fill in these fields.  The name field is
	 * what gets passed on the target line to select your policy.
	 */
	char name[CACHE_POLICY_NAME_SIZE];
	unsigned version[CACHE_POLICY_VERSION_SIZE];

	/*
	 * For use by an alias dm_cache_policy_type to point to the
	 * real dm_cache_policy_type.
	 */
	struct dm_cache_policy_type *real;

	/*
	 * Policies may store a hint for each cache block.
	 * Currently the size of this hint must be 0 or 4 bytes but we
	 * expect to relax this in future.
	 */
	size_t hint_size;

	struct module *owner;
	struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t block_size);
};

int dm_cache_policy_register(struct dm_cache_policy_type *type);
void dm_cache_policy_unregister(struct dm_cache_policy_type *type);

/*----------------------------------------------------------------*/

#endif	/* DM_CACHE_POLICY_H */
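
The v6.2 interface above expects each policy to embed struct dm_cache_policy in its own larger object (recovered with container_of()), fill in the method table from its create() callback, and register a struct dm_cache_policy_type so the policy name can be selected on the dm-cache target line. The sketch below only illustrates that wiring: "example", struct example_policy and the example_* functions are invented for illustration and are not part of the kernel tree (the in-tree smq policy is the real reference implementation), and a working policy would have to fill in the rest of the method table.

/*
 * Illustrative sketch only: "example_policy" and the example_* names are
 * invented; they are not part of the kernel tree.
 */
#include <linux/module.h>
#include <linux/slab.h>

#include "dm-cache-policy.h"

/* Policy-specific object with the generic policy embedded (see container_of()). */
struct example_policy {
	struct dm_cache_policy policy;
	dm_cblock_t cache_size;
	/* ... allocation tables, queues, locks ... */
};

static struct example_policy *to_example(struct dm_cache_policy *p)
{
	return container_of(p, struct example_policy, policy);
}

static void example_destroy(struct dm_cache_policy *p)
{
	kfree(to_example(p));
}

/* Trivial lookup: report every block as a miss; must not block. */
static int example_lookup(struct dm_cache_policy *p, dm_oblock_t oblock,
			  dm_cblock_t *cblock, int data_dir, bool fast_copy,
			  bool *background_queued)
{
	*background_queued = false;
	return -ENOENT;
}

/* No promotions, demotions or writebacks to hand back to the core target. */
static int example_get_background_work(struct dm_cache_policy *p, bool idle,
				       struct policy_work **result)
{
	return -ENODATA;
}

static struct dm_cache_policy *example_create(dm_cblock_t cache_size,
					      sector_t origin_size,
					      sector_t block_size)
{
	struct example_policy *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return NULL;

	ep->cache_size = cache_size;
	ep->policy.destroy = example_destroy;
	ep->policy.lookup = example_lookup;
	ep->policy.get_background_work = example_get_background_work;
	/*
	 * A real policy must also fill in complete_background_work,
	 * set_dirty, clear_dirty, load_mapping, invalidate_mapping,
	 * get_hint, residency and the other methods the core target
	 * calls; lookup_with_work and tick are documented as optional.
	 */
	return &ep->policy;
}

static struct dm_cache_policy_type example_policy_type = {
	.name = "example",		/* selected on the dm-cache target line */
	.version = {1, 0, 0},
	.hint_size = 4,			/* currently must be 0 or 4 bytes */
	.owner = THIS_MODULE,
	.create = example_create
};

static int __init example_init(void)
{
	return dm_cache_policy_register(&example_policy_type);
}

static void __exit example_exit(void)
{
	dm_cache_policy_unregister(&example_policy_type);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

A module registered this way is picked by giving its .name field as the policy argument of the cache target, per the comment on struct dm_cache_policy_type.
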
dm-cache-policy.h at v4.10.11:
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_POLICY_H
#define DM_CACHE_POLICY_H

#include "dm-cache-block-types.h"

#include <linux/device-mapper.h>

/*----------------------------------------------------------------*/

/* FIXME: make it clear which methods are optional.  Get debug policy to
 * double check this at start.
 */

/*
 * The cache policy makes the important decisions about which blocks get to
 * live on the faster cache device.
 *
 * When the core target has to remap a bio it calls the 'map' method of the
 * policy.  This returns an instruction telling the core target what to do.
 *
 * POLICY_HIT:
 *   That block is in the cache.  Remap to the cache and carry on.
 *
 * POLICY_MISS:
 *   This block is on the origin device.  Remap and carry on.
 *
 * POLICY_NEW:
 *   This block is currently on the origin device, but the policy wants to
 *   move it.  The core should:
 *
 *   - hold any further io to this origin block
 *   - copy the origin to the given cache block
 *   - release all the held blocks
 *   - remap the original block to the cache
 *
 * POLICY_REPLACE:
 *   This block is currently on the origin device.  The policy wants to
 *   move it to the cache, with the added complication that the destination
 *   cache block needs a writeback first.  The core should:
 *
 *   - hold any further io to this origin block
 *   - hold any further io to the origin block that's being written back
 *   - writeback
 *   - copy new block to cache
 *   - release held blocks
 *   - remap bio to cache and reissue.
 *
 * Should the core run into trouble while processing a POLICY_NEW or
 * POLICY_REPLACE instruction it will roll back the policies mapping using
 * remove_mapping() or force_mapping().  These methods must not fail.  This
 * approach avoids having transactional semantics in the policy (ie, the
 * core informing the policy when a migration is complete), and hence makes
 * it easier to write new policies.
 *
 * In general policy methods should never block, except in the case of the
 * map function when can_migrate is set.  So be careful to implement using
 * bounded, preallocated memory.
 */
enum policy_operation {
	POLICY_HIT,
	POLICY_MISS,
	POLICY_NEW,
	POLICY_REPLACE
};

/*
 * When issuing a POLICY_REPLACE the policy needs to make a callback to
 * lock the block being demoted.  This doesn't need to occur during a
 * writeback operation since the block remains in the cache.
 */
struct policy_locker;
typedef int (*policy_lock_fn)(struct policy_locker *l, dm_oblock_t oblock);

struct policy_locker {
	policy_lock_fn fn;
};

/*
 * This is the instruction passed back to the core target.
 */
struct policy_result {
	enum policy_operation op;
	dm_oblock_t old_oblock;	/* POLICY_REPLACE */
	dm_cblock_t cblock;	/* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
};

/*
 * The cache policy object.  Just a bunch of methods.  It is envisaged that
 * this structure will be embedded in a bigger, policy specific structure
 * (ie. use container_of()).
 */
struct dm_cache_policy {

	/*
	 * FIXME: make it clear which methods are optional, and which may
	 * block.
	 */

	/*
	 * Destroys this object.
	 */
	void (*destroy)(struct dm_cache_policy *p);

	/*
	 * See large comment above.
	 *
	 * oblock      - the origin block we're interested in.
	 *
	 * can_block - indicates whether the current thread is allowed to
	 *             block.  -EWOULDBLOCK returned if it can't and would.
	 *
	 * can_migrate - gives permission for POLICY_NEW or POLICY_REPLACE
	 *               instructions.  If denied and the policy would have
	 *               returned one of these instructions it should
	 *               return -EWOULDBLOCK.
	 *
	 * discarded_oblock - indicates whether the whole origin block is
	 *               in a discarded state (FIXME: better to tell the
	 *               policy about this sooner, so it can recycle that
	 *               cache block if it wants.)
	 * bio         - the bio that triggered this call.
	 * result      - gets filled in with the instruction.
	 *
	 * May only return 0, or -EWOULDBLOCK (if !can_migrate)
	 */
	int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
		   bool can_block, bool can_migrate, bool discarded_oblock,
		   struct bio *bio, struct policy_locker *locker,
		   struct policy_result *result);

	/*
	 * Sometimes we want to see if a block is in the cache, without
	 * triggering any update of stats.  (ie. it's not a real hit).
	 *
	 * Must not block.
	 *
	 * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
	 * (-EWOULDBLOCK would be typical).
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);

	void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
	void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);

	/*
	 * Called when a cache target is first created.  Used to load a
	 * mapping from the metadata device into the policy.
	 */
	int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
			    dm_cblock_t cblock, uint32_t hint, bool hint_valid);

	/*
	 * Gets the hint for a given cblock.  Called in a single threaded
	 * context.  So no locking required.
	 */
	uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Override functions used on the error paths of the core target.
	 * They must succeed.
	 */
	void (*remove_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock);
	void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
			      dm_oblock_t new_oblock);

	/*
	 * This is called via the invalidate_cblocks message.  It is
	 * possible the particular cblock has already been removed due to a
	 * write io in passthrough mode.  In which case this should return
	 * -ENODATA.
	 */
	int (*remove_cblock)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Provide a dirty block to be written back by the core target.  If
	 * critical_only is set then the policy should only provide work if
	 * it urgently needs it.
	 *
	 * Returns:
	 *
	 * 0 and @cblock,@oblock: block to write back provided
	 *
	 * -ENODATA: no dirty blocks available
	 */
	int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock,
			      bool critical_only);

	/*
	 * How full is the cache?
	 */
	dm_cblock_t (*residency)(struct dm_cache_policy *p);

	/*
	 * Because of where we sit in the block layer, we can be asked to
	 * map a lot of little bios that are all in the same block (no
	 * queue merging has occurred).  To stop the policy being fooled by
	 * these, the core target sends regular tick() calls to the policy.
	 * The policy should only count an entry as hit once per tick.
	 */
	void (*tick)(struct dm_cache_policy *p, bool can_block);

	/*
	 * Configuration.
	 */
	int (*emit_config_values)(struct dm_cache_policy *p, char *result,
				  unsigned maxlen, ssize_t *sz_ptr);
	int (*set_config_value)(struct dm_cache_policy *p,
				const char *key, const char *value);

	/*
	 * Book keeping ptr for the policy register, not for general use.
	 */
	void *private;
};

/*----------------------------------------------------------------*/

/*
 * We maintain a little register of the different policy types.
 */
#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3

struct dm_cache_policy_type {
	/* For use by the register code only. */
	struct list_head list;

	/*
	 * Policy writers should fill in these fields.  The name field is
	 * what gets passed on the target line to select your policy.
	 */
	char name[CACHE_POLICY_NAME_SIZE];
	unsigned version[CACHE_POLICY_VERSION_SIZE];

	/*
	 * For use by an alias dm_cache_policy_type to point to the
	 * real dm_cache_policy_type.
	 */
	struct dm_cache_policy_type *real;

	/*
	 * Policies may store a hint for each cache block.
	 * Currently the size of this hint must be 0 or 4 bytes but we
	 * expect to relax this in future.
	 */
	size_t hint_size;

	struct module *owner;
	struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t block_size);
};

int dm_cache_policy_register(struct dm_cache_policy_type *type);
void dm_cache_policy_unregister(struct dm_cache_policy_type *type);

/*----------------------------------------------------------------*/

#endif	/* DM_CACHE_POLICY_H */
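
For comparison with the v6.2 interface, the sketch below shows the shape of a map() implementation against this older v4.10.11 contract: it only ever returns 0 or -EWOULDBLOCK, fills struct policy_result with one of POLICY_HIT/MISS/NEW/REPLACE, and calls the policy_locker before issuing a POLICY_REPLACE. struct toy_policy and toy_map() are invented here (a deliberately useless single-slot "cache"), and treating a busy victim as a plain miss is just one possible choice, not necessarily what the in-tree mq/smq policies do.

/*
 * Illustrative sketch only: "toy_policy"/"toy_map" are invented; the point
 * is the calling convention of map(), policy_result and policy_locker
 * described in the header above.
 */
#include "dm-cache-policy.h"

struct toy_policy {
	struct dm_cache_policy policy;
	bool slot_used;		/* one-entry "cache": cblock 0 only */
	dm_oblock_t resident;	/* origin block currently held in cblock 0 */
};

static int toy_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		   bool can_block, bool can_migrate, bool discarded_oblock,
		   struct bio *bio, struct policy_locker *locker,
		   struct policy_result *result)
{
	struct toy_policy *tp = container_of(p, struct toy_policy, policy);

	if (tp->slot_used && from_oblock(tp->resident) == from_oblock(oblock)) {
		/* Already cached: tell the core to remap to the cache device. */
		result->op = POLICY_HIT;
		result->cblock = to_cblock(0);
		return 0;
	}

	/* A POLICY_NEW or POLICY_REPLACE would be needed to cache this block. */
	if (!can_migrate)
		return -EWOULDBLOCK;

	if (!tp->slot_used) {
		/* Free cache block: ask the core to copy the origin block in. */
		tp->slot_used = true;
		tp->resident = oblock;
		result->op = POLICY_NEW;
		result->cblock = to_cblock(0);
		return 0;
	}

	/*
	 * Cache full: a POLICY_REPLACE demotes the current resident, so the
	 * block being demoted must first be locked via the locker callback.
	 * If it is busy, falling back to a plain miss is one possible choice.
	 */
	if (locker->fn(locker, tp->resident)) {
		result->op = POLICY_MISS;
		return 0;
	}

	result->op = POLICY_REPLACE;
	result->old_oblock = tp->resident;
	result->cblock = to_cblock(0);
	tp->resident = oblock;
	return 0;
}

The hit/miss bookkeeping, the dirty-block handling for writeback_work() and the remove_mapping()/force_mapping() rollback paths are all elided; the sketch only traces how the instruction travels back to the core target through struct policy_result.
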