v6.13.7
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_METADATA_H
#define DM_CACHE_METADATA_H

#include "dm-cache-block-types.h"
#include "dm-cache-policy-internal.h"
#include "persistent-data/dm-space-map-metadata.h"

/*----------------------------------------------------------------*/

#define DM_CACHE_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE

/* FIXME: remove this restriction */
/*
 * The metadata device is currently limited in size.
 */
#define DM_CACHE_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS

/*
 * A metadata device larger than 16GB triggers a warning.
 */
#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
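/*
 * Illustrative sketch, not part of the original header: a compile-time
 * cross-check of the warning threshold above, assuming the usual 512-byte
 * sector (SECTOR_SHIFT == 9) and <linux/build_bug.h>.  16 * (1 GiB >> 9)
 * works out to 33554432 sectors, i.e. exactly 16 GiB.  The function name
 * is invented for the example.
 */
static inline void example_check_warning_threshold(void)
{
	BUILD_BUG_ON(DM_CACHE_METADATA_MAX_SECTORS_WARNING !=
		     ((16ULL << 30) >> SECTOR_SHIFT));
}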

/*----------------------------------------------------------------*/

/*
 * Ext[234]-style compat feature flags.
 *
 * A new feature which old metadata will still be compatible with should
 * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
 *
 * A new feature that is not compatible with old code should define a
 * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
 * that flag.
 *
 * A new feature that is not compatible with old code accessing the
 * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
 * guard the relevant code with that flag.
 *
 * As these various flags are defined they should be added to the
 * following masks.
 */

#define DM_CACHE_FEATURE_COMPAT_SUPP	  0UL
#define DM_CACHE_FEATURE_COMPAT_RO_SUPP	  0UL
#define DM_CACHE_FEATURE_INCOMPAT_SUPP	  0UL
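
/*
 * Illustrative sketch, not from this header: the ext[234]-style check a
 * superblock-reading path typically performs against the masks above.
 * 'sb_incompat_flags' stands for the incompat flags read from disk; the
 * function name is invented for the example.
 */
static inline bool example_incompat_features_supported(unsigned long sb_incompat_flags)
{
	/*
	 * Any on-disk incompat bit the running code does not know about
	 * means the metadata must not be used at all.
	 */
	return (sb_incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP) == 0;
}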

struct dm_cache_metadata;

/*
 * Reopens or creates a new, empty metadata volume.  Returns an ERR_PTR on
 * failure.  If reopening then features must match.
 */
struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
						 sector_t data_block_size,
						 bool may_format_device,
						 size_t policy_hint_size,
						 unsigned int metadata_version);

void dm_cache_metadata_close(struct dm_cache_metadata *cmd);

/*
 * The metadata needs to know how many cache blocks there are.  We don't
 * care about the origin, assuming the core target is giving us valid
 * origin blocks to map to.
 */
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
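
/*
 * Illustrative sketch, not part of the header: one possible
 * open -> resize -> close sequence.  The 512-sector data block size, the
 * 64-block cache size, metadata version 2 and the function name are all
 * made up for the example; the real caller lives in dm-cache-target.c.
 * Assumes <linux/err.h> for IS_ERR()/PTR_ERR(); to_cblock() comes from
 * dm-cache-block-types.h.
 */
static int example_bring_up_metadata(struct block_device *metadata_dev)
{
	struct dm_cache_metadata *cmd;
	int r;

	cmd = dm_cache_metadata_open(metadata_dev, 512, true,
				     sizeof(uint32_t), 2);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* Tell the metadata how many cache blocks the fast device holds. */
	r = dm_cache_resize(cmd, to_cblock(64));

	dm_cache_metadata_close(cmd);
	return r;
}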

int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
				   sector_t discard_block_size,
				   dm_dblock_t new_nr_entries);

typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
			       dm_dblock_t dblock, bool discarded);
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context);

int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
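
/*
 * Illustrative sketch: a minimal load_discard_fn that only counts the
 * blocks marked as discarded.  The context struct and both names are
 * invented for the example; a non-zero return from the callback is
 * treated as an error by dm_cache_load_discards().
 */
struct example_discard_count {
	unsigned int nr_discarded;
};

static int example_count_discards(void *context, sector_t discard_block_size,
				  dm_dblock_t dblock, bool discarded)
{
	struct example_discard_count *c = context;

	if (discarded)
		c->nr_discarded++;

	return 0;
}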

int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);

typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
			       dm_cblock_t cblock, bool dirty,
			       uint32_t hint, bool hint_valid);
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn,
			   void *context);
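
/*
 * Illustrative sketch: a load_mapping_fn that simply logs each mapping as
 * it is replayed.  The function name is invented; from_cblock() and
 * from_oblock() come from dm-cache-block-types.h.
 *
 * Typical call: dm_cache_load_mappings(cmd, policy, example_log_mapping, NULL);
 */
static int example_log_mapping(void *context, dm_oblock_t oblock,
			       dm_cblock_t cblock, bool dirty,
			       uint32_t hint, bool hint_valid)
{
	pr_info("cblock %u -> oblock %llu%s%s\n",
		from_cblock(cblock),
		(unsigned long long)from_oblock(oblock),
		dirty ? " (dirty)" : "",
		hint_valid ? " (hinted)" : "");
	return 0;
}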

int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
			    unsigned int nr_bits, unsigned long *bits);
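
/*
 * Illustrative sketch: building a one-bit-per-cblock bitmap and handing it
 * to dm_cache_set_dirty_bits().  The function name and the single dirty
 * block are arbitrary; assumes <linux/bitmap.h>.
 */
static int example_write_dirty_bitmap(struct dm_cache_metadata *cmd,
				      unsigned int nr_cblocks,
				      unsigned int dirty_cblock)
{
	unsigned long *bits;
	int r;

	bits = bitmap_zalloc(nr_cblocks, GFP_KERNEL);
	if (!bits)
		return -ENOMEM;

	__set_bit(dirty_cblock, bits);
	r = dm_cache_set_dirty_bits(cmd, nr_cblocks, bits);

	bitmap_free(bits);
	return r;
}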

struct dm_cache_statistics {
	uint32_t read_hits;
	uint32_t read_misses;
	uint32_t write_hits;
	uint32_t write_misses;
};

void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);

/*
 * 'void' because it's no big deal if it fails.
 */
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);
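
/*
 * Illustrative sketch: reading the counters back and logging them.  The
 * function name is invented; only the fields declared above are used.
 */
static void example_log_stats(struct dm_cache_metadata *cmd)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cmd, &stats);
	pr_info("reads %u/%u hit/miss, writes %u/%u hit/miss\n",
		stats.read_hits, stats.read_misses,
		stats.write_hits, stats.write_misses);

	/*
	 * Writing updated counters back is best-effort, hence the void
	 * return on dm_cache_metadata_set_stats().
	 */
	dm_cache_metadata_set_stats(cmd, &stats);
}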

int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);

int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
					   dm_block_t *result);

int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
				   dm_block_t *result);

/*
 * The policy is invited to save a 32bit hint value for every cblock (eg,
 * for a hit count).  These are stored against the policy name.  If
 * policies are changed, then hints will be lost.  If the machine crashes,
 * hints will be lost.
 *
 * The hints are indexed by the cblock, but many policies will not
 * necessarily have a fast way of accessing efficiently via cblock.  So
 * rather than querying the policy for each cblock, we let it walk its data
 * structures and fill in the hints in whatever order it wishes.
 */
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
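
/*
 * Illustrative sketch: one plausible shutdown-time ordering -- persist the
 * policy's hints first, then commit with clean_shutdown set.  The function
 * name is invented and error handling is trimmed; the authoritative
 * sequence is the one in dm-cache-target.c.
 */
static int example_clean_shutdown(struct dm_cache_metadata *cmd,
				  struct dm_cache_policy *policy)
{
	int r;

	r = dm_cache_write_hints(cmd, policy);
	if (r)
		return r;

	return dm_cache_commit(cmd, true);
}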

/*
 * Query method.  Are all the blocks in the cache clean?
 */
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
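
/*
 * Illustrative sketch of the query pattern used here: a 0/-errno return
 * code plus a bool out-parameter.  The wrapper name is invented.
 */
static bool example_cache_is_clean(struct dm_cache_metadata *cmd)
{
	bool all_clean;

	if (dm_cache_metadata_all_clean(cmd, &all_clean))
		return false;	/* treat a metadata error as "not clean" */

	return all_clean;
}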

int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd);

/*----------------------------------------------------------------*/

#endif /* DM_CACHE_METADATA_H */
v3.15
 
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_METADATA_H
#define DM_CACHE_METADATA_H

#include "dm-cache-block-types.h"
#include "dm-cache-policy-internal.h"

/*----------------------------------------------------------------*/

#define DM_CACHE_METADATA_BLOCK_SIZE 4096

/* FIXME: remove this restriction */
/*
 * The metadata device is currently limited in size.
 *
 * We have one block of index, which can hold 255 index entries.  Each
 * index entry contains allocation info about 16k metadata blocks.
 */
#define DM_CACHE_METADATA_MAX_SECTORS (255 * (1 << 14) * (DM_CACHE_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
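
/*
 * Illustrative arithmetic, not part of the original header: with 512-byte
 * sectors (SECTOR_SHIFT == 9) the limit above is
 *   255 index entries * 16384 blocks/entry * 8 sectors/block
 *   = 33423360 sectors, a little under 16 GiB of metadata.
 * The function name is invented; assumes <linux/build_bug.h>.
 */
static inline void example_check_metadata_limit(void)
{
	BUILD_BUG_ON(DM_CACHE_METADATA_MAX_SECTORS != 255 * 16384 * 8);
}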

/*
 * A metadata device larger than 16GB triggers a warning.
 */
#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))

/*----------------------------------------------------------------*/

/*
 * Ext[234]-style compat feature flags.
 *
 * A new feature which old metadata will still be compatible with should
 * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
 *
 * A new feature that is not compatible with old code should define a
 * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
 * that flag.
 *
 * A new feature that is not compatible with old code accessing the
 * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
 * guard the relevant code with that flag.
 *
 * As these various flags are defined they should be added to the
 * following masks.
 */
#define DM_CACHE_FEATURE_COMPAT_SUPP	  0UL
#define DM_CACHE_FEATURE_COMPAT_RO_SUPP	  0UL
#define DM_CACHE_FEATURE_INCOMPAT_SUPP	  0UL

/*
 * Reopens or creates a new, empty metadata volume.
 * Returns an ERR_PTR on failure.
 */
struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
						 sector_t data_block_size,
						 bool may_format_device,
						 size_t policy_hint_size);

void dm_cache_metadata_close(struct dm_cache_metadata *cmd);

/*
 * The metadata needs to know how many cache blocks there are.  We don't
 * care about the origin, assuming the core target is giving us valid
 * origin blocks to map to.
 */
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);

int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
				   sector_t discard_block_size,
				   dm_oblock_t new_nr_entries);

typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
			       dm_oblock_t dblock, bool discarded);
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context);

int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_oblock_t dblock, bool discard);

int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);

typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
			       dm_cblock_t cblock, bool dirty,
			       uint32_t hint, bool hint_valid);
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn,
			   void *context);

int dm_cache_set_dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty);

struct dm_cache_statistics {
	uint32_t read_hits;
	uint32_t read_misses;
	uint32_t write_hits;
	uint32_t write_misses;
};

void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);

int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);

int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
					   dm_block_t *result);

int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
				   dm_block_t *result);

void dm_cache_dump(struct dm_cache_metadata *cmd);

/*
 * The policy is invited to save a 32bit hint value for every cblock (eg,
 * for a hit count).  These are stored against the policy name.  If
 * policies are changed, then hints will be lost.  If the machine crashes,
 * hints will be lost.
 *
 * The hints are indexed by the cblock, but many policies will not
 * necessarily have a fast way of accessing efficiently via cblock.  So
 * rather than querying the policy for each cblock, we let it walk its data
 * structures and fill in the hints in whatever order it wishes.
 */
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);

/*
 * Query method.  Are all the blocks in the cache clean?
 */
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);

/*----------------------------------------------------------------*/

#endif /* DM_CACHE_METADATA_H */