Linux Audio

Check our new training course

Loading...
v6.8
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 * Copyright (C) 2011-2017 Red Hat, Inc.
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#ifndef DM_BIO_PRISON_H
  9#define DM_BIO_PRISON_H
 10
 11#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
 12#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */
 13
 14#include <linux/bio.h>
 15#include <linux/rbtree.h>
 16
 17/*----------------------------------------------------------------*/
 18
 19/*
 20 * Sometimes we can't deal with a bio straight away.  We put them in prison
 21 * where they can't cause any mischief.  Bios are put in a cell identified
 22 * by a key, multiple bios can be in the same cell.  When the cell is
 23 * subsequently unlocked the bios become available.
 24 */
 25struct dm_bio_prison;
 26
 27/*
 28 * Keys define a range of blocks within either a virtual or physical
 29 * device.
 30 */
  31struct dm_cell_key {
  32	int virtual;				/* non-zero: range is in the virtual (thin) block space; zero: physical data device */
  33	dm_thin_id dev;				/* thin device the range belongs to */
  34	dm_block_t block_begin, block_end;	/* block range covered by the key; span is (block_end - block_begin) — presumably end-exclusive, confirm against dm-bio-prison.c */
  35};
 36
 37/*
 38 * The range of a key (block_end - block_begin) must not
 39 * exceed BIO_PRISON_MAX_RANGE.  Also the range must not
 40 * cross a similarly sized boundary.
 41 *
 42 * Must be a power of 2.
 43 */
 44#define BIO_PRISON_MAX_RANGE 1024
 45#define BIO_PRISON_MAX_RANGE_SHIFT 10
 46
 47/*
 48 * Treat this as opaque, only in header so callers can manage allocation
 49 * themselves.
 50 */
  51struct dm_bio_prison_cell {
  52	struct list_head user_list;	/* for client use */
  53	struct rb_node node;		/* linkage in the prison's rbtree of cells (see <linux/rbtree.h>) */
  54
  55	struct dm_cell_key key;		/* block range this cell has locked */
  56	struct bio *holder;		/* the bio holding the cell (dm_bio_detain() returns 0 for the new holder) */
  57	struct bio_list bios;		/* other bios detained in the cell ("inmates"), released separately from the holder */
  58};
 59
 60struct dm_bio_prison *dm_bio_prison_create(void);
 61void dm_bio_prison_destroy(struct dm_bio_prison *prison);
 62
 63/*
 64 * These two functions just wrap a mempool.  This is a transitory step:
 65 * Eventually all bio prison clients should manage their own cell memory.
 66 *
 67 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 68 * in interrupt context or passed GFP_NOWAIT.
 69 */
 70struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
 71						    gfp_t gfp);
 72void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
 73			     struct dm_bio_prison_cell *cell);
 74
 75/*
 76 * Creates, or retrieves a cell that overlaps the given key.
 77 *
 78 * Returns 1 if pre-existing cell returned, zero if new cell created using
 79 * @cell_prealloc.
 80 */
 81int dm_get_cell(struct dm_bio_prison *prison,
 82		struct dm_cell_key *key,
 83		struct dm_bio_prison_cell *cell_prealloc,
 84		struct dm_bio_prison_cell **cell_result);
 85
 86/*
 87 * Returns false if key is beyond BIO_PRISON_MAX_RANGE or spans a boundary.
 88 */
 89bool dm_cell_key_has_valid_range(struct dm_cell_key *key);
 90
 91/*
 92 * An atomic op that combines retrieving or creating a cell, and adding a
 93 * bio to it.
 94 *
 95 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 96 */
 97int dm_bio_detain(struct dm_bio_prison *prison,
 98		  struct dm_cell_key *key,
 99		  struct bio *inmate,
100		  struct dm_bio_prison_cell *cell_prealloc,
101		  struct dm_bio_prison_cell **cell_result);
102
103void dm_cell_release(struct dm_bio_prison *prison,
104		     struct dm_bio_prison_cell *cell,
105		     struct bio_list *bios);
106void dm_cell_release_no_holder(struct dm_bio_prison *prison,
107			       struct dm_bio_prison_cell *cell,
108			       struct bio_list *inmates);
109void dm_cell_error(struct dm_bio_prison *prison,
110		   struct dm_bio_prison_cell *cell, blk_status_t error);
111
112/*
113 * Visits the cell and then releases.  Guarantees no new inmates are
114 * inserted between the visit and release.
115 */
116void dm_cell_visit_release(struct dm_bio_prison *prison,
117			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
118			   void *context, struct dm_bio_prison_cell *cell);
119
120/*
121 * Rather than always releasing the prisoners in a cell, the client may
122 * want to promote one of them to be the new holder.  There is a race here
123 * though between releasing an empty cell, and other threads adding new
124 * inmates.  So this function makes the decision with its lock held.
125 *
126 * This function can have two outcomes:
127 * i) An inmate is promoted to be the holder of the cell (return value of 0).
128 * ii) The cell has no inmate for promotion and is released (return value of 1).
129 */
130int dm_cell_promote_or_release(struct dm_bio_prison *prison,
131			       struct dm_bio_prison_cell *cell);
132
133/*----------------------------------------------------------------*/
134
135/*
136 * We use the deferred set to keep track of pending reads to shared blocks.
137 * We do this to ensure the new mapping caused by a write isn't performed
138 * until these prior reads have completed.  Otherwise the insertion of the
139 * new mapping could free the old block that the read bios are mapped to.
140 */
141
142struct dm_deferred_set;
143struct dm_deferred_entry;
144
145struct dm_deferred_set *dm_deferred_set_create(void);
146void dm_deferred_set_destroy(struct dm_deferred_set *ds);
147
148struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
149void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
150int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
151
152/*----------------------------------------------------------------*/
153
154#endif
v5.4
 
  1/*
  2 * Copyright (C) 2011-2017 Red Hat, Inc.
  3 *
  4 * This file is released under the GPL.
  5 */
  6
  7#ifndef DM_BIO_PRISON_H
  8#define DM_BIO_PRISON_H
  9
 10#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
 11#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */
 12
 13#include <linux/bio.h>
 14#include <linux/rbtree.h>
 15
 16/*----------------------------------------------------------------*/
 17
 18/*
 19 * Sometimes we can't deal with a bio straight away.  We put them in prison
 20 * where they can't cause any mischief.  Bios are put in a cell identified
 21 * by a key, multiple bios can be in the same cell.  When the cell is
 22 * subsequently unlocked the bios become available.
 23 */
 24struct dm_bio_prison;
 25
 26/*
 27 * Keys define a range of blocks within either a virtual or physical
 28 * device.
 29 */
  30struct dm_cell_key {
  31	int virtual;				/* non-zero: range is in the virtual (thin) block space; zero: physical data device */
  32	dm_thin_id dev;				/* thin device the range belongs to */
  33	dm_block_t block_begin, block_end;	/* block range covered by the key — presumably end-exclusive, confirm against dm-bio-prison.c */
  34};
 35
  36/*
  37 * Treat this as opaque, only in header so callers can manage allocation
  38 * themselves.
  39 */
  40struct dm_bio_prison_cell {
  41	struct list_head user_list;	/* for client use */
  42	struct rb_node node;		/* linkage in the prison's rbtree of cells (see <linux/rbtree.h>) */
  43
  44	struct dm_cell_key key;		/* block range this cell has locked */
  45	struct bio *holder;		/* the bio holding the cell (dm_bio_detain() returns 0 for the new holder) */
  46	struct bio_list bios;		/* other bios detained in the cell ("inmates"), released separately from the holder */
  47};
 48
 49struct dm_bio_prison *dm_bio_prison_create(void);
 50void dm_bio_prison_destroy(struct dm_bio_prison *prison);
 51
 52/*
 53 * These two functions just wrap a mempool.  This is a transitory step:
 54 * Eventually all bio prison clients should manage their own cell memory.
 55 *
 56 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 57 * in interrupt context or passed GFP_NOWAIT.
 58 */
 59struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
 60						    gfp_t gfp);
 61void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
 62			     struct dm_bio_prison_cell *cell);
 63
 64/*
 65 * Creates, or retrieves a cell that overlaps the given key.
 66 *
 67 * Returns 1 if pre-existing cell returned, zero if new cell created using
 68 * @cell_prealloc.
 69 */
 70int dm_get_cell(struct dm_bio_prison *prison,
 71		struct dm_cell_key *key,
 72		struct dm_bio_prison_cell *cell_prealloc,
 73		struct dm_bio_prison_cell **cell_result);
  74
 75/*
 76 * An atomic op that combines retrieving or creating a cell, and adding a
 77 * bio to it.
 78 *
 79 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 80 */
 81int dm_bio_detain(struct dm_bio_prison *prison,
 82		  struct dm_cell_key *key,
 83		  struct bio *inmate,
 84		  struct dm_bio_prison_cell *cell_prealloc,
 85		  struct dm_bio_prison_cell **cell_result);
 86
 87void dm_cell_release(struct dm_bio_prison *prison,
 88		     struct dm_bio_prison_cell *cell,
 89		     struct bio_list *bios);
 90void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 91			       struct dm_bio_prison_cell *cell,
 92			       struct bio_list *inmates);
 93void dm_cell_error(struct dm_bio_prison *prison,
 94		   struct dm_bio_prison_cell *cell, blk_status_t error);
 95
 96/*
 97 * Visits the cell and then releases.  Guarantees no new inmates are
 98 * inserted between the visit and release.
 99 */
100void dm_cell_visit_release(struct dm_bio_prison *prison,
101			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
102			   void *context, struct dm_bio_prison_cell *cell);
103
104/*
105 * Rather than always releasing the prisoners in a cell, the client may
106 * want to promote one of them to be the new holder.  There is a race here
107 * though between releasing an empty cell, and other threads adding new
108 * inmates.  So this function makes the decision with its lock held.
109 *
110 * This function can have two outcomes:
111 * i) An inmate is promoted to be the holder of the cell (return value of 0).
112 * ii) The cell has no inmate for promotion and is released (return value of 1).
113 */
114int dm_cell_promote_or_release(struct dm_bio_prison *prison,
115			       struct dm_bio_prison_cell *cell);
116
117/*----------------------------------------------------------------*/
118
119/*
120 * We use the deferred set to keep track of pending reads to shared blocks.
121 * We do this to ensure the new mapping caused by a write isn't performed
122 * until these prior reads have completed.  Otherwise the insertion of the
123 * new mapping could free the old block that the read bios are mapped to.
124 */
125
126struct dm_deferred_set;
127struct dm_deferred_entry;
128
129struct dm_deferred_set *dm_deferred_set_create(void);
130void dm_deferred_set_destroy(struct dm_deferred_set *ds);
131
132struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
133void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
134int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
135
136/*----------------------------------------------------------------*/
137
138#endif