/*
 * NOTE(review): web-scrape navigation chrome removed here ("Linux Audio",
 * "Check our new training course", "Loading...").  The content below is
 * drivers/md/dm-bio-prison-v2.c as of Linux v6.8.
 */
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2012-2017 Red Hat, Inc.
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include "dm.h"
  9#include "dm-bio-prison-v2.h"
 10
 11#include <linux/spinlock.h>
 12#include <linux/mempool.h>
 13#include <linux/module.h>
 14#include <linux/slab.h>
 15#include <linux/rwsem.h>
 16
 17/*----------------------------------------------------------------*/
 18
 19#define MIN_CELLS 1024
 20
/*
 * Top-level prison object.  All mutable state is protected by @lock.
 */
struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;	/* runs quiesce continuations (see __quiesce()) */

	spinlock_t lock;		/* protects @cells and all cell state */
	struct rb_root cells;		/* cells indexed by dm_cell_key_v2 (see cmp_keys()) */
	mempool_t cell_pool;		/* backing allocator for cells */
};

/* Slab cache shared by all prison instances; created in dm_bio_prison_init_v2(). */
static struct kmem_cache *_cell_cache;
 30
 31/*----------------------------------------------------------------*/
 32
/*
 * Create a new prison.  @wq is used to queue quiesce continuations
 * (see dm_cell_quiesce_v2()).  The cell mempool reserves MIN_CELLS
 * entries so cell allocation can always make forward progress.
 *
 * (Stale reference to a @nr_cells parameter removed — the pool size is
 * fixed at MIN_CELLS.)
 *
 * Returns NULL on allocation failure.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
	struct dm_bio_prison_v2 *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
	int ret;

	if (!prison)
		return NULL;

	prison->wq = wq;
	spin_lock_init(&prison->lock);

	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
	if (ret) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);
 59
/*
 * Tear down a prison created by dm_bio_prison_create_v2().  The caller
 * must ensure no cells are still held or queued.
 */
void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);
 66
/*
 * Allocate an uninitialised cell from the prison's mempool.  With a
 * sleepable @gfp the mempool will wait for a free entry rather than
 * fail; the cell is initialised later by __setup_new_cell().
 */
struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);
 72
/*
 * Return a cell to the prison's mempool.  The cell must already have
 * been removed from the rb-tree (or never inserted).
 */
void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
				struct dm_bio_prison_cell_v2 *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);
 79
 80static void __setup_new_cell(struct dm_cell_key_v2 *key,
 81			     struct dm_bio_prison_cell_v2 *cell)
 82{
 83	memset(cell, 0, sizeof(*cell));
 84	memcpy(&cell->key, key, sizeof(cell->key));
 85	bio_list_init(&cell->bios);
 86}
 87
 88static int cmp_keys(struct dm_cell_key_v2 *lhs,
 89		    struct dm_cell_key_v2 *rhs)
 90{
 91	if (lhs->virtual < rhs->virtual)
 92		return -1;
 93
 94	if (lhs->virtual > rhs->virtual)
 95		return 1;
 96
 97	if (lhs->dev < rhs->dev)
 98		return -1;
 99
100	if (lhs->dev > rhs->dev)
101		return 1;
102
103	if (lhs->block_end <= rhs->block_begin)
104		return -1;
105
106	if (lhs->block_begin >= rhs->block_end)
107		return 1;
108
109	return 0;
110}
111
112/*
113 * Returns true if node found, otherwise it inserts a new one.
114 */
115static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
116			     struct dm_cell_key_v2 *key,
117			     struct dm_bio_prison_cell_v2 *cell_prealloc,
118			     struct dm_bio_prison_cell_v2 **result)
119{
120	int r;
121	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;
122
123	while (*new) {
124		struct dm_bio_prison_cell_v2 *cell =
125			rb_entry(*new, struct dm_bio_prison_cell_v2, node);
126
127		r = cmp_keys(key, &cell->key);
128
129		parent = *new;
130		if (r < 0)
131			new = &((*new)->rb_left);
132
133		else if (r > 0)
134			new = &((*new)->rb_right);
135
136		else {
137			*result = cell;
138			return true;
139		}
140	}
141
142	__setup_new_cell(key, cell_prealloc);
143	*result = cell_prealloc;
144	rb_link_node(&cell_prealloc->node, parent, new);
145	rb_insert_color(&cell_prealloc->node, &prison->cells);
146
147	return false;
148}
149
150static bool __get(struct dm_bio_prison_v2 *prison,
151		  struct dm_cell_key_v2 *key,
152		  unsigned int lock_level,
153		  struct bio *inmate,
154		  struct dm_bio_prison_cell_v2 *cell_prealloc,
155		  struct dm_bio_prison_cell_v2 **cell)
156{
157	if (__find_or_insert(prison, key, cell_prealloc, cell)) {
158		if ((*cell)->exclusive_lock) {
159			if (lock_level <= (*cell)->exclusive_level) {
160				bio_list_add(&(*cell)->bios, inmate);
161				return false;
162			}
163		}
164
165		(*cell)->shared_count++;
166
167	} else
168		(*cell)->shared_count = 1;
169
170	return true;
171}
172
173bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
174		    struct dm_cell_key_v2 *key,
175		    unsigned int lock_level,
176		    struct bio *inmate,
177		    struct dm_bio_prison_cell_v2 *cell_prealloc,
178		    struct dm_bio_prison_cell_v2 **cell_result)
179{
180	int r;
181
182	spin_lock_irq(&prison->lock);
183	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
184	spin_unlock_irq(&prison->lock);
185
186	return r;
187}
188EXPORT_SYMBOL_GPL(dm_cell_get_v2);
189
190static bool __put(struct dm_bio_prison_v2 *prison,
191		  struct dm_bio_prison_cell_v2 *cell)
192{
193	BUG_ON(!cell->shared_count);
194	cell->shared_count--;
195
196	// FIXME: shared locks granted above the lock level could starve this
197	if (!cell->shared_count) {
198		if (cell->exclusive_lock) {
199			if (cell->quiesce_continuation) {
200				queue_work(prison->wq, cell->quiesce_continuation);
201				cell->quiesce_continuation = NULL;
202			}
203		} else {
204			rb_erase(&cell->node, &prison->cells);
205			return true;
206		}
207	}
208
209	return false;
210}
211
/*
 * Drop a shared lock obtained with dm_cell_get_v2().
 *
 * Returns true if the cell is now completely unused and has been
 * removed from the prison; the caller then owns it and should free it
 * with dm_bio_prison_free_cell_v2().
 *
 * NOTE(review): this is the only wrapper using the irqsave spinlock
 * variant — presumably it may be called from irq context; confirm
 * against callers before converting to spin_lock_irq().
 */
bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_bio_prison_cell_v2 *cell)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __put(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_put_v2);
225
226static int __lock(struct dm_bio_prison_v2 *prison,
227		  struct dm_cell_key_v2 *key,
228		  unsigned int lock_level,
229		  struct dm_bio_prison_cell_v2 *cell_prealloc,
230		  struct dm_bio_prison_cell_v2 **cell_result)
231{
232	struct dm_bio_prison_cell_v2 *cell;
233
234	if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
235		if (cell->exclusive_lock)
236			return -EBUSY;
237
238		cell->exclusive_lock = true;
239		cell->exclusive_level = lock_level;
240		*cell_result = cell;
241
242		// FIXME: we don't yet know what level these shared locks
243		// were taken at, so have to quiesce them all.
244		return cell->shared_count > 0;
245
246	} else {
247		cell = cell_prealloc;
248		cell->shared_count = 0;
249		cell->exclusive_lock = true;
250		cell->exclusive_level = lock_level;
251		*cell_result = cell;
252	}
253
254	return 0;
255}
256
/*
 * Take an exclusive lock on @key at @lock_level.
 *
 * Returns < 0 on error (-EBUSY if already exclusively locked), 0 if
 * the lock was granted with nothing to wait for, and 1 if the caller
 * must quiesce remaining shared holders via dm_cell_quiesce_v2().
 */
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned int lock_level,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);
272
273static void __quiesce(struct dm_bio_prison_v2 *prison,
274		      struct dm_bio_prison_cell_v2 *cell,
275		      struct work_struct *continuation)
276{
277	if (!cell->shared_count)
278		queue_work(prison->wq, continuation);
279	else
280		cell->quiesce_continuation = continuation;
281}
282
/*
 * After dm_cell_lock_v2()/dm_cell_lock_promote_v2() returned 1, queue
 * @continuation to run (on the prison's workqueue) once all shared
 * holders of @cell have dropped out.
 */
void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
			struct dm_bio_prison_cell_v2 *cell,
			struct work_struct *continuation)
{
	spin_lock_irq(&prison->lock);
	__quiesce(prison, cell, continuation);
	spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
292
293static int __promote(struct dm_bio_prison_v2 *prison,
294		     struct dm_bio_prison_cell_v2 *cell,
295		     unsigned int new_lock_level)
296{
297	if (!cell->exclusive_lock)
298		return -EINVAL;
299
300	cell->exclusive_level = new_lock_level;
301	return cell->shared_count > 0;
302}
303
/*
 * Promote an exclusive lock to @new_lock_level.
 *
 * Returns -EINVAL if the cell is not exclusively locked, 1 if the
 * caller must quiesce shared holders again (dm_cell_quiesce_v2()),
 * 0 otherwise.
 */
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
			    struct dm_bio_prison_cell_v2 *cell,
			    unsigned int new_lock_level)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __promote(prison, cell, new_lock_level);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);
317
318static bool __unlock(struct dm_bio_prison_v2 *prison,
319		     struct dm_bio_prison_cell_v2 *cell,
320		     struct bio_list *bios)
321{
322	BUG_ON(!cell->exclusive_lock);
323
324	bio_list_merge(bios, &cell->bios);
325	bio_list_init(&cell->bios);
326
327	if (cell->shared_count) {
328		cell->exclusive_lock = false;
329		return false;
330	}
331
332	rb_erase(&cell->node, &prison->cells);
333	return true;
334}
335
/*
 * Drop an exclusive lock.  Bios that were blocked on the cell are
 * appended to @bios for the caller to reissue.
 *
 * Returns true if the cell is now unused and has been removed from the
 * prison; the caller should then free it.
 */
bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
		       struct dm_bio_prison_cell_v2 *cell,
		       struct bio_list *bios)
{
	bool r;

	spin_lock_irq(&prison->lock);
	r = __unlock(prison, cell, bios);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);
349
350/*----------------------------------------------------------------*/
351
352int __init dm_bio_prison_init_v2(void)
353{
354	_cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
355	if (!_cell_cache)
356		return -ENOMEM;
357
358	return 0;
359}
360
/*
 * Module exit: destroy the shared slab cache.  All prisons must have
 * been destroyed first.
 */
void dm_bio_prison_exit_v2(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;	/* defend against accidental reuse */
}
/*
 * NOTE(review): a second scraped copy of the same file follows, as of
 * Linux v5.14.15 (older style: no SPDX line, `unsigned` rather than
 * `unsigned int`).  Only one copy should be kept in a real source tree.
 */
  1/*
  2 * Copyright (C) 2012-2017 Red Hat, Inc.
  3 *
  4 * This file is released under the GPL.
  5 */
  6
  7#include "dm.h"
  8#include "dm-bio-prison-v2.h"
  9
 10#include <linux/spinlock.h>
 11#include <linux/mempool.h>
 12#include <linux/module.h>
 13#include <linux/slab.h>
 14#include <linux/rwsem.h>
 15
 16/*----------------------------------------------------------------*/
 17
 18#define MIN_CELLS 1024
 19
/*
 * Top-level prison object.  All mutable state is protected by @lock.
 */
struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;	/* runs quiesce continuations */

	spinlock_t lock;		/* protects @cells and all cell state */
	struct rb_root cells;		/* cells indexed by dm_cell_key_v2 */
	mempool_t cell_pool;		/* backing allocator for cells */
};

/* Slab cache shared by all prisons; created in dm_bio_prison_init_v2(). */
static struct kmem_cache *_cell_cache;
 29
 30/*----------------------------------------------------------------*/
 31
/*
 * Create a new prison.  @wq is used to queue quiesce continuations.
 * The cell mempool reserves MIN_CELLS entries so cell allocation can
 * always make forward progress.  (Stale reference to a @nr_cells
 * parameter removed — the pool size is fixed at MIN_CELLS.)
 *
 * Returns NULL on allocation failure.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
	struct dm_bio_prison_v2 *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
	int ret;

	if (!prison)
		return NULL;

	prison->wq = wq;
	spin_lock_init(&prison->lock);

	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
	if (ret) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);
 58
/*
 * Tear down a prison; no cells may still be held or queued.
 */
void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);
 65
/*
 * Allocate an uninitialised cell from the prison's mempool; it is
 * initialised later by __setup_new_cell().
 */
struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);
 71
/*
 * Return a cell to the mempool.  The cell must no longer be in the
 * rb-tree.
 */
void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
				struct dm_bio_prison_cell_v2 *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);
 78
/*
 * Initialise @cell as a fresh, unlocked, empty cell holding a copy of
 * @key.  The memset zeroes shared_count/exclusive_lock/continuation.
 */
static void __setup_new_cell(struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell)
{
	memset(cell, 0, sizeof(*cell));
	memcpy(&cell->key, key, sizeof(cell->key));
	bio_list_init(&cell->bios);
}
 86
/*
 * Total order on keys: by virtual level, then device, then block
 * range.  Keys whose [block_begin, block_end) ranges overlap on the
 * same device/level compare equal, so a lookup finds any intersecting
 * cell.
 */
static int cmp_keys(struct dm_cell_key_v2 *lhs,
		    struct dm_cell_key_v2 *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	/* ranges overlap */
	return 0;
}
110
/*
 * Returns true if node found, otherwise it inserts a new one.
 *
 * On a hit, *result points at the existing cell.  On a miss,
 * @cell_prealloc is initialised, linked into the rb-tree and *result
 * points at it.  Caller must hold prison->lock.
 */
static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
			     struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell_prealloc,
			     struct dm_bio_prison_cell_v2 **result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	/* standard rb-tree descent, remembering the insertion point */
	while (*new) {
		struct dm_bio_prison_cell_v2 *cell =
			rb_entry(*new, struct dm_bio_prison_cell_v2, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);

		else if (r > 0)
			new = &((*new)->rb_right);

		else {
			/* existing cell for this key */
			*result = cell;
			return true;
		}
	}

	/* not present: insert the preallocated cell at the leaf we found */
	__setup_new_cell(key, cell_prealloc);
	*result = cell_prealloc;
	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return false;
}
148
/*
 * Try to take a shared reference on the cell for @key.  Returns true
 * if granted; returns false if an exclusive lock at a level >= ours
 * blocks us, in which case @inmate is parked on the cell's bio list.
 * Caller must hold prison->lock.
 */
static bool __get(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct bio *inmate,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell)
{
	if (__find_or_insert(prison, key, cell_prealloc, cell)) {
		if ((*cell)->exclusive_lock) {
			if (lock_level <= (*cell)->exclusive_level) {
				/* blocked by the exclusive holder: park the bio */
				bio_list_add(&(*cell)->bios, inmate);
				return false;
			}
		}

		(*cell)->shared_count++;

	} else
		/* freshly inserted cell: first shared holder */
		(*cell)->shared_count = 1;

	return true;
}
171
172bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
173		    struct dm_cell_key_v2 *key,
174		    unsigned lock_level,
175		    struct bio *inmate,
176		    struct dm_bio_prison_cell_v2 *cell_prealloc,
177		    struct dm_bio_prison_cell_v2 **cell_result)
178{
179	int r;
180
181	spin_lock_irq(&prison->lock);
182	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
183	spin_unlock_irq(&prison->lock);
184
185	return r;
186}
187EXPORT_SYMBOL_GPL(dm_cell_get_v2);
188
/*
 * Drop one shared reference.  When the last shared holder goes away:
 * if an exclusive locker is waiting, kick its quiesce continuation;
 * otherwise remove the now-unused cell from the tree.
 *
 * Returns true iff the cell was removed (caller should free it).
 * Caller must hold prison->lock.
 */
static bool __put(struct dm_bio_prison_v2 *prison,
		  struct dm_bio_prison_cell_v2 *cell)
{
	BUG_ON(!cell->shared_count);
	cell->shared_count--;

	// FIXME: shared locks granted above the lock level could starve this
	if (!cell->shared_count) {
		if (cell->exclusive_lock){
			/* exclusive holder was waiting for us to drain */
			if (cell->quiesce_continuation) {
				queue_work(prison->wq, cell->quiesce_continuation);
				cell->quiesce_continuation = NULL;
			}
		} else {
			/* completely unused now: drop the cell */
			rb_erase(&cell->node, &prison->cells);
			return true;
		}
	}

	return false;
}
210
/*
 * Drop a shared lock obtained with dm_cell_get_v2().  Returns true if
 * the cell is now unused and removed; caller should free it.
 *
 * NOTE(review): only wrapper using the irqsave variant — presumably
 * callable from irq context; confirm against callers before changing.
 */
bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_bio_prison_cell_v2 *cell)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __put(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_put_v2);
224
/*
 * Take the exclusive lock on @key at @lock_level.
 *
 * Returns -EBUSY if another exclusive holder exists, 0 if granted with
 * nothing to wait for, 1 if granted but shared holders remain (caller
 * must quiesce them).  Caller must hold prison->lock.
 */
static int __lock(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell_result)
{
	struct dm_bio_prison_cell_v2 *cell;

	if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
		if (cell->exclusive_lock)
			return -EBUSY;

		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;

		// FIXME: we don't yet know what level these shared locks
		// were taken at, so have to quiesce them all.
		return cell->shared_count > 0;

	} else {
		/* freshly inserted cell: no shared holders by construction */
		cell = cell_prealloc;
		cell->shared_count = 0;
		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;
	}

	return 0;
}
255
/*
 * Take an exclusive lock on @key.  Returns < 0 on error, 0 if granted,
 * 1 if the caller must quiesce shared holders first.
 */
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);
271
/*
 * Arrange for @continuation to run once all shared holders of @cell
 * are gone: queue immediately if none remain, otherwise stash it on
 * the cell for __put() to queue.  Caller must hold prison->lock.
 */
static void __quiesce(struct dm_bio_prison_v2 *prison,
		      struct dm_bio_prison_cell_v2 *cell,
		      struct work_struct *continuation)
{
	if (!cell->shared_count)
		queue_work(prison->wq, continuation);
	else
		cell->quiesce_continuation = continuation;
}
281
/*
 * After a lock/promote returned 1, queue @continuation to run once all
 * shared holders of @cell have dropped out.
 */
void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
			struct dm_bio_prison_cell_v2 *cell,
			struct work_struct *continuation)
{
	spin_lock_irq(&prison->lock);
	__quiesce(prison, cell, continuation);
	spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
291
/*
 * Change the level of an already-held exclusive lock.  Returns -EINVAL
 * if not exclusively locked, 1 if shared holders remain (caller must
 * quiesce again), 0 otherwise.  Caller must hold prison->lock.
 */
static int __promote(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     unsigned new_lock_level)
{
	if (!cell->exclusive_lock)
		return -EINVAL;

	cell->exclusive_level = new_lock_level;
	return cell->shared_count > 0;
}
302
/*
 * Promote an exclusive lock to @new_lock_level.  Returns -EINVAL if
 * not exclusively locked, 1 if the caller must quiesce shared holders
 * again, 0 otherwise.
 */
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
			    struct dm_bio_prison_cell_v2 *cell,
			    unsigned new_lock_level)
{
	int r;

	spin_lock_irq(&prison->lock);
	r = __promote(prison, cell, new_lock_level);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);
316
/*
 * Release the exclusive lock on @cell, transferring all parked bios to
 * @bios for the caller to reissue.  Returns true iff the cell had no
 * shared holders and was removed (caller should free it).
 * Caller must hold prison->lock.
 */
static bool __unlock(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     struct bio_list *bios)
{
	BUG_ON(!cell->exclusive_lock);

	/* hand the parked bios back to the caller */
	bio_list_merge(bios, &cell->bios);
	bio_list_init(&cell->bios);

	if (cell->shared_count) {
		/* shared holders remain: just clear the exclusive flag */
		cell->exclusive_lock = false;
		return false;
	}

	/* nobody else holds the cell: drop it */
	rb_erase(&cell->node, &prison->cells);
	return true;
}
334
/*
 * Drop an exclusive lock; blocked bios are appended to @bios for the
 * caller to reissue.  Returns true if the cell is now unused and has
 * been removed from the prison (caller should free it).
 */
bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
		       struct dm_bio_prison_cell_v2 *cell,
		       struct bio_list *bios)
{
	bool r;

	spin_lock_irq(&prison->lock);
	r = __unlock(prison, cell, bios);
	spin_unlock_irq(&prison->lock);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);
348
349/*----------------------------------------------------------------*/
350
/*
 * Module init: create the slab cache shared by all prison instances.
 * Returns 0 on success, -ENOMEM on failure.
 */
int __init dm_bio_prison_init_v2(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}
359
/*
 * Module exit: destroy the shared slab cache.  All prisons must have
 * been destroyed first.
 */
void dm_bio_prison_exit_v2(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;	/* defend against accidental reuse */
}