/*
 * drivers/md/dm-cache-background-tracker.c as of Linux v6.8
 * (per-tracker work_cache variant).
 */
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017 Red Hat. All rights reserved.
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include "dm-cache-background-tracker.h"
  9
 10/*----------------------------------------------------------------*/
 11
 12#define DM_MSG_PREFIX "dm-background-tracker"
 13
/*
 * A single tracked piece of background work.  While tracked it lives on
 * one of the tracker's lists (issued or queued) and in the rb-tree of
 * pending work, keyed by the work's oblock.
 */
struct bt_work {
	struct list_head list;		/* entry in b->issued or b->queued */
	struct rb_node node;		/* entry in b->pending, keyed by work.oblock */
	struct policy_work work;	/* the work description handed out to callers */
};
 19
/*
 * Tracks background work items (promotes, demotes, writebacks) on
 * behalf of a cache policy.
 */
struct background_tracker {
	unsigned int max_work;		/* cap on total simultaneously tracked items */
	atomic_t pending_promotes;	/* nr of POLICY_PROMOTE items tracked */
	atomic_t pending_writebacks;	/* nr of POLICY_WRITEBACK items tracked */
	atomic_t pending_demotes;	/* nr of POLICY_DEMOTE items tracked */

	struct list_head issued;	/* work handed out to the caller */
	struct list_head queued;	/* work accepted but not yet issued */
	struct rb_root pending;		/* all tracked work, keyed by oblock */

	struct kmem_cache *work_cache;	/* allocator for struct bt_work */
};
 32
 
 
/*
 * Allocate and initialise a background_tracker that will track at most
 * @max_work outstanding work items.  Returns NULL on allocation
 * failure.  Free with btracker_destroy().
 */
struct background_tracker *btracker_create(unsigned int max_work)
{
	struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);

	if (!b) {
		DMERR("couldn't create background_tracker");
		return NULL;
	}

	b->max_work = max_work;
	atomic_set(&b->pending_promotes, 0);
	atomic_set(&b->pending_writebacks, 0);
	atomic_set(&b->pending_demotes, 0);

	INIT_LIST_HEAD(&b->issued);
	INIT_LIST_HEAD(&b->queued);

	b->pending = RB_ROOT;
	/*
	 * NOTE(review): KMEM_CACHE() names the slab cache after the type
	 * ("bt_work"), so creating a second tracker creates a second,
	 * identically named cache - confirm callers never hold more than
	 * one tracker at a time, or share a single module-wide cache.
	 */
	b->work_cache = KMEM_CACHE(bt_work, 0);
	if (!b->work_cache) {
		DMERR("couldn't create mempool for background work items");
		kfree(b);
		b = NULL;
	}

	return b;
}
EXPORT_SYMBOL_GPL(btracker_create);
 61
 62void btracker_destroy(struct background_tracker *b)
 63{
 64	struct bt_work *w, *tmp;
 65
 66	BUG_ON(!list_empty(&b->issued));
 67	list_for_each_entry_safe (w, tmp, &b->queued, list) {
 68		list_del(&w->list);
 69		kmem_cache_free(b->work_cache, w);
 70	}
 71
 72	kmem_cache_destroy(b->work_cache);
 73	kfree(b);
 74}
 75EXPORT_SYMBOL_GPL(btracker_destroy);
 76
 77static int cmp_oblock(dm_oblock_t lhs, dm_oblock_t rhs)
 78{
 79	if (from_oblock(lhs) < from_oblock(rhs))
 80		return -1;
 81
 82	if (from_oblock(rhs) < from_oblock(lhs))
 83		return 1;
 84
 85	return 0;
 86}
 87
/*
 * Insert @nw into the pending rb-tree, keyed by its oblock.
 * Returns false, without inserting, if work for that oblock is
 * already tracked.
 */
static bool __insert_pending(struct background_tracker *b,
			     struct bt_work *nw)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node, *parent = NULL;

	/* Standard rb-tree descent to find the insertion point. */
	while (*new) {
		w = container_of(*new, struct bt_work, node);

		parent = *new;
		/* Must use the same descent direction as __find_pending(). */
		cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			/* already present */
			return false;
	}

	rb_link_node(&nw->node, parent, new);
	rb_insert_color(&nw->node, &b->pending);

	return true;
}
116
/*
 * Look up the tracked work item for @oblock in the pending rb-tree.
 * Returns NULL if none is present.  The descent direction mirrors
 * __insert_pending(), so the two must stay in sync.
 */
static struct bt_work *__find_pending(struct background_tracker *b,
				      dm_oblock_t oblock)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node;

	while (*new) {
		w = container_of(*new, struct bt_work, node);

		cmp = cmp_oblock(w->work.oblock, oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			break;
	}

	/* *new is non-NULL iff the loop broke on a match. */
	return *new ? w : NULL;
}
140
141
142static void update_stats(struct background_tracker *b, struct policy_work *w, int delta)
143{
144	switch (w->op) {
145	case POLICY_PROMOTE:
146		atomic_add(delta, &b->pending_promotes);
147		break;
148
149	case POLICY_DEMOTE:
150		atomic_add(delta, &b->pending_demotes);
151		break;
152
153	case POLICY_WRITEBACK:
154		atomic_add(delta, &b->pending_writebacks);
155		break;
156	}
157}
158
/* Number of POLICY_WRITEBACK items currently tracked. */
unsigned int btracker_nr_writebacks_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_writebacks);
}
EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
164
/* Number of POLICY_DEMOTE items currently tracked. */
unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_demotes);
}
EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
170
171static bool max_work_reached(struct background_tracker *b)
172{
173	return atomic_read(&b->pending_promotes) +
174		atomic_read(&b->pending_writebacks) +
175		atomic_read(&b->pending_demotes) >= b->max_work;
176}
177
178static struct bt_work *alloc_work(struct background_tracker *b)
179{
180	if (max_work_reached(b))
181		return NULL;
182
183	return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
184}
185
186int btracker_queue(struct background_tracker *b,
187		   struct policy_work *work,
188		   struct policy_work **pwork)
189{
190	struct bt_work *w;
191
192	if (pwork)
193		*pwork = NULL;
194
195	w = alloc_work(b);
196	if (!w)
197		return -ENOMEM;
198
199	memcpy(&w->work, work, sizeof(*work));
200
201	if (!__insert_pending(b, w)) {
202		/*
203		 * There was a race, we'll just ignore this second
204		 * bit of work for the same oblock.
205		 */
206		kmem_cache_free(b->work_cache, w);
207		return -EINVAL;
208	}
209
210	if (pwork) {
211		*pwork = &w->work;
212		list_add(&w->list, &b->issued);
213	} else
214		list_add(&w->list, &b->queued);
215	update_stats(b, &w->work, 1);
216
217	return 0;
218}
219EXPORT_SYMBOL_GPL(btracker_queue);
220
221/*
222 * Returns -ENODATA if there's no work.
223 */
224int btracker_issue(struct background_tracker *b, struct policy_work **work)
225{
226	struct bt_work *w;
227
228	if (list_empty(&b->queued))
229		return -ENODATA;
230
231	w = list_first_entry(&b->queued, struct bt_work, list);
232	list_move(&w->list, &b->issued);
233	*work = &w->work;
234
235	return 0;
236}
237EXPORT_SYMBOL_GPL(btracker_issue);
238
/*
 * Stop tracking @op: drop its stats, remove it from the pending tree
 * and whichever list it sits on, and free it.  @op must point at the
 * policy_work embedded in a bt_work previously handed out by this
 * tracker.
 */
void btracker_complete(struct background_tracker *b,
		       struct policy_work *op)
{
	/* Recover the containing bt_work from the embedded policy_work. */
	struct bt_work *w = container_of(op, struct bt_work, work);

	update_stats(b, &w->work, -1);
	rb_erase(&w->node, &b->pending);
	list_del(&w->list);
	kmem_cache_free(b->work_cache, w);
}
EXPORT_SYMBOL_GPL(btracker_complete);
250
/*
 * Is there already tracked work (of any op type - the lookup does not
 * filter on op) for @oblock?
 */
bool btracker_promotion_already_present(struct background_tracker *b,
					dm_oblock_t oblock)
{
	return __find_pending(b, oblock) != NULL;
}
EXPORT_SYMBOL_GPL(btracker_promotion_already_present);
257
258/*----------------------------------------------------------------*/
/*
 * drivers/md/dm-cache-background-tracker.c as of Linux v6.13.7
 * (shared module-wide btracker_work_cache variant).
 */
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017 Red Hat. All rights reserved.
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include "dm-cache-background-tracker.h"
  9
 10/*----------------------------------------------------------------*/
 11
 12#define DM_MSG_PREFIX "dm-background-tracker"
 13
 
 
 
 
 
 
/*
 * Tracks background work items (promotes, demotes, writebacks) on
 * behalf of a cache policy.  Work items are allocated from the shared
 * btracker_work_cache rather than a per-tracker cache.
 */
struct background_tracker {
	unsigned int max_work;		/* cap on total simultaneously tracked items */
	atomic_t pending_promotes;	/* nr of POLICY_PROMOTE items tracked */
	atomic_t pending_writebacks;	/* nr of POLICY_WRITEBACK items tracked */
	atomic_t pending_demotes;	/* nr of POLICY_DEMOTE items tracked */

	struct list_head issued;	/* work handed out to the caller */
	struct list_head queued;	/* work accepted but not yet issued */
	struct rb_root pending;		/* all tracked work, keyed by oblock */
};
 24
 25struct kmem_cache *btracker_work_cache = NULL;
 26
/*
 * Allocate and initialise a background_tracker that will track at most
 * @max_work outstanding work items.  Returns NULL on allocation
 * failure.  Work items come from the shared btracker_work_cache,
 * which must already exist.  Free with btracker_destroy().
 */
struct background_tracker *btracker_create(unsigned int max_work)
{
	struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);

	if (!b) {
		DMERR("couldn't create background_tracker");
		return NULL;
	}

	b->max_work = max_work;
	atomic_set(&b->pending_promotes, 0);
	atomic_set(&b->pending_writebacks, 0);
	atomic_set(&b->pending_demotes, 0);

	INIT_LIST_HEAD(&b->issued);
	INIT_LIST_HEAD(&b->queued);

	b->pending = RB_ROOT;

	return b;
}
EXPORT_SYMBOL_GPL(btracker_create);
 49
 50void btracker_destroy(struct background_tracker *b)
 51{
 52	struct bt_work *w, *tmp;
 53
 54	BUG_ON(!list_empty(&b->issued));
 55	list_for_each_entry_safe (w, tmp, &b->queued, list) {
 56		list_del(&w->list);
 57		kmem_cache_free(btracker_work_cache, w);
 58	}
 59
 
 60	kfree(b);
 61}
 62EXPORT_SYMBOL_GPL(btracker_destroy);
 63
 64static int cmp_oblock(dm_oblock_t lhs, dm_oblock_t rhs)
 65{
 66	if (from_oblock(lhs) < from_oblock(rhs))
 67		return -1;
 68
 69	if (from_oblock(rhs) < from_oblock(lhs))
 70		return 1;
 71
 72	return 0;
 73}
 74
/*
 * Insert @nw into the pending rb-tree, keyed by its oblock.
 * Returns false, without inserting, if work for that oblock is
 * already tracked.
 */
static bool __insert_pending(struct background_tracker *b,
			     struct bt_work *nw)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node, *parent = NULL;

	/* Standard rb-tree descent to find the insertion point. */
	while (*new) {
		w = container_of(*new, struct bt_work, node);

		parent = *new;
		/* Must use the same descent direction as __find_pending(). */
		cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			/* already present */
			return false;
	}

	rb_link_node(&nw->node, parent, new);
	rb_insert_color(&nw->node, &b->pending);

	return true;
}
103
/*
 * Look up the tracked work item for @oblock in the pending rb-tree.
 * Returns NULL if none is present.  The descent direction mirrors
 * __insert_pending(), so the two must stay in sync.
 */
static struct bt_work *__find_pending(struct background_tracker *b,
				      dm_oblock_t oblock)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node;

	while (*new) {
		w = container_of(*new, struct bt_work, node);

		cmp = cmp_oblock(w->work.oblock, oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			break;
	}

	/* *new is non-NULL iff the loop broke on a match. */
	return *new ? w : NULL;
}
127
128
129static void update_stats(struct background_tracker *b, struct policy_work *w, int delta)
130{
131	switch (w->op) {
132	case POLICY_PROMOTE:
133		atomic_add(delta, &b->pending_promotes);
134		break;
135
136	case POLICY_DEMOTE:
137		atomic_add(delta, &b->pending_demotes);
138		break;
139
140	case POLICY_WRITEBACK:
141		atomic_add(delta, &b->pending_writebacks);
142		break;
143	}
144}
145
 
 
 
 
 
 
/* Number of POLICY_DEMOTE items currently tracked. */
unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_demotes);
}
EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
151
152static bool max_work_reached(struct background_tracker *b)
153{
154	return atomic_read(&b->pending_promotes) +
155		atomic_read(&b->pending_writebacks) +
156		atomic_read(&b->pending_demotes) >= b->max_work;
157}
158
159static struct bt_work *alloc_work(struct background_tracker *b)
160{
161	if (max_work_reached(b))
162		return NULL;
163
164	return kmem_cache_alloc(btracker_work_cache, GFP_NOWAIT);
165}
166
167int btracker_queue(struct background_tracker *b,
168		   struct policy_work *work,
169		   struct policy_work **pwork)
170{
171	struct bt_work *w;
172
173	if (pwork)
174		*pwork = NULL;
175
176	w = alloc_work(b);
177	if (!w)
178		return -ENOMEM;
179
180	memcpy(&w->work, work, sizeof(*work));
181
182	if (!__insert_pending(b, w)) {
183		/*
184		 * There was a race, we'll just ignore this second
185		 * bit of work for the same oblock.
186		 */
187		kmem_cache_free(btracker_work_cache, w);
188		return -EINVAL;
189	}
190
191	if (pwork) {
192		*pwork = &w->work;
193		list_add(&w->list, &b->issued);
194	} else
195		list_add(&w->list, &b->queued);
196	update_stats(b, &w->work, 1);
197
198	return 0;
199}
200EXPORT_SYMBOL_GPL(btracker_queue);
201
202/*
203 * Returns -ENODATA if there's no work.
204 */
205int btracker_issue(struct background_tracker *b, struct policy_work **work)
206{
207	struct bt_work *w;
208
209	if (list_empty(&b->queued))
210		return -ENODATA;
211
212	w = list_first_entry(&b->queued, struct bt_work, list);
213	list_move(&w->list, &b->issued);
214	*work = &w->work;
215
216	return 0;
217}
218EXPORT_SYMBOL_GPL(btracker_issue);
219
/*
 * Stop tracking @op: drop its stats, remove it from the pending tree
 * and whichever list it sits on, and free it.  @op must point at the
 * policy_work embedded in a bt_work previously handed out by this
 * tracker.
 */
void btracker_complete(struct background_tracker *b,
		       struct policy_work *op)
{
	/* Recover the containing bt_work from the embedded policy_work. */
	struct bt_work *w = container_of(op, struct bt_work, work);

	update_stats(b, &w->work, -1);
	rb_erase(&w->node, &b->pending);
	list_del(&w->list);
	kmem_cache_free(btracker_work_cache, w);
}
EXPORT_SYMBOL_GPL(btracker_complete);
231
/*
 * Is there already tracked work (of any op type - the lookup does not
 * filter on op) for @oblock?
 */
bool btracker_promotion_already_present(struct background_tracker *b,
					dm_oblock_t oblock)
{
	return __find_pending(b, oblock) != NULL;
}
EXPORT_SYMBOL_GPL(btracker_promotion_already_present);
238
239/*----------------------------------------------------------------*/