Linux Audio

Check our new training course

Loading...
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2016 CNEX Labs
  4 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
  5 *                  Matias Bjorling <matias@cnexlabs.com>
  6 *
  7 * This program is free software; you can redistribute it and/or
  8 * modify it under the terms of the GNU General Public License version
  9 * 2 as published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope that it will be useful, but
 12 * WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 14 * General Public License for more details.
 15 *
 16 * pblk-rl.c - pblk's rate limiter for user I/O
 17 *
 18 */
 19
 20#include "pblk.h"
 21
 22static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
 23{
 24	mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
 25}
 26
 27int pblk_rl_is_limit(struct pblk_rl *rl)
 28{
 29	int rb_space;
 30
 31	rb_space = atomic_read(&rl->rb_space);
 32
 33	return (rb_space == 0);
 34}
 35
 36int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
 37{
 38	int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
 39	int rb_space = atomic_read(&rl->rb_space);
 40
 41	if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
 42		return NVM_IO_ERR;
 43
 44	if (rb_user_cnt >= rl->rb_user_max)
 45		return NVM_IO_REQUEUE;
 46
 47	return NVM_IO_OK;
 48}
 49
 50void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
 51{
 52	int rb_space = atomic_read(&rl->rb_space);
 53
 54	if (unlikely(rb_space >= 0))
 55		atomic_sub(nr_entries, &rl->rb_space);
 56}
 57
 58int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
 59{
 60	int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
 61	int rb_user_active;
 62
 63	/* If there is no user I/O let GC take over space on the write buffer */
 64	rb_user_active = READ_ONCE(rl->rb_user_active);
 65	return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
 66}
 67
/*
 * Account @nr_entries of user I/O in the write buffer and mark user
 * I/O as active so that GC backs off (see pblk_rl_gc_may_insert());
 * the inactivity timer clears the flag again after 5 idle seconds.
 */
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_user_cnt);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 1);
	pblk_rl_kick_u_timer(rl);
}
 76
/* Track a line that suffered a write error and needs GC recovery */
void pblk_rl_werr_line_in(struct pblk_rl *rl)
{
	atomic_inc(&rl->werr_lines);
}

/* A write-error line has been dealt with; stop accounting for it */
void pblk_rl_werr_line_out(struct pblk_rl *rl)
{
	atomic_dec(&rl->werr_lines);
}

/* Account @nr_entries of GC I/O in the write buffer */
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_gc_cnt);
}
 91
 92void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
 93{
 94	atomic_sub(nr_user, &rl->rb_user_cnt);
 95	atomic_sub(nr_gc, &rl->rb_gc_cnt);
 96}
 97
/* Total number of free blocks tracked by the rate limiter */
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_blocks);
}

/* Free blocks accounted to user data (see pblk_rl_free_lines_dec()) */
unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_user_blocks);
}
107
108static void __pblk_rl_update_rates(struct pblk_rl *rl,
109				   unsigned long free_blocks)
110{
111	struct pblk *pblk = container_of(rl, struct pblk, rl);
112	int max = rl->rb_budget;
113	int werr_gc_needed = atomic_read(&rl->werr_lines);
114
115	if (free_blocks >= rl->high) {
116		if (werr_gc_needed) {
117			/* Allocate a small budget for recovering
118			 * lines with write errors
119			 */
120			rl->rb_gc_max = 1 << rl->rb_windows_pw;
121			rl->rb_user_max = max - rl->rb_gc_max;
122			rl->rb_state = PBLK_RL_WERR;
123		} else {
124			rl->rb_user_max = max;
125			rl->rb_gc_max = 0;
126			rl->rb_state = PBLK_RL_OFF;
127		}
128	} else if (free_blocks < rl->high) {
129		int shift = rl->high_pw - rl->rb_windows_pw;
130		int user_windows = free_blocks >> shift;
131		int user_max = user_windows << ilog2(NVM_MAX_VLBA);
132
133		rl->rb_user_max = user_max;
134		rl->rb_gc_max = max - user_max;
135
136		if (free_blocks <= rl->rsv_blocks) {
137			rl->rb_user_max = 0;
138			rl->rb_gc_max = max;
139		}
140
141		/* In the worst case, we will need to GC lines in the low list
142		 * (high valid sector count). If there are lines to GC on high
143		 * or mid lists, these will be prioritized
144		 */
145		rl->rb_state = PBLK_RL_LOW;
146	}
147
148	if (rl->rb_state != PBLK_RL_OFF)
149		pblk_gc_should_start(pblk);
150	else
151		pblk_gc_should_stop(pblk);
152}
153
/* Public entry point: refresh rates from the current free-block count */
void pblk_rl_update_rates(struct pblk_rl *rl)
{
	unsigned long free_user_blocks = pblk_rl_nr_user_free_blks(rl);

	__pblk_rl_update_rates(rl, free_user_blocks);
}
158
159void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
160{
161	int blk_in_line = atomic_read(&line->blk_in_line);
162	int free_blocks;
163
164	atomic_add(blk_in_line, &rl->free_blocks);
165	free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
166
167	__pblk_rl_update_rates(rl, free_blocks);
168}
169
/*
 * A line has been taken for writing: debit its good blocks from the
 * free-block counters. Only lines taken for user data (@used) also
 * debit free_user_blocks; either way the rates are refreshed with the
 * current user free-block count.
 */
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
			    bool used)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_sub(blk_in_line, &rl->free_blocks);

	if (used)
		free_blocks = atomic_sub_return(blk_in_line,
							&rl->free_user_blocks);
	else
		free_blocks = atomic_read(&rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}
186
/* High watermark (in blocks) above which rate limiting is relaxed */
int pblk_rl_high_thrs(struct pblk_rl *rl)
{
	return rl->high;
}

/* Maximum number of entries a single write buffer I/O may take */
int pblk_rl_max_io(struct pblk_rl *rl)
{
	return rl->rb_max_io;
}

/*
 * User I/O inactivity timeout: no user writes for 5 seconds (see
 * pblk_rl_kick_u_timer()), so clear the active flag and let GC take
 * over the write buffer.
 */
static void pblk_rl_u_timer(struct timer_list *t)
{
	struct pblk_rl *rl = from_timer(rl, t, u_timer);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 0);
}

/* Tear down the rate limiter (stop the inactivity timer) */
void pblk_rl_free(struct pblk_rl *rl)
{
	del_timer(&rl->u_timer);
}
209
/*
 * Initialize the rate limiter for a pblk instance.
 * @budget:    total number of write buffer entries to split between
 *             user I/O and GC.
 * @threshold: optional backpressure threshold used to cap the maximum
 *             size of a single write buffer I/O.
 */
void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int sec_meta, blk_meta;
	unsigned int rb_windows;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	/* High watermark: over-provisioned blocks minus metadata and one line */
	rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
	rl->high_pw = get_count_order(rl->high);

	rl->rsv_blocks = pblk_get_min_chks(pblk);

	/* This will always be a power-of-2 */
	rb_windows = budget / NVM_MAX_VLBA;
	rl->rb_windows_pw = get_count_order(rb_windows);

	/* To start with, all buffer is available to user I/O writers */
	rl->rb_budget = budget;
	rl->rb_user_max = budget;
	rl->rb_gc_max = 0;
	rl->rb_state = PBLK_RL_HIGH;

	/* Maximize I/O size and ensure that back threshold is respected */
	if (threshold)
		rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold;
	else
		rl->rb_max_io = budget - pblk->min_write_pgs_data - 1;

	atomic_set(&rl->rb_user_cnt, 0);
	atomic_set(&rl->rb_gc_cnt, 0);
	atomic_set(&rl->rb_space, -1);	/* -1: absolute space limit disabled */
	atomic_set(&rl->werr_lines, 0);

	timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);

	rl->rb_user_active = 0;
	rl->rb_gc_active = 0;
}
v4.17
 
  1/*
  2 * Copyright (C) 2016 CNEX Labs
  3 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
  4 *                  Matias Bjorling <matias@cnexlabs.com>
  5 *
  6 * This program is free software; you can redistribute it and/or
  7 * modify it under the terms of the GNU General Public License version
  8 * 2 as published by the Free Software Foundation.
  9 *
 10 * This program is distributed in the hope that it will be useful, but
 11 * WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 13 * General Public License for more details.
 14 *
 15 * pblk-rl.c - pblk's rate limiter for user I/O
 16 *
 17 */
 18
 19#include "pblk.h"
 20
/* Re-arm the user I/O inactivity timer for another 5 seconds */
static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
{
	mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
}

/*
 * Report whether the write buffer space budget has been exhausted
 * (rb_space counts down to zero; negative means the limit is disabled).
 */
int pblk_rl_is_limit(struct pblk_rl *rl)
{
	int rb_space;

	rb_space = atomic_read(&rl->rb_space);

	return (rb_space == 0);
}
 34
/*
 * Decide whether a user write of @nr_entries may proceed: NVM_IO_ERR if
 * the absolute space budget would be exceeded, NVM_IO_REQUEUE if the
 * user share of the buffer is full, NVM_IO_OK otherwise.
 */
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
{
	int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
	int rb_space = atomic_read(&rl->rb_space);

	/* rb_space < 0 means the absolute budget is disabled */
	if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
		return NVM_IO_ERR;

	if (rb_user_cnt >= rl->rb_user_max)
		return NVM_IO_REQUEUE;

	return NVM_IO_OK;
}

/* Account @nr_entries against the absolute space budget, if active */
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
{
	int rb_space = atomic_read(&rl->rb_space);

	if (unlikely(rb_space >= 0))
		atomic_sub(nr_entries, &rl->rb_space);
}
 56
/*
 * Decide whether GC may insert into the write buffer: always while user
 * I/O is idle, otherwise only within the GC share (rb_gc_max).
 * @nr_entries is currently unused.
 */
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
{
	int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
	int rb_user_active;

	/* If there is no user I/O let GC take over space on the write buffer */
	rb_user_active = READ_ONCE(rl->rb_user_active);
	return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
}

/*
 * Account @nr_entries of user I/O and mark user I/O active so that GC
 * backs off; the inactivity timer clears the flag after 5 idle seconds.
 */
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_user_cnt);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 1);
	pblk_rl_kick_u_timer(rl);
}
 75
 
 
 
 
 
 
 
 
 
 
/* Account @nr_entries of GC I/O in the write buffer */
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_gc_cnt);
}

/* Release user (@nr_user) and GC (@nr_gc) entries leaving the buffer */
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
{
	atomic_sub(nr_user, &rl->rb_user_cnt);
	atomic_sub(nr_gc, &rl->rb_gc_cnt);
}

/* Total number of free blocks tracked by the rate limiter */
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_blocks);
}

/* Free blocks accounted to user data (see pblk_rl_free_lines_dec()) */
unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_user_blocks);
}
 96
 97static void __pblk_rl_update_rates(struct pblk_rl *rl,
 98				   unsigned long free_blocks)
 99{
100	struct pblk *pblk = container_of(rl, struct pblk, rl);
101	int max = rl->rb_budget;
 
102
103	if (free_blocks >= rl->high) {
104		rl->rb_user_max = max;
105		rl->rb_gc_max = 0;
106		rl->rb_state = PBLK_RL_HIGH;
 
 
 
 
 
 
 
 
 
107	} else if (free_blocks < rl->high) {
108		int shift = rl->high_pw - rl->rb_windows_pw;
109		int user_windows = free_blocks >> shift;
110		int user_max = user_windows << PBLK_MAX_REQ_ADDRS_PW;
111
112		rl->rb_user_max = user_max;
113		rl->rb_gc_max = max - user_max;
114
115		if (free_blocks <= rl->rsv_blocks) {
116			rl->rb_user_max = 0;
117			rl->rb_gc_max = max;
118		}
119
120		/* In the worst case, we will need to GC lines in the low list
121		 * (high valid sector count). If there are lines to GC on high
122		 * or mid lists, these will be prioritized
123		 */
124		rl->rb_state = PBLK_RL_LOW;
125	}
126
127	if (rl->rb_state == (PBLK_RL_MID | PBLK_RL_LOW))
128		pblk_gc_should_start(pblk);
129	else
130		pblk_gc_should_stop(pblk);
131}
132
/* Refresh the user/GC rates from the current user free-block count */
void pblk_rl_update_rates(struct pblk_rl *rl)
{
	__pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
}

/*
 * A line has been freed: credit its good blocks to both free-block
 * counters and refresh the rate limiter state.
 */
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_add(blk_in_line, &rl->free_blocks);
	free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}

/*
 * A line has been taken for writing: debit its good blocks. Only lines
 * taken for user data (@used) also debit free_user_blocks.
 */
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
			    bool used)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_sub(blk_in_line, &rl->free_blocks);

	if (used)
		free_blocks = atomic_sub_return(blk_in_line,
							&rl->free_user_blocks);
	else
		free_blocks = atomic_read(&rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}
165
/* High watermark (in blocks) above which rate limiting is relaxed */
int pblk_rl_high_thrs(struct pblk_rl *rl)
{
	return rl->high;
}

/* Maximum number of entries a single write buffer I/O may take */
int pblk_rl_max_io(struct pblk_rl *rl)
{
	return rl->rb_max_io;
}

/*
 * User I/O inactivity timeout: clear the active flag so that GC may
 * take over the write buffer (see pblk_rl_gc_may_insert()).
 */
static void pblk_rl_u_timer(struct timer_list *t)
{
	struct pblk_rl *rl = from_timer(rl, t, u_timer);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 0);
}

/* Tear down the rate limiter (stop the inactivity timer) */
void pblk_rl_free(struct pblk_rl *rl)
{
	del_timer(&rl->u_timer);
}
188
/*
 * Initialize the rate limiter for a pblk instance.
 * @budget: total number of write buffer entries to split between user
 *          I/O and GC.
 */
void pblk_rl_init(struct pblk_rl *rl, int budget)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	/* Blocks held back for GC before user writes are cut off */
	int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
	int sec_meta, blk_meta;

	unsigned int rb_windows;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	/* High watermark: over-provisioned blocks minus metadata and one line */
	rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
	rl->high_pw = get_count_order(rl->high);

	rl->rsv_blocks = min_blocks;

	/* This will always be a power-of-2 */
	rb_windows = budget / PBLK_MAX_REQ_ADDRS;
	rl->rb_windows_pw = get_count_order(rb_windows);

	/* To start with, all buffer is available to user I/O writers */
	rl->rb_budget = budget;
	rl->rb_user_max = budget;
	rl->rb_max_io = budget >> 1;
	rl->rb_gc_max = 0;
	rl->rb_state = PBLK_RL_HIGH;

	atomic_set(&rl->rb_user_cnt, 0);
	atomic_set(&rl->rb_gc_cnt, 0);
	atomic_set(&rl->rb_space, -1);	/* -1: absolute space limit disabled */

	timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);

	rl->rb_user_active = 0;
	rl->rb_gc_active = 0;
}