v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-map.c - pblk's lba-ppa mapping strategy
 *
 */

#include "pblk.h"

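/*
 * pblk_map_page_data - map one flash page worth of sectors
 *
 * Map up to pblk->min_write_pgs sectors, starting at write buffer entry
 * sentry, onto the current data line: give each sector a device PPA,
 * mirror its LBA into the line's emeta LBA list and the per-sector
 * out-of-band metadata, and pad the sectors beyond valid_secs with
 * ADDR_EMPTY.
 */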
static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
			      struct ppa_addr *ppa_list,
			      unsigned long *lun_bitmap,
			      void *meta_list,
			      unsigned int valid_secs)
{
	struct pblk_line *line = pblk_line_get_data(pblk);
	struct pblk_emeta *emeta;
	struct pblk_w_ctx *w_ctx;
	__le64 *lba_list;
	u64 paddr;
	int nr_secs = pblk->min_write_pgs;
	int i;

	if (!line)
		return -ENOSPC;

	if (pblk_line_is_full(line)) {
		struct pblk_line *prev_line = line;

		/* If we cannot allocate a new line, make sure to store metadata
		 * on the current line and then fail.
		 */
		line = pblk_line_replace_data(pblk);
		pblk_line_close_meta(pblk, prev_line);

		if (!line) {
			pblk_pipeline_stop(pblk);
			return -ENOSPC;
		}
	}

	emeta = line->emeta;
	lba_list = emeta_to_lbas(pblk, emeta->buf);

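	/* pblk_alloc_page() reserves nr_secs sectors on the line and returns
	 * the first line-local address of the reserved range; paddr is then
	 * advanced below to address each sector in turn.
	 */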
	paddr = pblk_alloc_page(pblk, line, nr_secs);

	for (i = 0; i < nr_secs; i++, paddr++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		/* ppa to be sent to the device */
		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		/* Write context for target bio completion on write buffer. Note
		 * that the write buffer is protected by the sync backpointer,
		 * and a single writer thread has access to each specific entry
		 * at a time. Thus, it is safe to modify the context for the
		 * entry we are setting up for submission without taking any
		 * lock or memory barrier.
		 */
		if (i < valid_secs) {
			kref_get(&line->ref);
			atomic_inc(&line->sec_to_update);
			w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
			w_ctx->ppa = ppa_list[i];
			meta->lba = cpu_to_le64(w_ctx->lba);
			lba_list[paddr] = cpu_to_le64(w_ctx->lba);
			if (lba_list[paddr] != addr_empty)
				line->nr_valid_lbas++;
			else
				atomic64_inc(&pblk->pad_wa);
		} else {
			lba_list[paddr] = addr_empty;
			meta->lba = addr_empty;
			__pblk_map_invalidate(pblk, line, paddr);
		}
	}

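	/* Take the per-LUN write semaphore; only ppa_list[0] is passed since
	 * the whole min-write unit targets the same LUN. lun_bitmap records
	 * the held LUNs so that the completion path can release them.
	 */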
	pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
	return 0;
}

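/*
 * pblk_map_rq - map a write request in min-write-page-sized chunks
 *
 * Walk the request's PPA list from sector offset off in steps of
 * pblk->min_write_pgs and map each chunk with pblk_map_page_data().
 * A non-zero off is used by pblk_map_erase_rq() to hand over a
 * partially mapped request.
 */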
int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off)
{
	void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
	void *meta_buffer;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i;
	int ret;

	for (i = off; i < rqd->nr_ppas; i += min) {
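		/* Only the tail chunk can be partially valid. For example,
		 * with min = 4 and valid_secs = 7, the first chunk maps four
		 * valid sectors and the second maps three, with the last
		 * sector padded by pblk_map_page_data().
		 */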
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		meta_buffer = pblk_get_meta(pblk, meta_list, i);

		ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
					lun_bitmap, meta_buffer, map_secs);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * pblk_map_erase_rq - map a write request and schedule one block erase
 *
 * Same mapping as pblk_map_rq(), but also pick one block on the erase line
 * that still needs erasing and return it through erase_ppa so that the
 * caller can issue the erase in line with the write. The erase semaphore
 * only needs to be acquired if erase_ppa gets set.
 */
int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
	void *meta_buffer;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	struct pblk_line *e_line, *d_line;
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i, erase_lun;
	int ret;

	for (i = 0; i < rqd->nr_ppas; i += min) {
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		meta_buffer = pblk_get_meta(pblk, meta_list, i);

		ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
					lun_bitmap, meta_buffer, map_secs);
		if (ret)
			return ret;

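		/* LUN position of the chunk just mapped; the erase line
		 * tracks per LUN, in erase_bitmap, which blocks already have
		 * an erase claimed for them.
		 */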
		erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);

		/* line can change after page map. We might also be writing the
		 * last line.
		 */
		e_line = pblk_line_get_erase(pblk);
		if (!e_line)
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);

		spin_lock(&e_line->lock);
		if (!test_bit(erase_lun, e_line->erase_bitmap)) {
			set_bit(erase_lun, e_line->erase_bitmap);
			atomic_dec(&e_line->left_eblks);

			*erase_ppa = ppa_list[i];
			erase_ppa->a.blk = e_line->id;
			erase_ppa->a.reserved = 0;

			spin_unlock(&e_line->lock);

			/* Avoid evaluating e_line->left_eblks */
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);
		}
		spin_unlock(&e_line->lock);
	}

	d_line = pblk_line_get_data(pblk);

	/* line can change after page map. We might also be writing the
	 * last line.
	 */
	e_line = pblk_line_get_erase(pblk);
	if (!e_line)
		return -ENOSPC;

	/* Block positions that are bad on the data line were never reached by
	 * the mapping loop above, yet the same position on the next line may
	 * be good and must still be erased. If no erase has been scheduled,
	 * pick one such block on the erase line.
	 */
	if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
			bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
		int bit = -1;

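		/* Scan the data line's bad-block bitmap for a position whose
		 * erase on the erase line has not been claimed yet.
		 */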
retry:
		bit = find_next_bit(d_line->blk_bitmap,
						lm->blk_per_line, bit + 1);
		if (bit >= lm->blk_per_line)
			return 0;

		spin_lock(&e_line->lock);
		if (test_bit(bit, e_line->erase_bitmap)) {
			spin_unlock(&e_line->lock);
			goto retry;
		}
		spin_unlock(&e_line->lock);

		set_bit(bit, e_line->erase_bitmap);
		atomic_dec(&e_line->left_eblks);
		*erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
		erase_ppa->a.blk = e_line->id;
	}

	return 0;
}
v4.17
 
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-map.c - pblk's lba-ppa mapping strategy
 *
 */

#include "pblk.h"

static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
			       struct ppa_addr *ppa_list,
			       unsigned long *lun_bitmap,
			       struct pblk_sec_meta *meta_list,
			       unsigned int valid_secs)
{
	struct pblk_line *line = pblk_line_get_data(pblk);
	struct pblk_emeta *emeta;
	struct pblk_w_ctx *w_ctx;
	__le64 *lba_list;
	u64 paddr;
	int nr_secs = pblk->min_write_pgs;
	int i;

	if (pblk_line_is_full(line)) {
		struct pblk_line *prev_line = line;

		line = pblk_line_replace_data(pblk);
		pblk_line_close_meta(pblk, prev_line);
	}

	emeta = line->emeta;
	lba_list = emeta_to_lbas(pblk, emeta->buf);

	paddr = pblk_alloc_page(pblk, line, nr_secs);

	for (i = 0; i < nr_secs; i++, paddr++) {
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		/* ppa to be sent to the device */
		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		/* Write context for target bio completion on write buffer. Note
		 * that the write buffer is protected by the sync backpointer,
		 * and a single writer thread has access to each specific entry
		 * at a time. Thus, it is safe to modify the context for the
		 * entry we are setting up for submission without taking any
		 * lock or memory barrier.
		 */
		if (i < valid_secs) {
			kref_get(&line->ref);
			w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
			w_ctx->ppa = ppa_list[i];
			meta_list[i].lba = cpu_to_le64(w_ctx->lba);
			lba_list[paddr] = cpu_to_le64(w_ctx->lba);
			if (lba_list[paddr] != addr_empty)
				line->nr_valid_lbas++;
			else
				atomic64_inc(&pblk->pad_wa);
		} else {
			lba_list[paddr] = meta_list[i].lba = addr_empty;
			__pblk_map_invalidate(pblk, line, paddr);
		}
	}

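	/* In this version the write semaphore is taken for every LUN
	 * addressed by the request; lun_bitmap records the held LUNs for
	 * release on completion.
	 */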
	pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
}

void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i;

	for (i = off; i < rqd->nr_ppas; i += min) {
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
					lun_bitmap, &meta_list[i], map_secs);
	}
}

/* only if erase_ppa is set, acquire erase semaphore */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct pblk_line *e_line, *d_line;
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i, erase_lun;

	for (i = 0; i < rqd->nr_ppas; i += min) {
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
					lun_bitmap, &meta_list[i], map_secs);

		erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]);

		/* line can change after page map. We might also be writing the
		 * last line.
		 */
		e_line = pblk_line_get_erase(pblk);
		if (!e_line)
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);

		spin_lock(&e_line->lock);
		if (!test_bit(erase_lun, e_line->erase_bitmap)) {
			set_bit(erase_lun, e_line->erase_bitmap);
			atomic_dec(&e_line->left_eblks);

			*erase_ppa = rqd->ppa_list[i];
			erase_ppa->a.blk = e_line->id;

			spin_unlock(&e_line->lock);

			/* Avoid evaluating e_line->left_eblks */
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);
		}
		spin_unlock(&e_line->lock);
	}

	d_line = pblk_line_get_data(pblk);

	/* line can change after page map. We might also be writing the
	 * last line.
	 */
	e_line = pblk_line_get_erase(pblk);
	if (!e_line)
		return;

	/* Erase blocks that are bad in this line but might not be in next */
	if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
			bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
		int bit = -1;

retry:
		bit = find_next_bit(d_line->blk_bitmap,
						lm->blk_per_line, bit + 1);
		if (bit >= lm->blk_per_line)
			return;

		spin_lock(&e_line->lock);
		if (test_bit(bit, e_line->erase_bitmap)) {
			spin_unlock(&e_line->lock);
			goto retry;
		}
		spin_unlock(&e_line->lock);

		set_bit(bit, e_line->erase_bitmap);
		atomic_dec(&e_line->left_eblks);
		*erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
		erase_ppa->a.blk = e_line->id;
	}
}