v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. To guarantee
 * ordering between writes and reads, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
}

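/*
 * Map a sequential run of LBAs starting at @blba. Data found in the write
 * buffer is copied straight into @bio; otherwise the PPAs are left in @rqd
 * for a device read. Returns the number of sectors handled, which may be
 * fewer than rqd->nr_ppas; the caller then splits the bio at that point.
 */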
static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 struct bio *bio, sector_t blba,
				 bool *from_cache)
{
	void *meta_list = rqd->meta_list;
	int nr_secs, i;

retry:
	nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
					from_cache);

	if (!*from_cache)
		goto end;

	for (i = 0; i < nr_secs; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		sector_t lba = blba + i;

		if (pblk_ppa_empty(rqd->ppa_list[i])) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta->lba = addr_empty;
		} else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
			/*
			 * Try to read from write buffer. The address is later
			 * checked on the write buffer to prevent retrieving
			 * overwritten data.
			 */
			if (!pblk_read_from_cache(pblk, bio, lba,
							rqd->ppa_list[i])) {
				if (i == 0) {
					/*
					 * We have not called bio_advance()
					 * yet, so we can just retry.
					 */
					goto retry;
				} else {
					/*
					 * We have already called
					 * bio_advance(), so we cannot retry.
					 * Leave this function so the caller
					 * can split the bio at the current
					 * sector position.
					 */
					nr_secs = i;
					goto end;
				}
			}
			meta->lba = cpu_to_le64(lba);
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		}
		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

end:
	if (pblk_io_aligned(pblk, nr_secs))
		rqd->is_seq = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif

	return nr_secs;
}

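/*
 * Verify the LBAs stored in the OOB metadata of a sequential read against
 * the LBA range that was requested.
 */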
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
				sector_t blba)
{
	void *meta_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		u64 lba = le64_to_cpu(meta->lba);

		if (lba == ADDR_EMPTY)
			continue;

		if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							lba, (u64)blba + i);
			WARN_ON(1);
		}
	}
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
				 u64 *lba_list, int nr_lbas)
{
	void *meta_lba_list = rqd->meta_list;
	int i, j;

	if (!pblk_is_oob_meta_supported(pblk))
		return;

	for (i = 0, j = 0; i < nr_lbas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
							   meta_lba_list, j);
		u64 lba = lba_list[i];
		u64 meta_lba;

		if (lba == ADDR_EMPTY)
			continue;

		meta_lba = le64_to_cpu(meta->lba);

		if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

			print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
			pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
							meta_lba, lba);
			WARN_ON(1);
		}

		j++;
	}

	WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

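/*
 * Complete the original user bio. A high-ECC warning from the device still
 * returns data, so it is not propagated to the user as an error.
 */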
static void pblk_end_user_read(struct bio *bio, int error)
{
	if (error && error != NVM_RSP_WARN_HIGHECC)
		bio_io_error(bio);
	else
		bio_endio(bio);
}

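/*
 * Common read completion: finish I/O accounting, verify the OOB metadata,
 * optionally drop the line references and free the request.
 */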
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *int_bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	bio_end_io_acct(int_bio, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);

	pblk_read_check_seq(pblk, rqd, r_ctx->lba);
	bio_put(int_bio);

	if (put_line)
		pblk_rq_to_line_put(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = (struct bio *)r_ctx->private;

	pblk_end_user_read(bio, rqd->error);
	__pblk_end_io_read(pblk, rqd, true);
}

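/*
 * Single-sector counterpart of pblk_read_ppalist_rq(): resolve one LBA and
 * either serve it from the write buffer or set up a device read.
 */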
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
			 sector_t lba, bool *from_cache)
{
	struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		meta->lba = addr_empty;
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
			goto retry;
		}

		meta->lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}
}

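/*
 * Entry point for user reads. Sectors served by the write buffer are copied
 * into a clone of the user bio; whenever a request would mix cache and
 * device data, the bio is split and the remainder is resubmitted to the
 * block layer, so each chained piece is served from a single source.
 */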
void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	bool from_cache;
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	struct bio *int_bio, *split_bio;
	unsigned long start_time;

	start_time = bio_start_io_acct(bio);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = start_time;
	r_ctx->lba = blba;

	if (pblk_alloc_rqd_meta(pblk, rqd)) {
		bio_io_error(bio);
		pblk_free_rqd(pblk, rqd, PBLK_READ);
		return;
	}

	/* Clone read bio to deal internally with:
	 * - read errors when reading from drive
	 * - bio_advance() calls during cache reads
	 */
	int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);

	if (nr_secs > 1)
		nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
						&from_cache);
	else
		pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);

split_retry:
	r_ctx->private = bio; /* original bio */
	rqd->bio = int_bio; /* internal bio */

	if (from_cache && nr_secs == rqd->nr_ppas) {
		/* All data was read from cache, we can complete the IO. */
		pblk_end_user_read(bio, 0);
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
	} else if (nr_secs != rqd->nr_ppas) {
		/* The read bio request could be partially filled by the write
		 * buffer, but there are some holes that need to be read from
		 * the drive. To handle this, we use the block layer mechanism
		 * to split this request into smaller ones and chain them.
		 */
		split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
					&pblk_bio_set);
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);

		/* The new bio contains the first N sectors of the previous
		 * one, so we can continue to use the existing rqd, but we
		 * need to shrink the number of PPAs in it. The new bio is
		 * also guaranteed to contain data either from cache or from
		 * drive, never a mix of them.
		 */
		bio = split_bio;
		rqd->nr_ppas = nr_secs;
		if (rqd->nr_ppas == 1)
			rqd->ppa_addr = rqd->ppa_list[0];

		/* Recreate int_bio - the existing one may already have had
		 * some of its internal fields modified.
		 */
		bio_put(int_bio);
		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
		goto split_retry;
	} else if (pblk_submit_io(pblk, rqd, NULL)) {
		/* Submitting IO to drive failed, let's report an error */
		rqd->error = -ENODEV;
		pblk_end_io_read(rqd);
	}
}

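/*
 * GC reads: re-check each candidate sector against the current L2P mapping
 * and keep only those still mapped to the line being collected; sectors
 * that were rewritten in the meantime are dropped from the GC request.
 */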
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->capacity) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

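/*
 * Synchronous read of still-valid sectors on behalf of the garbage
 * collector; the data is placed in gc_rq->data.
 */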
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_rq rqd;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	if (gc_rq->nr_secs > 1) {
		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;

	if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
		ret = -EIO;
		goto err_free_dma;
	}

	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;

err_free_dma:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}
v4.17
 
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. To guarantee
 * ordering between writes and reads, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
						bio_iter, advanced_bio);
}

static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 sector_t blba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *bio = rqd->bio;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
								advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(lba);
			advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int err;

	err = pblk_submit_io(pblk, rqd);
	if (err)
		return NVM_IO_ERR;

	return NVM_IO_OK;
}

static void pblk_read_check(struct pblk *pblk, struct nvm_rq *rqd,
			   sector_t blba)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	for (i = 0; i < nr_lbas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY)
			continue;

		WARN(lba != blba + i, "pblk: corrupted read LBA\n");
	}
}

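/*
 * Release the per-line references held by this read request, one for each
 * PPA it mapped.
 */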
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list;
	int i;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct ppa_addr ppa = ppa_list[i];
		struct pblk_line *line;

		line = &pblk->lines[pblk_ppa_to_line(ppa)];
		kref_put(&line->ref, pblk_line_put_wq);
	}
}

static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_DEBUG
	WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
	bio_endio(bio);
	bio_put(bio);
}

static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;
	unsigned long start_time = r_ctx->start_time;

	generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

	pblk_read_check(pblk, rqd, r_ctx->lba);

	bio_put(bio);
	if (r_ctx->private)
		pblk_end_user_read((struct bio *)r_ctx->private);

	if (put_line)
		pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_read(pblk, rqd, true);
}

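/*
 * Handle a read that was only partially served from the write buffer:
 * issue a synchronous device read for the holes into a temporary bio and
 * copy the resulting pages back into the original bio.
 */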
static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned int bio_init_idx,
				 unsigned long *read_bitmap)
{
	struct bio *new_bio, *bio = rqd->bio;
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	__le64 *lba_list_mem, *lba_list_media;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;

	/* Re-use allocated memory for intermediate lbas */
	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
	lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto err;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto err;
	}

	for (i = 0; i < nr_secs; i++)
		lba_list_mem[i] = meta_list[i].lba;

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	if (unlikely(nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pr_err("pblk: sync read IO submission failed\n");
		goto err;
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	if (unlikely(nr_holes == 1)) {
		struct ppa_addr ppa;

		ppa = rqd->ppa_addr;
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
		rqd->ppa_list[0] = ppa;
	}

	for (i = 0; i < nr_secs; i++) {
		lba_list_media[i] = meta_list[i].lba;
		meta_list[i].lba = lba_list_mem[i];
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
		struct pblk_line *line = &pblk->lines[line_id];

		kref_put(&line->ref, pblk_line_put);

		meta_list[hole].lba = lba_list_media[i];

		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, pblk->page_bio_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* Complete the original bio and associated request */
	bio_endio(bio);
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;

	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_OK;

err:
	pr_err("pblk: failed to perform partial read\n");

	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}

static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
			 sector_t lba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *bio = rqd->bio;
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}

int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct request_queue *q = dev->q;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	unsigned int bio_init_idx;
	unsigned long read_bitmap; /* Max 64 ppas per request */
	int ret = NVM_IO_ERR;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
		WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
					(unsigned long long)blba, nr_secs);
		return NVM_IO_ERR;
	}

	generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);

	bitmap_zero(&read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->start_time = jiffies;
	r_ctx->lba = blba;

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate ppa list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, blba, &read_bitmap);
	}

	bio_get(bio);
	if (bitmap_full(&read_bitmap, nr_secs)) {
		bio_endio(bio);
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
		return NVM_IO_OK;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			goto fail_end_io;
		}

		rqd->bio = int_bio;
		r_ctx->private = bio;

		ret = pblk_submit_read_io(pblk, rqd);
		if (ret) {
			pr_err("pblk: read IO submission failed\n");
			if (int_bio)
				bio_put(int_bio);
			goto fail_end_io;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	return pblk_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);

fail_rqd_free:
	pblk_free_rqd(pblk, rqd, PBLK_READ);
	return ret;
fail_end_io:
	__pblk_end_io_read(pblk, rqd, false);
	return ret;
}

static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	if (gc_rq->nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->csecs;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pr_err("pblk: GC read request failed\n");
		goto err_free_bio;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}