Linux v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2016 CNEX Labs
   4 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
   5 *                  Matias Bjorling <matias@cnexlabs.com>
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version
   9 * 2 as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but
  12 * WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * General Public License for more details.
  15 *
  16 * pblk-core.c - pblk's core functionality
  17 *
  18 */
  19
  20#define CREATE_TRACE_POINTS
  21
  22#include "pblk.h"
  23#include "pblk-trace.h"
  24
  25static void pblk_line_mark_bb(struct work_struct *work)
  26{
  27	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
  28									ws);
  29	struct pblk *pblk = line_ws->pblk;
  30	struct nvm_tgt_dev *dev = pblk->dev;
  31	struct ppa_addr *ppa = line_ws->priv;
  32	int ret;
  33
  34	ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
  35	if (ret) {
  36		struct pblk_line *line;
  37		int pos;
  38
  39		line = pblk_ppa_to_line(pblk, *ppa);
  40		pos = pblk_ppa_to_pos(&dev->geo, *ppa);
  41
  42		pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
  43				line->id, pos);
  44	}
  45
  46	kfree(ppa);
  47	mempool_free(line_ws, &pblk->gen_ws_pool);
  48}
  49
  50static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
  51			 struct ppa_addr ppa_addr)
  52{
  53	struct nvm_tgt_dev *dev = pblk->dev;
  54	struct nvm_geo *geo = &dev->geo;
  55	struct ppa_addr *ppa;
  56	int pos = pblk_ppa_to_pos(geo, ppa_addr);
  57
  58	pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
  59	atomic_long_inc(&pblk->erase_failed);
  60
  61	atomic_dec(&line->blk_in_line);
  62	if (test_and_set_bit(pos, line->blk_bitmap))
  63		pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
  64							line->id, pos);
  65
  66	/* Not necessary to mark bad blocks on 2.0 spec. */
  67	if (geo->version == NVM_OCSSD_SPEC_20)
  68		return;
  69
  70	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
  71	if (!ppa)
  72		return;
  73
  74	*ppa = ppa_addr;
  75	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
  76						GFP_ATOMIC, pblk->bb_wq);
  77}
  78
  79static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
  80{
  81	struct nvm_tgt_dev *dev = pblk->dev;
  82	struct nvm_geo *geo = &dev->geo;
  83	struct nvm_chk_meta *chunk;
  84	struct pblk_line *line;
  85	int pos;
  86
  87	line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
  88	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
  89	chunk = &line->chks[pos];
  90
  91	atomic_dec(&line->left_seblks);
  92
  93	if (rqd->error) {
  94		trace_pblk_chunk_reset(pblk_disk_name(pblk),
  95				&rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
  96
  97		chunk->state = NVM_CHK_ST_OFFLINE;
  98		pblk_mark_bb(pblk, line, rqd->ppa_addr);
  99	} else {
 100		trace_pblk_chunk_reset(pblk_disk_name(pblk),
 101				&rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
 102
 103		chunk->state = NVM_CHK_ST_FREE;
 104	}
 105
 106	trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
 107				chunk->state);
 108
 109	atomic_dec(&pblk->inflight_io);
 110}
 111
 112/* Erase completion assumes that only one block is erased at a time */
 113static void pblk_end_io_erase(struct nvm_rq *rqd)
 114{
 115	struct pblk *pblk = rqd->private;
 116
 117	__pblk_end_io_erase(pblk, rqd);
 118	mempool_free(rqd, &pblk->e_rq_pool);
 119}
 120
 121/*
 122 * Get information for all chunks from the device.
 123 *
 124 * The caller is responsible for freeing (with vfree) the returned structure
 125 */
 126struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
 127{
 128	struct nvm_tgt_dev *dev = pblk->dev;
 129	struct nvm_geo *geo = &dev->geo;
 130	struct nvm_chk_meta *meta;
 131	struct ppa_addr ppa;
 132	unsigned long len;
 133	int ret;
 134
 135	ppa.ppa = 0;
 136
 137	len = geo->all_chunks * sizeof(*meta);
 138	meta = vzalloc(len);
 139	if (!meta)
 140		return ERR_PTR(-ENOMEM);
 141
 142	ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
 143	if (ret) {
 144		vfree(meta);
 145		return ERR_PTR(-EIO);
 146	}
 147
 148	return meta;
 149}
 150
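    /* The metadata array returned by pblk_get_chunk_meta() is laid out
     * group-major: all chunks of a LUN are contiguous, and all LUNs of a
     * group are contiguous, so the entry for a ppa sits at
     * grp * num_lun * num_chk + pu * num_chk + chk.
     */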
 151struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
 152					      struct nvm_chk_meta *meta,
 153					      struct ppa_addr ppa)
 154{
 155	struct nvm_tgt_dev *dev = pblk->dev;
 156	struct nvm_geo *geo = &dev->geo;
 157	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
 158	int lun_off = ppa.m.pu * geo->num_chk;
 159	int chk_off = ppa.m.chk;
 160
 161	return meta + ch_off + lun_off + chk_off;
 162}
 163
 164void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
 165			   u64 paddr)
 166{
 167	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 168	struct list_head *move_list = NULL;
 169
 170	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
 171	 * table is modified with reclaimed sectors, a check is done to ensure
 172	 * that newer updates are not overwritten.
 173	 */
 174	spin_lock(&line->lock);
 175	WARN_ON(line->state == PBLK_LINESTATE_FREE);
 176
 177	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
 178		WARN_ONCE(1, "pblk: double invalidate\n");
 179		spin_unlock(&line->lock);
 180		return;
 181	}
 182	le32_add_cpu(line->vsc, -1);
 183
 184	if (line->state == PBLK_LINESTATE_CLOSED)
 185		move_list = pblk_line_gc_list(pblk, line);
 186	spin_unlock(&line->lock);
 187
 188	if (move_list) {
 189		spin_lock(&l_mg->gc_lock);
 190		spin_lock(&line->lock);
 191		/* Prevent moving a line that has just been chosen for GC */
 192		if (line->state == PBLK_LINESTATE_GC) {
 193			spin_unlock(&line->lock);
 194			spin_unlock(&l_mg->gc_lock);
 195			return;
 196		}
 197		spin_unlock(&line->lock);
 198
 199		list_move_tail(&line->list, move_list);
 200		spin_unlock(&l_mg->gc_lock);
 201	}
 202}
 203
 204void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
 205{
 206	struct pblk_line *line;
 207	u64 paddr;
 208
 209#ifdef CONFIG_NVM_PBLK_DEBUG
 210	/* Callers must ensure that the ppa points to a device address */
 211	BUG_ON(pblk_addr_in_cache(ppa));
 212	BUG_ON(pblk_ppa_empty(ppa));
 213#endif
 214
 215	line = pblk_ppa_to_line(pblk, ppa);
 216	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
 217
 218	__pblk_map_invalidate(pblk, line, paddr);
 219}
 220
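    /* Discard support: drop the L2P mapping for a contiguous LBA range,
     * invalidating any sector that is currently mapped to the device.
     */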
 221static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
 222				  unsigned int nr_secs)
 223{
 224	sector_t lba;
 225
 226	spin_lock(&pblk->trans_lock);
 227	for (lba = slba; lba < slba + nr_secs; lba++) {
 228		struct ppa_addr ppa;
 229
 230		ppa = pblk_trans_map_get(pblk, lba);
 231
 232		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
 233			pblk_map_invalidate(pblk, ppa);
 234
 235		pblk_ppa_set_empty(&ppa);
 236		pblk_trans_map_set(pblk, lba, ppa);
 237	}
 238	spin_unlock(&pblk->trans_lock);
 239}
 240
 241int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
 242{
 243	struct nvm_tgt_dev *dev = pblk->dev;
 244
 245	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
 246							&rqd->dma_meta_list);
 247	if (!rqd->meta_list)
 248		return -ENOMEM;
 249
 250	if (rqd->nr_ppas == 1)
 251		return 0;
 252
 253	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
 254	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
 255
 256	return 0;
 257}
 258
 259void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
 260{
 261	struct nvm_tgt_dev *dev = pblk->dev;
 262
 263	if (rqd->meta_list)
 264		nvm_dev_dma_free(dev->parent, rqd->meta_list,
 265				rqd->dma_meta_list);
 266}
 267
 268/* Caller must guarantee that the request is a valid type */
 269struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
 270{
 271	mempool_t *pool;
 272	struct nvm_rq *rqd;
 273	int rq_size;
 274
 275	switch (type) {
 276	case PBLK_WRITE:
 277	case PBLK_WRITE_INT:
 278		pool = &pblk->w_rq_pool;
 279		rq_size = pblk_w_rq_size;
 280		break;
 281	case PBLK_READ:
 282		pool = &pblk->r_rq_pool;
 283		rq_size = pblk_g_rq_size;
 284		break;
 285	default:
 286		pool = &pblk->e_rq_pool;
 287		rq_size = pblk_g_rq_size;
 288	}
 289
 290	rqd = mempool_alloc(pool, GFP_KERNEL);
 291	memset(rqd, 0, rq_size);
 292
 293	return rqd;
 294}
 295
 296/* Typically used on completion path. Cannot guarantee request consistency */
 297void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
 298{
 299	mempool_t *pool;
 300
 301	switch (type) {
 302	case PBLK_WRITE:
 303		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
 304		fallthrough;
 305	case PBLK_WRITE_INT:
 306		pool = &pblk->w_rq_pool;
 307		break;
 308	case PBLK_READ:
 309		pool = &pblk->r_rq_pool;
 310		break;
 311	case PBLK_ERASE:
 312		pool = &pblk->e_rq_pool;
 313		break;
 314	default:
 315		pblk_err(pblk, "trying to free unknown rqd type\n");
 316		return;
 317	}
 318
 319	pblk_free_rqd_meta(pblk, rqd);
 320	mempool_free(rqd, pool);
 321}
 322
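    /* Walk the bio's payload in PBLK_EXPOSED_PAGE_SIZE units and return
     * the pages from unit index 'off' onwards to the page pool.
     */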
 323void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
 324			 int nr_pages)
 325{
 326	struct bio_vec *bv;
 327	struct page *page;
 328	int i, e, nbv = 0;
 329
 330	for (i = 0; i < bio->bi_vcnt; i++) {
 331		bv = &bio->bi_io_vec[i];
 332		page = bv->bv_page;
 333		for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
 334			if (nbv >= off)
 335				mempool_free(page++, &pblk->page_bio_pool);
 336	}
 337}
 338
 339int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
 340		       int nr_pages)
 341{
 342	struct request_queue *q = pblk->dev->q;
 343	struct page *page;
 344	int i, ret;
 345
 346	for (i = 0; i < nr_pages; i++) {
 347		page = mempool_alloc(&pblk->page_bio_pool, flags);
 348
 349		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
 350		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
 351			pblk_err(pblk, "could not add page to bio\n");
 352			mempool_free(page, &pblk->page_bio_pool);
 353			goto err;
 354		}
 355	}
 356
 357	return 0;
 358err:
 359	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
 360	return -1;
 361}
 362
 363void pblk_write_kick(struct pblk *pblk)
 364{
 365	wake_up_process(pblk->writer_ts);
 366	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
 367}
 368
 369void pblk_write_timer_fn(struct timer_list *t)
 370{
 371	struct pblk *pblk = from_timer(pblk, t, wtimer);
 372
 373	/* kick the write thread every tick to flush outstanding data */
 374	pblk_write_kick(pblk);
 375}
 376
 377void pblk_write_should_kick(struct pblk *pblk)
 378{
 379	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
 380
 381	if (secs_avail >= pblk->min_write_pgs_data)
 382		pblk_write_kick(pblk);
 383}
 384
 385static void pblk_wait_for_meta(struct pblk *pblk)
 386{
 387	do {
 388		if (!atomic_read(&pblk->inflight_io))
 389			break;
 390
 391		schedule();
 392	} while (1);
 393}
 394
 395static void pblk_flush_writer(struct pblk *pblk)
 396{
 397	pblk_rb_flush(&pblk->rwb);
 398	do {
 399		if (!pblk_rb_sync_count(&pblk->rwb))
 400			break;
 401
 402		pblk_write_kick(pblk);
 403		schedule();
 404	} while (1);
 405}
 406
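    /* Pick the GC list a line belongs on from its valid sector count (vsc),
     * first compensating vsc for packed metadata sectors: write-error lines
     * take priority, then the full/high/mid/low/empty buckets; a vsc beyond
     * the line's sector count marks the line corrupt.
     */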
 407struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
 408{
 409	struct pblk_line_meta *lm = &pblk->lm;
 410	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 411	struct list_head *move_list = NULL;
 412	int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
 413			* (pblk->min_write_pgs - pblk->min_write_pgs_data);
 414	int vsc = le32_to_cpu(*line->vsc) + packed_meta;
 415
 416	lockdep_assert_held(&line->lock);
 417
 418	if (line->w_err_gc->has_write_err) {
 419		if (line->gc_group != PBLK_LINEGC_WERR) {
 420			line->gc_group = PBLK_LINEGC_WERR;
 421			move_list = &l_mg->gc_werr_list;
 422			pblk_rl_werr_line_in(&pblk->rl);
 423		}
 424	} else if (!vsc) {
 425		if (line->gc_group != PBLK_LINEGC_FULL) {
 426			line->gc_group = PBLK_LINEGC_FULL;
 427			move_list = &l_mg->gc_full_list;
 428		}
 429	} else if (vsc < lm->high_thrs) {
 430		if (line->gc_group != PBLK_LINEGC_HIGH) {
 431			line->gc_group = PBLK_LINEGC_HIGH;
 432			move_list = &l_mg->gc_high_list;
 433		}
 434	} else if (vsc < lm->mid_thrs) {
 435		if (line->gc_group != PBLK_LINEGC_MID) {
 436			line->gc_group = PBLK_LINEGC_MID;
 437			move_list = &l_mg->gc_mid_list;
 438		}
 439	} else if (vsc < line->sec_in_line) {
 440		if (line->gc_group != PBLK_LINEGC_LOW) {
 441			line->gc_group = PBLK_LINEGC_LOW;
 442			move_list = &l_mg->gc_low_list;
 443		}
 444	} else if (vsc == line->sec_in_line) {
 445		if (line->gc_group != PBLK_LINEGC_EMPTY) {
 446			line->gc_group = PBLK_LINEGC_EMPTY;
 447			move_list = &l_mg->gc_empty_list;
 448		}
 449	} else {
 450		line->state = PBLK_LINESTATE_CORRUPT;
 451		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
 452					line->state);
 453
 454		line->gc_group = PBLK_LINEGC_NONE;
 455		move_list =  &l_mg->corrupt_list;
 456		pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
 457						line->id, vsc,
 458						line->sec_in_line,
 459						lm->high_thrs, lm->mid_thrs);
 460	}
 461
 462	return move_list;
 463}
 464
 465void pblk_discard(struct pblk *pblk, struct bio *bio)
 466{
 467	sector_t slba = pblk_get_lba(bio);
 468	sector_t nr_secs = pblk_get_secs(bio);
 469
 470	pblk_invalidate_range(pblk, slba, nr_secs);
 471}
 472
 473void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
 474{
 475	atomic_long_inc(&pblk->write_failed);
 476#ifdef CONFIG_NVM_PBLK_DEBUG
 477	pblk_print_failed_rqd(pblk, rqd, rqd->error);
 478#endif
 479}
 480
 481void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
 482{
 483	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
 484	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
 485		atomic_long_inc(&pblk->read_empty);
 486		return;
 487	}
 488
 489	switch (rqd->error) {
 490	case NVM_RSP_WARN_HIGHECC:
 491		atomic_long_inc(&pblk->read_high_ecc);
 492		break;
 493	case NVM_RSP_ERR_FAILECC:
 494	case NVM_RSP_ERR_FAILCRC:
 495		atomic_long_inc(&pblk->read_failed);
 496		break;
 497	default:
 498		pblk_err(pblk, "unknown read error:%d\n", rqd->error);
 499	}
 500#ifdef CONFIG_NVM_PBLK_DEBUG
 501	pblk_print_failed_rqd(pblk, rqd, rqd->error);
 502#endif
 503}
 504
 505void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
 506{
 507	pblk->sec_per_write = sec_per_write;
 508}
 509
 510int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
 511{
 512	struct nvm_tgt_dev *dev = pblk->dev;
 513
 514	atomic_inc(&pblk->inflight_io);
 515
 516#ifdef CONFIG_NVM_PBLK_DEBUG
 517	if (pblk_check_io(pblk, rqd))
 518		return NVM_IO_ERR;
 519#endif
 520
 521	return nvm_submit_io(dev, rqd, buf);
 522}
 523
 524void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
 525{
 526	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 527
 528	int i;
 529
 530	for (i = 0; i < rqd->nr_ppas; i++) {
 531		struct ppa_addr *ppa = &ppa_list[i];
 532		struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
 533		u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
 534
 535		if (caddr == 0)
 536			trace_pblk_chunk_state(pblk_disk_name(pblk),
 537							ppa, NVM_CHK_ST_OPEN);
 538		else if (caddr == (chunk->cnlb - 1))
 539			trace_pblk_chunk_state(pblk_disk_name(pblk),
 540							ppa, NVM_CHK_ST_CLOSED);
 541	}
 542}
 543
 544int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
 545{
 546	struct nvm_tgt_dev *dev = pblk->dev;
 547	int ret;
 548
 549	atomic_inc(&pblk->inflight_io);
 550
 551#ifdef CONFIG_NVM_PBLK_DEBUG
 552	if (pblk_check_io(pblk, rqd))
 553		return NVM_IO_ERR;
 554#endif
 555
 556	ret = nvm_submit_io_sync(dev, rqd, buf);
 557
 558	if (trace_pblk_chunk_state_enabled() && !ret &&
 559	    rqd->opcode == NVM_OP_PWRITE)
 560		pblk_check_chunk_state_update(pblk, rqd);
 561
 562	return ret;
 563}
 564
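    /* Bracket the synchronous submission with the chunk (LUN) semaphore so
     * only one inflight I/O hits the LUN at a time; mapping is page-granular,
     * so all ppas in the request fall on the same LUN as ppa_list[0].
     */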
 565static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
 566				   void *buf)
 567{
 568	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 569	int ret;
 570
 571	pblk_down_chunk(pblk, ppa_list[0]);
 572	ret = pblk_submit_io_sync(pblk, rqd, buf);
 573	pblk_up_chunk(pblk, ppa_list[0]);
 574
 575	return ret;
 576}
 577
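    /* Decide how many sectors to submit in one write: a full-sized write
     * when enough sectors are buffered, otherwise the largest multiple of
     * the minimum write size that fits, or a single minimum write when a
     * flush forces data out. With skip_meta, both bounds collapse to the
     * data-only minimum.
     */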
 578int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 579		   unsigned long secs_to_flush, bool skip_meta)
 580{
 581	int max = pblk->sec_per_write;
 582	int min = pblk->min_write_pgs;
 583	int secs_to_sync = 0;
 584
 585	if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
 586		min = max = pblk->min_write_pgs_data;
 587
 588	if (secs_avail >= max)
 589		secs_to_sync = max;
 590	else if (secs_avail >= min)
 591		secs_to_sync = min * (secs_avail / min);
 592	else if (secs_to_flush)
 593		secs_to_sync = min;
 594
 595	return secs_to_sync;
 596}
 597
 598void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
 599{
 600	u64 addr;
 601	int i;
 602
 603	spin_lock(&line->lock);
 604	addr = find_next_zero_bit(line->map_bitmap,
 605					pblk->lm.sec_per_line, line->cur_sec);
 606	line->cur_sec = addr - nr_secs;
 607
 608	for (i = 0; i < nr_secs; i++, line->cur_sec--)
 609		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
 610	spin_unlock(&line->lock);
 611}
 612
 613u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
 614{
 615	u64 addr;
 616	int i;
 617
 618	lockdep_assert_held(&line->lock);
 619
 620	/* logic error: ppa out-of-bounds. Prevent generating bad address */
 621	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
 622		WARN(1, "pblk: page allocation out of bounds\n");
 623		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
 624	}
 625
 626	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
 627					pblk->lm.sec_per_line, line->cur_sec);
 628	for (i = 0; i < nr_secs; i++, line->cur_sec++)
 629		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
 630
 631	return addr;
 632}
 633
 634u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
 635{
 636	u64 addr;
 637
 638	/* Lock needed in case a write fails and a recovery needs to remap
 639	 * failed write buffer entries
 640	 */
 641	spin_lock(&line->lock);
 642	addr = __pblk_alloc_page(pblk, line, nr_secs);
 643	line->left_msecs -= nr_secs;
 644	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
 645	spin_unlock(&line->lock);
 646
 647	return addr;
 648}
 649
 650u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
 651{
 652	u64 paddr;
 653
 654	spin_lock(&line->lock);
 655	paddr = find_next_zero_bit(line->map_bitmap,
 656					pblk->lm.sec_per_line, line->cur_sec);
 657	spin_unlock(&line->lock);
 658
 659	return paddr;
 660}
 661
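    /* smeta is stored at the start of the first good block of the line, so
     * its start offset shifts when the leading blocks are bad.
     */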
 662u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
 663{
 664	struct nvm_tgt_dev *dev = pblk->dev;
 665	struct nvm_geo *geo = &dev->geo;
 666	struct pblk_line_meta *lm = &pblk->lm;
 667	int bit;
 668
 669	/* This usually only happens on bad lines */
 670	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
 671	if (bit >= lm->blk_per_line)
 672		return -1;
 673
 674	return bit * geo->ws_opt;
 675}
 676
 677int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
 678{
 679	struct pblk_line_meta *lm = &pblk->lm;
 680	struct ppa_addr *ppa_list;
 681	struct nvm_rq rqd;
 682	u64 paddr = pblk_line_smeta_start(pblk, line);
 683	int i, ret;
 684
 685	memset(&rqd, 0, sizeof(struct nvm_rq));
 686
 687	ret = pblk_alloc_rqd_meta(pblk, &rqd);
 688	if (ret)
 689		return ret;
 690
 691	rqd.opcode = NVM_OP_PREAD;
 692	rqd.nr_ppas = lm->smeta_sec;
 693	rqd.is_seq = 1;
 694	ppa_list = nvm_rq_to_ppa_list(&rqd);
 695
 696	for (i = 0; i < lm->smeta_sec; i++, paddr++)
 697		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
 698
 699	ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
 700	if (ret) {
 701		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
 702		goto clear_rqd;
 703	}
 704
 705	atomic_dec(&pblk->inflight_io);
 706
 707	if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
 708		pblk_log_read_err(pblk, &rqd);
 709		ret = -EIO;
 710	}
 711
 712clear_rqd:
 713	pblk_free_rqd_meta(pblk, &rqd);
 714	return ret;
 715}
 716
 717static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
 718				 u64 paddr)
 719{
 720	struct pblk_line_meta *lm = &pblk->lm;
 721	struct ppa_addr *ppa_list;
 722	struct nvm_rq rqd;
 723	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
 724	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
 725	int i, ret;
 726
 727	memset(&rqd, 0, sizeof(struct nvm_rq));
 728
 729	ret = pblk_alloc_rqd_meta(pblk, &rqd);
 730	if (ret)
 731		return ret;
 732
 733	rqd.opcode = NVM_OP_PWRITE;
 734	rqd.nr_ppas = lm->smeta_sec;
 735	rqd.is_seq = 1;
 736	ppa_list = nvm_rq_to_ppa_list(&rqd);
 737
 738	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
 739		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
 740							   rqd.meta_list, i);
 741
 742		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
 743		meta->lba = lba_list[paddr] = addr_empty;
 744	}
 745
 746	ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
 747	if (ret) {
 748		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
 749		goto clear_rqd;
 750	}
 751
 752	atomic_dec(&pblk->inflight_io);
 753
 754	if (rqd.error) {
 755		pblk_log_write_err(pblk, &rqd);
 756		ret = -EIO;
 757	}
 758
 759clear_rqd:
 760	pblk_free_rqd_meta(pblk, &rqd);
 761	return ret;
 762}
 763
 764int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
 765			 void *emeta_buf)
 766{
 767	struct nvm_tgt_dev *dev = pblk->dev;
 768	struct nvm_geo *geo = &dev->geo;
 769	struct pblk_line_meta *lm = &pblk->lm;
 770	void *ppa_list_buf, *meta_list;
 771	struct ppa_addr *ppa_list;
 772	struct nvm_rq rqd;
 773	u64 paddr = line->emeta_ssec;
 774	dma_addr_t dma_ppa_list, dma_meta_list;
 775	int min = pblk->min_write_pgs;
 776	int left_ppas = lm->emeta_sec[0];
 777	int line_id = line->id;
 778	int rq_ppas, rq_len;
 779	int i, j;
 780	int ret;
 781
 782	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
 783							&dma_meta_list);
 784	if (!meta_list)
 785		return -ENOMEM;
 786
 787	ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
 788	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
 789
 790next_rq:
 791	memset(&rqd, 0, sizeof(struct nvm_rq));
 792
 793	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
 794	rq_len = rq_ppas * geo->csecs;
 795
 796	rqd.meta_list = meta_list;
 797	rqd.ppa_list = ppa_list_buf;
 798	rqd.dma_meta_list = dma_meta_list;
 799	rqd.dma_ppa_list = dma_ppa_list;
 800	rqd.opcode = NVM_OP_PREAD;
 801	rqd.nr_ppas = rq_ppas;
 802	ppa_list = nvm_rq_to_ppa_list(&rqd);
 803
 804	for (i = 0; i < rqd.nr_ppas; ) {
 805		struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
 806		int pos = pblk_ppa_to_pos(geo, ppa);
 807
 808		if (pblk_io_aligned(pblk, rq_ppas))
 809			rqd.is_seq = 1;
 810
 811		while (test_bit(pos, line->blk_bitmap)) {
 812			paddr += min;
 813			if (pblk_boundary_paddr_checks(pblk, paddr)) {
 814				ret = -EINTR;
 815				goto free_rqd_dma;
 816			}
 817
 818			ppa = addr_to_gen_ppa(pblk, paddr, line_id);
 819			pos = pblk_ppa_to_pos(geo, ppa);
 820		}
 821
 822		if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
 823			ret = -EINTR;
 824			goto free_rqd_dma;
 825		}
 826
 827		for (j = 0; j < min; j++, i++, paddr++)
 828			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
 829	}
 830
 831	ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
 832	if (ret) {
 833		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
 834		goto free_rqd_dma;
 835	}
 836
 837	atomic_dec(&pblk->inflight_io);
 838
 839	if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
 840		pblk_log_read_err(pblk, &rqd);
 841		ret = -EIO;
 842		goto free_rqd_dma;
 843	}
 844
 845	emeta_buf += rq_len;
 846	left_ppas -= rq_ppas;
 847	if (left_ppas)
 848		goto next_rq;
 849
 850free_rqd_dma:
 851	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
 852	return ret;
 853}
 854
 855static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
 856			    struct ppa_addr ppa)
 857{
 858	rqd->opcode = NVM_OP_ERASE;
 859	rqd->ppa_addr = ppa;
 860	rqd->nr_ppas = 1;
 861	rqd->is_seq = 1;
 862	rqd->bio = NULL;
 863}
 864
 865static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
 866{
 867	struct nvm_rq rqd = {NULL};
 868	int ret;
 869
 870	trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
 871				PBLK_CHUNK_RESET_START);
 872
 873	pblk_setup_e_rq(pblk, &rqd, ppa);
 874
 875	/* The write thread schedules erases so that it minimizes disturbances
 876	 * with writes. Thus, there is no need to take the LUN semaphore.
 877	 */
 878	ret = pblk_submit_io_sync(pblk, &rqd, NULL);
 879	rqd.private = pblk;
 880	__pblk_end_io_erase(pblk, &rqd);
 881
 882	return ret;
 883}
 884
 885int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
 886{
 887	struct pblk_line_meta *lm = &pblk->lm;
 888	struct ppa_addr ppa;
 889	int ret, bit = -1;
 890
 891	/* Erase only good blocks, one at a time */
 892	do {
 893		spin_lock(&line->lock);
 894		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
 895								bit + 1);
 896		if (bit >= lm->blk_per_line) {
 897			spin_unlock(&line->lock);
 898			break;
 899		}
 900
 901		ppa = pblk->luns[bit].bppa; /* set ch and lun */
 902		ppa.a.blk = line->id;
 903
 904		atomic_dec(&line->left_eblks);
 905		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
 906		spin_unlock(&line->lock);
 907
 908		ret = pblk_blk_erase_sync(pblk, ppa);
 909		if (ret) {
 910			pblk_err(pblk, "failed to erase line %d\n", line->id);
 911			return ret;
 912		}
 913	} while (1);
 914
 915	return 0;
 916}
 917
 918static void pblk_line_setup_metadata(struct pblk_line *line,
 919				     struct pblk_line_mgmt *l_mg,
 920				     struct pblk_line_meta *lm)
 921{
 922	int meta_line;
 923
 924	lockdep_assert_held(&l_mg->free_lock);
 925
 926retry_meta:
 927	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
 928	if (meta_line == PBLK_DATA_LINES) {
 929		spin_unlock(&l_mg->free_lock);
 930		io_schedule();
 931		spin_lock(&l_mg->free_lock);
 932		goto retry_meta;
 933	}
 934
 935	set_bit(meta_line, &l_mg->meta_bitmap);
 936	line->meta_line = meta_line;
 937
 938	line->smeta = l_mg->sline_meta[meta_line];
 939	line->emeta = l_mg->eline_meta[meta_line];
 940
 941	memset(line->smeta, 0, lm->smeta_len);
 942	memset(line->emeta->buf, 0, lm->emeta_len[0]);
 943
 944	line->emeta->mem = 0;
 945	atomic_set(&line->emeta->sync, 0);
 946}
 947
 948/* For now, lines are always assumed to be full lines. Thus, smeta former and
 949 * current lun bitmaps are omitted.
 950 */
 951static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
 952				  struct pblk_line *cur)
 953{
 954	struct nvm_tgt_dev *dev = pblk->dev;
 955	struct nvm_geo *geo = &dev->geo;
 956	struct pblk_line_meta *lm = &pblk->lm;
 957	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 958	struct pblk_emeta *emeta = line->emeta;
 959	struct line_emeta *emeta_buf = emeta->buf;
 960	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
 961	int nr_blk_line;
 962
 963	/* After erasing the line, new bad blocks might appear and we risk
 964	 * having an invalid line
 965	 */
 966	nr_blk_line = lm->blk_per_line -
 967			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
 968	if (nr_blk_line < lm->min_blk_line) {
 969		spin_lock(&l_mg->free_lock);
 970		spin_lock(&line->lock);
 971		line->state = PBLK_LINESTATE_BAD;
 972		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
 973					line->state);
 974		spin_unlock(&line->lock);
 975
 976		list_add_tail(&line->list, &l_mg->bad_list);
 977		spin_unlock(&l_mg->free_lock);
 978
 979		pblk_debug(pblk, "line %d is bad\n", line->id);
 980
 981		return 0;
 982	}
 983
 984	/* Run-time metadata */
 985	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
 986
 987	/* Mark LUNs allocated in this line (all for now) */
 988	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
 989
 990	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
 991	guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
 992	smeta_buf->header.id = cpu_to_le32(line->id);
 993	smeta_buf->header.type = cpu_to_le16(line->type);
 994	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
 995	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
 996
 997	/* Start metadata */
 998	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
 999	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
1000
1001	/* Fill metadata among lines */
1002	if (cur) {
1003		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
1004		smeta_buf->prev_id = cpu_to_le32(cur->id);
1005		cur->emeta->buf->next_id = cpu_to_le32(line->id);
1006	} else {
1007		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
1008	}
1009
1010	/* All smeta must be set at this point */
1011	smeta_buf->header.crc = cpu_to_le32(
1012			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
1013	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
1014
1015	/* End metadata */
1016	memcpy(&emeta_buf->header, &smeta_buf->header,
1017						sizeof(struct line_header));
1018
1019	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1020	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1021	emeta_buf->header.crc = cpu_to_le32(
1022			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1023
1024	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1025	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
1026	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
1027	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
1028	emeta_buf->crc = cpu_to_le32(0);
1029	emeta_buf->prev_id = smeta_buf->prev_id;
1030
1031	return 1;
1032}
1033
1034static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
1035{
1036	struct pblk_line_meta *lm = &pblk->lm;
1037	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1038
1039	line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
1040	if (!line->map_bitmap)
1041		return -ENOMEM;
1042
1043	memset(line->map_bitmap, 0, lm->sec_bitmap_len);
1044
1045	/* will be initialized using bb info from map_bitmap */
1046	line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
1047	if (!line->invalid_bitmap) {
1048		mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1049		line->map_bitmap = NULL;
1050		return -ENOMEM;
1051	}
1052
1053	return 0;
1054}
1055
1056/* For now, lines are always assumed to be full lines. Thus, smeta former and
1057 * current lun bitmaps are omitted.
1058 */
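    /* Build the line's sector bitmaps: stripe the bad-block template over
     * the map bitmap, reserve the leading sectors of the first good block
     * for smeta (written here when init is set) and the tail of the line
     * for emeta, then derive the usable sector count from what remains.
     */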
1059static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1060			     int init)
1061{
1062	struct nvm_tgt_dev *dev = pblk->dev;
1063	struct nvm_geo *geo = &dev->geo;
1064	struct pblk_line_meta *lm = &pblk->lm;
1065	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1066	u64 off;
1067	int bit = -1;
1068	int emeta_secs;
1069
1070	line->sec_in_line = lm->sec_per_line;
1071
1072	/* Capture bad block information on line mapping bitmaps */
1073	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1074					bit + 1)) < lm->blk_per_line) {
1075		off = bit * geo->ws_opt;
1076		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1077							lm->sec_per_line);
1078		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1079							lm->sec_per_line);
1080		line->sec_in_line -= geo->clba;
1081	}
1082
1083	/* Mark smeta metadata sectors as bad sectors */
1084	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1085	off = bit * geo->ws_opt;
1086	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1087	line->sec_in_line -= lm->smeta_sec;
1088	line->cur_sec = off + lm->smeta_sec;
1089
1090	if (init && pblk_line_smeta_write(pblk, line, off)) {
1091		pblk_debug(pblk, "line smeta I/O failed. Retry\n");
1092		return 0;
1093	}
1094
1095	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1096
1097	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
1098	 * blocks to make sure that there are enough sectors to store emeta
1099	 */
1100	emeta_secs = lm->emeta_sec[0];
1101	off = lm->sec_per_line;
1102	while (emeta_secs) {
1103		off -= geo->ws_opt;
1104		if (!test_bit(off, line->invalid_bitmap)) {
1105			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
1106			emeta_secs -= geo->ws_opt;
1107		}
1108	}
1109
1110	line->emeta_ssec = off;
1111	line->sec_in_line -= lm->emeta_sec[0];
1112	line->nr_valid_lbas = 0;
1113	line->left_msecs = line->sec_in_line;
1114	*line->vsc = cpu_to_le32(line->sec_in_line);
1115
1116	if (lm->sec_per_line - line->sec_in_line !=
1117		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1118		spin_lock(&line->lock);
1119		line->state = PBLK_LINESTATE_BAD;
1120		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1121					line->state);
1122		spin_unlock(&line->lock);
1123
1124		list_add_tail(&line->list, &l_mg->bad_list);
1125		pblk_err(pblk, "unexpected line %d is bad\n", line->id);
1126
1127		return 0;
1128	}
1129
1130	return 1;
1131}
1132
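    /* A line in the NEW state has never been written: chunks the device
     * already reports as free need no erase, so pre-mark them in the erase
     * bitmap and drop them from the erase count.
     */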
1133static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
1134{
1135	struct pblk_line_meta *lm = &pblk->lm;
1136	struct nvm_tgt_dev *dev = pblk->dev;
1137	struct nvm_geo *geo = &dev->geo;
1138	int blk_to_erase = atomic_read(&line->blk_in_line);
1139	int i;
1140
1141	for (i = 0; i < lm->blk_per_line; i++) {
1142		struct pblk_lun *rlun = &pblk->luns[i];
1143		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1144		int state = line->chks[pos].state;
1145
1146		/* Free chunks should not be erased */
1147		if (state & NVM_CHK_ST_FREE) {
1148			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
1149							line->erase_bitmap);
1150			blk_to_erase--;
1151		}
1152	}
1153
1154	return blk_to_erase;
1155}
1156
1157static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1158{
1159	struct pblk_line_meta *lm = &pblk->lm;
1160	int blk_in_line = atomic_read(&line->blk_in_line);
1161	int blk_to_erase;
1162
1163	/* Bad blocks do not need to be erased */
1164	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1165
1166	spin_lock(&line->lock);
1167
1168	/* If we have not written to this line, we need to mark free chunks
1169	 * as already erased
1170	 */
1171	if (line->state == PBLK_LINESTATE_NEW) {
1172		blk_to_erase = pblk_prepare_new_line(pblk, line);
1173		line->state = PBLK_LINESTATE_FREE;
1174		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1175					line->state);
1176	} else {
1177		blk_to_erase = blk_in_line;
1178	}
1179
1180	if (blk_in_line < lm->min_blk_line) {
1181		spin_unlock(&line->lock);
1182		return -EAGAIN;
1183	}
1184
1185	if (line->state != PBLK_LINESTATE_FREE) {
1186		WARN(1, "pblk: corrupted line %d, state %d\n",
1187							line->id, line->state);
1188		spin_unlock(&line->lock);
1189		return -EINTR;
1190	}
1191
1192	line->state = PBLK_LINESTATE_OPEN;
1193	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1194				line->state);
1195
1196	atomic_set(&line->left_eblks, blk_to_erase);
1197	atomic_set(&line->left_seblks, blk_to_erase);
1198
1199	line->meta_distance = lm->meta_distance;
1200	spin_unlock(&line->lock);
1201
1202	kref_init(&line->ref);
1203	atomic_set(&line->sec_to_update, 0);
1204
1205	return 0;
1206}
1207
1208/* Line allocations in the recovery path are always single threaded */
1209int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1210{
1211	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1212	int ret;
1213
1214	spin_lock(&l_mg->free_lock);
1215	l_mg->data_line = line;
1216	list_del(&line->list);
1217
1218	ret = pblk_line_prepare(pblk, line);
1219	if (ret) {
1220		list_add(&line->list, &l_mg->free_list);
1221		spin_unlock(&l_mg->free_lock);
1222		return ret;
1223	}
1224	spin_unlock(&l_mg->free_lock);
1225
1226	ret = pblk_line_alloc_bitmaps(pblk, line);
1227	if (ret)
1228		goto fail;
1229
1230	if (!pblk_line_init_bb(pblk, line, 0)) {
1231		ret = -EINTR;
1232		goto fail;
1233	}
1234
1235	pblk_rl_free_lines_dec(&pblk->rl, line, true);
1236	return 0;
1237
1238fail:
1239	spin_lock(&l_mg->free_lock);
1240	list_add(&line->list, &l_mg->free_list);
1241	spin_unlock(&l_mg->free_lock);
1242
1243	return ret;
1244}
1245
1246void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1247{
1248	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1249
1250	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1251	line->map_bitmap = NULL;
1252	line->smeta = NULL;
1253	line->emeta = NULL;
1254}
1255
1256static void pblk_line_reinit(struct pblk_line *line)
1257{
1258	*line->vsc = cpu_to_le32(EMPTY_ENTRY);
1259
1260	line->map_bitmap = NULL;
1261	line->invalid_bitmap = NULL;
1262	line->smeta = NULL;
1263	line->emeta = NULL;
1264}
1265
1266void pblk_line_free(struct pblk_line *line)
1267{
1268	struct pblk *pblk = line->pblk;
1269	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1270
1271	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1272	mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);
1273
1274	pblk_line_reinit(line);
1275}
1276
1277struct pblk_line *pblk_line_get(struct pblk *pblk)
1278{
1279	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1280	struct pblk_line_meta *lm = &pblk->lm;
1281	struct pblk_line *line;
1282	int ret, bit;
1283
1284	lockdep_assert_held(&l_mg->free_lock);
1285
1286retry:
1287	if (list_empty(&l_mg->free_list)) {
1288		pblk_err(pblk, "no free lines\n");
1289		return NULL;
1290	}
1291
1292	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1293	list_del(&line->list);
1294	l_mg->nr_free_lines--;
1295
1296	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1297	if (unlikely(bit >= lm->blk_per_line)) {
1298		spin_lock(&line->lock);
1299		line->state = PBLK_LINESTATE_BAD;
1300		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1301					line->state);
1302		spin_unlock(&line->lock);
1303
1304		list_add_tail(&line->list, &l_mg->bad_list);
1305
1306		pblk_debug(pblk, "line %d is bad\n", line->id);
1307		goto retry;
1308	}
1309
1310	ret = pblk_line_prepare(pblk, line);
1311	if (ret) {
1312		switch (ret) {
1313		case -EAGAIN:
1314			list_add(&line->list, &l_mg->bad_list);
1315			goto retry;
1316		case -EINTR:
1317			list_add(&line->list, &l_mg->corrupt_list);
1318			goto retry;
1319		default:
1320			pblk_err(pblk, "failed to prepare line %d\n", line->id);
1321			list_add(&line->list, &l_mg->free_list);
1322			l_mg->nr_free_lines++;
1323			return NULL;
1324		}
1325	}
1326
1327	return line;
1328}
1329
1330static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1331					 struct pblk_line *line)
1332{
1333	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1334	struct pblk_line *retry_line;
1335
1336retry:
1337	spin_lock(&l_mg->free_lock);
1338	retry_line = pblk_line_get(pblk);
1339	if (!retry_line) {
1340		l_mg->data_line = NULL;
1341		spin_unlock(&l_mg->free_lock);
1342		return NULL;
1343	}
1344
1345	retry_line->map_bitmap = line->map_bitmap;
1346	retry_line->invalid_bitmap = line->invalid_bitmap;
1347	retry_line->smeta = line->smeta;
1348	retry_line->emeta = line->emeta;
1349	retry_line->meta_line = line->meta_line;
1350
1351	pblk_line_reinit(line);
1352
1353	l_mg->data_line = retry_line;
1354	spin_unlock(&l_mg->free_lock);
1355
1356	pblk_rl_free_lines_dec(&pblk->rl, line, false);
1357
1358	if (pblk_line_erase(pblk, retry_line))
1359		goto retry;
1360
1361	return retry_line;
1362}
1363
1364static void pblk_set_space_limit(struct pblk *pblk)
1365{
1366	struct pblk_rl *rl = &pblk->rl;
1367
1368	atomic_set(&rl->rb_space, 0);
1369}
1370
1371struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1372{
1373	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1374	struct pblk_line *line;
1375
1376	spin_lock(&l_mg->free_lock);
1377	line = pblk_line_get(pblk);
1378	if (!line) {
1379		spin_unlock(&l_mg->free_lock);
1380		return NULL;
1381	}
1382
1383	line->seq_nr = l_mg->d_seq_nr++;
1384	line->type = PBLK_LINETYPE_DATA;
1385	l_mg->data_line = line;
1386
1387	pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1388
1389	/* Allocate next line for preparation */
1390	l_mg->data_next = pblk_line_get(pblk);
1391	if (!l_mg->data_next) {
1392		/* If we cannot get a new line, we need to stop the pipeline.
1393		 * Only allow as many writes in as we can store safely and then
1394		 * fail gracefully
1395		 */
1396		pblk_set_space_limit(pblk);
1397
1398		l_mg->data_next = NULL;
1399	} else {
1400		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1401		l_mg->data_next->type = PBLK_LINETYPE_DATA;
1402	}
1403	spin_unlock(&l_mg->free_lock);
1404
1405	if (pblk_line_alloc_bitmaps(pblk, line))
1406		return NULL;
1407
1408	if (pblk_line_erase(pblk, line)) {
1409		line = pblk_line_retry(pblk, line);
1410		if (!line)
1411			return NULL;
1412	}
1413
1414retry_setup:
1415	if (!pblk_line_init_metadata(pblk, line, NULL)) {
1416		line = pblk_line_retry(pblk, line);
1417		if (!line)
1418			return NULL;
1419
1420		goto retry_setup;
1421	}
1422
1423	if (!pblk_line_init_bb(pblk, line, 1)) {
1424		line = pblk_line_retry(pblk, line);
1425		if (!line)
1426			return NULL;
1427
1428		goto retry_setup;
1429	}
1430
1431	pblk_rl_free_lines_dec(&pblk->rl, line, true);
1432
1433	return line;
1434}
1435
1436void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
1437{
1438	struct pblk_line *line;
1439
1440	line = pblk_ppa_to_line(pblk, ppa);
1441	kref_put(&line->ref, pblk_line_put_wq);
1442}
1443
1444void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1445{
1446	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
1447	int i;
1448
1449	for (i = 0; i < rqd->nr_ppas; i++)
1450		pblk_ppa_to_line_put(pblk, ppa_list[i]);
1451}
1452
1453static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1454{
1455	lockdep_assert_held(&pblk->l_mg.free_lock);
1456
1457	pblk_set_space_limit(pblk);
1458	pblk->state = PBLK_STATE_STOPPING;
1459	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1460}
1461
1462static void pblk_line_close_meta_sync(struct pblk *pblk)
1463{
1464	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1465	struct pblk_line_meta *lm = &pblk->lm;
1466	struct pblk_line *line, *tline;
1467	LIST_HEAD(list);
1468
1469	spin_lock(&l_mg->close_lock);
1470	if (list_empty(&l_mg->emeta_list)) {
1471		spin_unlock(&l_mg->close_lock);
1472		return;
1473	}
1474
1475	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1476	spin_unlock(&l_mg->close_lock);
1477
1478	list_for_each_entry_safe(line, tline, &list, list) {
1479		struct pblk_emeta *emeta = line->emeta;
1480
1481		while (emeta->mem < lm->emeta_len[0]) {
1482			int ret;
1483
1484			ret = pblk_submit_meta_io(pblk, line);
1485			if (ret) {
1486				pblk_err(pblk, "sync meta line %d failed (%d)\n",
1487							line->id, ret);
1488				return;
1489			}
1490		}
1491	}
1492
1493	pblk_wait_for_meta(pblk);
1494	flush_workqueue(pblk->close_wq);
1495}
1496
1497void __pblk_pipeline_flush(struct pblk *pblk)
1498{
1499	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1500	int ret;
1501
1502	spin_lock(&l_mg->free_lock);
1503	if (pblk->state == PBLK_STATE_RECOVERING ||
1504					pblk->state == PBLK_STATE_STOPPED) {
1505		spin_unlock(&l_mg->free_lock);
1506		return;
1507	}
1508	pblk->state = PBLK_STATE_RECOVERING;
1509	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1510	spin_unlock(&l_mg->free_lock);
1511
1512	pblk_flush_writer(pblk);
1513	pblk_wait_for_meta(pblk);
1514
1515	ret = pblk_recov_pad(pblk);
1516	if (ret) {
1517		pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
1518		return;
1519	}
1520
1521	flush_workqueue(pblk->bb_wq);
1522	pblk_line_close_meta_sync(pblk);
1523}
1524
1525void __pblk_pipeline_stop(struct pblk *pblk)
1526{
1527	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1528
1529	spin_lock(&l_mg->free_lock);
1530	pblk->state = PBLK_STATE_STOPPED;
1531	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1532	l_mg->data_line = NULL;
1533	l_mg->data_next = NULL;
1534	spin_unlock(&l_mg->free_lock);
1535}
1536
1537void pblk_pipeline_stop(struct pblk *pblk)
1538{
1539	__pblk_pipeline_flush(pblk);
1540	__pblk_pipeline_stop(pblk);
1541}
1542
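    /* Promote the pre-allocated next line to be the open data line: wait
     * for (or drive) its erase, set up its metadata and bitmaps, then
     * allocate a new "next" line, stopping the pipeline gracefully if none
     * is left.
     */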
1543struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1544{
1545	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1546	struct pblk_line *cur, *new = NULL;
1547	unsigned int left_seblks;
1548
1549	new = l_mg->data_next;
1550	if (!new)
1551		goto out;
1552
1553	spin_lock(&l_mg->free_lock);
1554	cur = l_mg->data_line;
1555	l_mg->data_line = new;
1556
1557	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1558	spin_unlock(&l_mg->free_lock);
1559
1560retry_erase:
1561	left_seblks = atomic_read(&new->left_seblks);
1562	if (left_seblks) {
1563		/* If line is not fully erased, erase it */
1564		if (atomic_read(&new->left_eblks)) {
1565			if (pblk_line_erase(pblk, new))
1566				goto out;
1567		} else {
1568			io_schedule();
1569		}
1570		goto retry_erase;
1571	}
1572
1573	if (pblk_line_alloc_bitmaps(pblk, new))
1574		return NULL;
1575
1576retry_setup:
1577	if (!pblk_line_init_metadata(pblk, new, cur)) {
1578		new = pblk_line_retry(pblk, new);
1579		if (!new)
1580			goto out;
1581
1582		goto retry_setup;
1583	}
1584
1585	if (!pblk_line_init_bb(pblk, new, 1)) {
1586		new = pblk_line_retry(pblk, new);
1587		if (!new)
1588			goto out;
1589
1590		goto retry_setup;
1591	}
1592
1593	pblk_rl_free_lines_dec(&pblk->rl, new, true);
1594
1595	/* Allocate next line for preparation */
1596	spin_lock(&l_mg->free_lock);
1597	l_mg->data_next = pblk_line_get(pblk);
1598	if (!l_mg->data_next) {
1599		/* If we cannot get a new line, we need to stop the pipeline.
1600		 * Only allow as many writes in as we can store safely and then
1601		 * fail gracefully
1602		 */
1603		pblk_stop_writes(pblk, new);
1604		l_mg->data_next = NULL;
1605	} else {
1606		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1607		l_mg->data_next->type = PBLK_LINETYPE_DATA;
1608	}
1609	spin_unlock(&l_mg->free_lock);
1610
1611out:
1612	return new;
1613}
1614
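    /* Last reference on a GC'ed line is dropped: either return the line to
     * the free list, or put it back on a GC list if errors were hit while
     * collecting it.
     */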
1615static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1616{
1617	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1618	struct pblk_gc *gc = &pblk->gc;
1619
1620	spin_lock(&line->lock);
1621	WARN_ON(line->state != PBLK_LINESTATE_GC);
1622	if (line->w_err_gc->has_gc_err) {
1623		spin_unlock(&line->lock);
1624		pblk_err(pblk, "line %d had errors during GC\n", line->id);
1625		pblk_put_line_back(pblk, line);
1626		line->w_err_gc->has_gc_err = 0;
1627		return;
1628	}
1629
1630	line->state = PBLK_LINESTATE_FREE;
1631	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1632					line->state);
1633	line->gc_group = PBLK_LINEGC_NONE;
1634	pblk_line_free(line);
1635
1636	if (line->w_err_gc->has_write_err) {
1637		pblk_rl_werr_line_out(&pblk->rl);
1638		line->w_err_gc->has_write_err = 0;
1639	}
1640
1641	spin_unlock(&line->lock);
1642	atomic_dec(&gc->pipeline_gc);
1643
1644	spin_lock(&l_mg->free_lock);
1645	list_add_tail(&line->list, &l_mg->free_list);
1646	l_mg->nr_free_lines++;
1647	spin_unlock(&l_mg->free_lock);
1648
1649	pblk_rl_free_lines_inc(&pblk->rl, line);
1650}
1651
1652static void pblk_line_put_ws(struct work_struct *work)
1653{
1654	struct pblk_line_ws *line_put_ws = container_of(work,
1655						struct pblk_line_ws, ws);
1656	struct pblk *pblk = line_put_ws->pblk;
1657	struct pblk_line *line = line_put_ws->line;
1658
1659	__pblk_line_put(pblk, line);
1660	mempool_free(line_put_ws, &pblk->gen_ws_pool);
1661}
1662
1663void pblk_line_put(struct kref *ref)
1664{
1665	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1666	struct pblk *pblk = line->pblk;
1667
1668	__pblk_line_put(pblk, line);
1669}
1670
1671void pblk_line_put_wq(struct kref *ref)
1672{
1673	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1674	struct pblk *pblk = line->pblk;
1675	struct pblk_line_ws *line_put_ws;
1676
1677	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1678	if (!line_put_ws)
1679		return;
1680
1681	line_put_ws->pblk = pblk;
1682	line_put_ws->line = line;
1683	line_put_ws->priv = NULL;
1684
1685	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1686	queue_work(pblk->r_end_wq, &line_put_ws->ws);
1687}
1688
1689int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1690{
1691	struct nvm_rq *rqd;
1692	int err;
1693
1694	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1695
1696	pblk_setup_e_rq(pblk, rqd, ppa);
1697
1698	rqd->end_io = pblk_end_io_erase;
1699	rqd->private = pblk;
1700
1701	trace_pblk_chunk_reset(pblk_disk_name(pblk),
1702				&ppa, PBLK_CHUNK_RESET_START);
1703
1704	/* The write thread schedules erases so that it minimizes disturbances
1705	 * with writes. Thus, there is no need to take the LUN semaphore.
1706	 */
1707	err = pblk_submit_io(pblk, rqd, NULL);
1708	if (err) {
1709		struct nvm_tgt_dev *dev = pblk->dev;
1710		struct nvm_geo *geo = &dev->geo;
1711
1712		pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1713					pblk_ppa_to_line_id(ppa),
1714					pblk_ppa_to_pos(geo, ppa));
1715	}
1716
1717	return err;
1718}
1719
1720struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1721{
1722	return pblk->l_mg.data_line;
1723}
1724
1725/* For now, always erase next line */
1726struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1727{
1728	return pblk->l_mg.data_next;
1729}
1730
1731int pblk_line_is_full(struct pblk_line *line)
1732{
1733	return (line->left_msecs == 0);
1734}
1735
1736static void pblk_line_should_sync_meta(struct pblk *pblk)
1737{
1738	if (pblk_rl_is_limit(&pblk->rl))
1739		pblk_line_close_meta_sync(pblk);
1740}
1741
1742void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1743{
1744	struct nvm_tgt_dev *dev = pblk->dev;
1745	struct nvm_geo *geo = &dev->geo;
1746	struct pblk_line_meta *lm = &pblk->lm;
1747	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1748	struct list_head *move_list;
1749	int i;
1750
1751#ifdef CONFIG_NVM_PBLK_DEBUG
1752	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1753				"pblk: corrupt closed line %d\n", line->id);
1754#endif
1755
1756	spin_lock(&l_mg->free_lock);
1757	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1758	spin_unlock(&l_mg->free_lock);
1759
1760	spin_lock(&l_mg->gc_lock);
1761	spin_lock(&line->lock);
1762	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1763	line->state = PBLK_LINESTATE_CLOSED;
1764	move_list = pblk_line_gc_list(pblk, line);
1765	list_add_tail(&line->list, move_list);
1766
1767	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1768	line->map_bitmap = NULL;
1769	line->smeta = NULL;
1770	line->emeta = NULL;
1771
1772	for (i = 0; i < lm->blk_per_line; i++) {
1773		struct pblk_lun *rlun = &pblk->luns[i];
1774		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1775		int state = line->chks[pos].state;
1776
1777		if (!(state & NVM_CHK_ST_OFFLINE))
1778			state = NVM_CHK_ST_CLOSED;
1779	}
1780
1781	spin_unlock(&line->lock);
1782	spin_unlock(&l_mg->gc_lock);
1783
1784	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1785					line->state);
1786}
1787
1788void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1789{
1790	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1791	struct pblk_line_meta *lm = &pblk->lm;
1792	struct pblk_emeta *emeta = line->emeta;
1793	struct line_emeta *emeta_buf = emeta->buf;
1794	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1795
1796	/* No need for exact vsc value; avoid a big line lock and take approx. */
1797	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1798	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1799
1800	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1801	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1802	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1803
1804	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
1805		emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1806		guid_copy((guid_t *)&emeta_buf->header.uuid,
1807							&pblk->instance_uuid);
1808		emeta_buf->header.id = cpu_to_le32(line->id);
1809		emeta_buf->header.type = cpu_to_le16(line->type);
1810		emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1811		emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1812		emeta_buf->header.crc = cpu_to_le32(
1813			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1814	}
1815
1816	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1817	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1818
1819	spin_lock(&l_mg->close_lock);
1820	spin_lock(&line->lock);
1821
1822	/* Update the in-memory start address for emeta, in case it has
1823	 * shifted due to write errors
1824	 */
1825	if (line->emeta_ssec != line->cur_sec)
1826		line->emeta_ssec = line->cur_sec;
1827
1828	list_add_tail(&line->list, &l_mg->emeta_list);
1829	spin_unlock(&line->lock);
1830	spin_unlock(&l_mg->close_lock);
1831
1832	pblk_line_should_sync_meta(pblk);
1833}
1834
1835static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1836{
1837	struct pblk_line_meta *lm = &pblk->lm;
1838	unsigned int lba_list_size = lm->emeta_len[2];
1839	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1840	struct pblk_emeta *emeta = line->emeta;
1841
1842	w_err_gc->lba_list = kvmalloc(lba_list_size, GFP_KERNEL);
1843	memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1844				lba_list_size);
1845}
1846
1847void pblk_line_close_ws(struct work_struct *work)
1848{
1849	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1850									ws);
1851	struct pblk *pblk = line_ws->pblk;
1852	struct pblk_line *line = line_ws->line;
1853	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1854
1855	/* Write errors make the emeta start address stored in smeta invalid,
1856	 * so keep a copy of the lba list until we've gc'd the line
1857	 */
1858	if (w_err_gc->has_write_err)
1859		pblk_save_lba_list(pblk, line);
1860
1861	pblk_line_close(pblk, line);
1862	mempool_free(line_ws, &pblk->gen_ws_pool);
1863}
1864
1865void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1866		      void (*work)(struct work_struct *), gfp_t gfp_mask,
1867		      struct workqueue_struct *wq)
1868{
1869	struct pblk_line_ws *line_ws;
1870
1871	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
1872
1873	line_ws->pblk = pblk;
1874	line_ws->line = line;
1875	line_ws->priv = priv;
1876
1877	INIT_WORK(&line_ws->ws, work);
1878	queue_work(wq, &line_ws->ws);
1879}
1880
1881static void __pblk_down_chunk(struct pblk *pblk, int pos)
1882{
1883	struct pblk_lun *rlun = &pblk->luns[pos];
1884	int ret;
1885
1886	/*
1887	 * Only send one inflight I/O per LUN. Since we map at a page
1888	 * granularity, all ppas in the I/O will map to the same LUN
1889	 */
1890
1891	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1892	if (ret == -ETIME || ret == -EINTR)
1893		pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1894				-ret);
1895}
1896
1897void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
1898{
1899	struct nvm_tgt_dev *dev = pblk->dev;
1900	struct nvm_geo *geo = &dev->geo;
1901	int pos = pblk_ppa_to_pos(geo, ppa);
1902
1903	__pblk_down_chunk(pblk, pos);
1904}
1905
1906void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
1907		  unsigned long *lun_bitmap)
1908{
1909	struct nvm_tgt_dev *dev = pblk->dev;
1910	struct nvm_geo *geo = &dev->geo;
1911	int pos = pblk_ppa_to_pos(geo, ppa);
1912
1913	/* If the LUN has been locked for this same request, do not attempt to
1914	 * lock it again
1915	 */
1916	if (test_and_set_bit(pos, lun_bitmap))
1917		return;
1918
1919	__pblk_down_chunk(pblk, pos);
1920}
1921
1922void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
1923{
1924	struct nvm_tgt_dev *dev = pblk->dev;
1925	struct nvm_geo *geo = &dev->geo;
1926	struct pblk_lun *rlun;
1927	int pos = pblk_ppa_to_pos(geo, ppa);
1928
1929	rlun = &pblk->luns[pos];
1930	up(&rlun->wr_sem);
1931}
1932
1933void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
1934{
1935	struct nvm_tgt_dev *dev = pblk->dev;
1936	struct nvm_geo *geo = &dev->geo;
1937	struct pblk_lun *rlun;
1938	int num_lun = geo->all_luns;
1939	int bit = -1;
1940
1941	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
1942		rlun = &pblk->luns[bit];
1943		up(&rlun->wr_sem);
1944	}
1945}
1946
1947void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1948{
1949	struct ppa_addr ppa_l2p;
1950
1951	/* logic error: lba out-of-bounds. Ignore update */
1952	if (!(lba < pblk->capacity)) {
1953		WARN(1, "pblk: corrupted L2P map request\n");
1954		return;
1955	}
1956
1957	spin_lock(&pblk->trans_lock);
1958	ppa_l2p = pblk_trans_map_get(pblk, lba);
1959
1960	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
1961		pblk_map_invalidate(pblk, ppa_l2p);
1962
1963	pblk_trans_map_set(pblk, lba, ppa);
1964	spin_unlock(&pblk->trans_lock);
1965}
1966
1967void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1968{
1969
1970#ifdef CONFIG_NVM_PBLK_DEBUG
1971	/* Callers must ensure that the ppa points to a cache address */
1972	BUG_ON(!pblk_addr_in_cache(ppa));
1973	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1974#endif
1975
1976	pblk_update_map(pblk, lba, ppa);
1977}
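
/*
 * Illustrative sketch, not part of the original file: the life of an L2P
 * entry for a buffered host write. The entry first points at the write
 * buffer via pblk_update_map_cache(); once the sector reaches the media,
 * the write-back path replaces it with the device address through
 * pblk_update_map_dev(), which also resolves races with newer writes.
 */
static inline void pblk_map_user_write_sketch(struct pblk *pblk,
					      sector_t lba,
					      struct ppa_addr cache_ppa)
{
	/* Step 1: on a buffered write, map the lba to its cacheline */
	pblk_update_map_cache(pblk, lba, cache_ppa);

	/* Step 2 happens later in the writer thread, conceptually:
	 * pblk_update_map_dev(pblk, lba, mapped_ppa, cache_ppa);
	 */
}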
1978
1979int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
1980		       struct pblk_line *gc_line, u64 paddr_gc)
1981{
1982	struct ppa_addr ppa_l2p, ppa_gc;
1983	int ret = 1;
1984
1985#ifdef CONFIG_NVM_PBLK_DEBUG
1986	/* Callers must ensure that the ppa points to a cache address */
1987	BUG_ON(!pblk_addr_in_cache(ppa_new));
1988	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
1989#endif
1990
1991	/* logic error: lba out-of-bounds. Ignore update */
1992	if (!(lba < pblk->capacity)) {
1993		WARN(1, "pblk: corrupted L2P map request\n");
1994		return 0;
1995	}
1996
1997	spin_lock(&pblk->trans_lock);
1998	ppa_l2p = pblk_trans_map_get(pblk, lba);
1999	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
2000
2001	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
2002		spin_lock(&gc_line->lock);
2003		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
2004						"pblk: corrupted GC update");
2005		spin_unlock(&gc_line->lock);
2006
2007		ret = 0;
2008		goto out;
2009	}
2010
2011	pblk_trans_map_set(pblk, lba, ppa_new);
2012out:
2013	spin_unlock(&pblk->trans_lock);
2014	return ret;
2015}
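
/*
 * Illustrative sketch, not part of the original file: how a GC writer can
 * use the return value of pblk_update_map_gc(). A return of 0 means the
 * host rewrote the sector while GC was relocating it, so the moved copy is
 * stale and must not be treated as live data.
 */
static inline bool pblk_gc_map_sketch(struct pblk *pblk, sector_t lba,
				      struct ppa_addr ppa_new,
				      struct pblk_line *gc_line, u64 paddr_gc)
{
	if (!pblk_update_map_gc(pblk, lba, ppa_new, gc_line, paddr_gc)) {
		/* A newer host write wins; drop the relocated copy */
		return false;
	}

	/* The mapping now points at the relocated (cached) copy */
	return true;
}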
2016
2017void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2018			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
2019{
2020	struct ppa_addr ppa_l2p;
2021
2022#ifdef CONFIG_NVM_PBLK_DEBUG
2023	/* Callers must ensure that the ppa points to a device address */
2024	BUG_ON(pblk_addr_in_cache(ppa_mapped));
2025#endif
2026	/* Invalidate and discard padded entries */
2027	if (lba == ADDR_EMPTY) {
2028		atomic64_inc(&pblk->pad_wa);
2029#ifdef CONFIG_NVM_PBLK_DEBUG
2030		atomic_long_inc(&pblk->padded_wb);
2031#endif
2032		if (!pblk_ppa_empty(ppa_mapped))
2033			pblk_map_invalidate(pblk, ppa_mapped);
2034		return;
2035	}
2036
2037	/* logic error: lba out-of-bounds. Ignore update */
2038	if (!(lba < pblk->capacity)) {
2039		WARN(1, "pblk: corrupted L2P map request\n");
2040		return;
2041	}
2042
2043	spin_lock(&pblk->trans_lock);
2044	ppa_l2p = pblk_trans_map_get(pblk, lba);
2045
2046	/* Do not update L2P if the cacheline has been updated. In this case,
2047	 * the mapped ppa must be invalidated
2048	 */
2049	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2050		if (!pblk_ppa_empty(ppa_mapped))
2051			pblk_map_invalidate(pblk, ppa_mapped);
2052		goto out;
2053	}
2054
2055#ifdef CONFIG_NVM_PBLK_DEBUG
2056	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
2057#endif
2058
2059	pblk_trans_map_set(pblk, lba, ppa_mapped);
2060out:
2061	spin_unlock(&pblk->trans_lock);
2062}
2063
2064int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2065			 sector_t blba, int nr_secs, bool *from_cache)
2066{
2067	int i;
2068
2069	spin_lock(&pblk->trans_lock);
2070	for (i = 0; i < nr_secs; i++) {
2071		struct ppa_addr ppa;
2072
2073		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2074
2075		/* If the L2P entry maps to a line, the reference is valid */
2076		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2077			struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
2078
2079			if (i > 0 && *from_cache)
2080				break;
2081			*from_cache = false;
2082
2083			kref_get(&line->ref);
2084		} else {
2085			if (i > 0 && !*from_cache)
2086				break;
2087			*from_cache = true;
2088		}
2089	}
2090	spin_unlock(&pblk->trans_lock);
2091	return i;
2092}
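
/*
 * Illustrative sketch, not part of the original file: the reader-side
 * contract of pblk_lookup_l2p_seq(). For every returned ppa that maps to
 * a line (*from_cache == false), a line reference was taken under
 * trans_lock; the caller owns those references and must drop them once
 * the read has completed.
 */
static inline void pblk_read_put_line_refs_sketch(struct pblk *pblk,
						  struct ppa_addr *ppas,
						  int nr_secs)
{
	int i;

	for (i = 0; i < nr_secs; i++)
		if (!pblk_ppa_empty(ppas[i]) && !pblk_addr_in_cache(ppas[i]))
			kref_put(&pblk_ppa_to_line(pblk, ppas[i])->ref,
								pblk_line_put);
}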
2093
2094void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2095			  u64 *lba_list, int nr_secs)
2096{
2097	u64 lba;
2098	int i;
2099
2100	spin_lock(&pblk->trans_lock);
2101	for (i = 0; i < nr_secs; i++) {
2102		lba = lba_list[i];
2103		if (lba != ADDR_EMPTY) {
2104			/* logic error: lba out-of-bounds. Ignore update */
2105			if (!(lba < pblk->capacity)) {
2106				WARN(1, "pblk: corrupted L2P map request\n");
2107				continue;
2108			}
2109			ppas[i] = pblk_trans_map_get(pblk, lba);
2110		}
2111	}
2112	spin_unlock(&pblk->trans_lock);
2113}
2114
2115void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
2116{
2117	void *buffer;
2118
2119	if (pblk_is_oob_meta_supported(pblk)) {
2120		/* Just use OOB metadata buffer as always */
2121		buffer = rqd->meta_list;
2122	} else {
2123		/* We need to reuse the last page of the request (packed
2124		 * metadata) in a similar way to traditional oob metadata
2125		 */
2126		buffer = page_to_virt(
2127			rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2128	}
2129
2130	return buffer;
2131}
2132
2133void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
2134{
2135	void *meta_list = rqd->meta_list;
2136	void *page;
2137	int i = 0;
2138
2139	if (pblk_is_oob_meta_supported(pblk))
2140		return;
2141
2142	page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2143	/* We need to fill oob meta buffer with data from packed metadata */
2144	for (; i < rqd->nr_ppas; i++)
2145		memcpy(pblk_get_meta(pblk, meta_list, i),
2146			page + (i * sizeof(struct pblk_sec_meta)),
2147			sizeof(struct pblk_sec_meta));
2148}
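
/*
 * Illustrative sketch, not part of the original file: the packed-metadata
 * layout assumed by pblk_get_meta_for_writes() and pblk_get_packed_meta().
 * Without a device OOB area, the per-sector metadata for an n-ppa request
 * is stored back-to-back in the last page of the bio, so slot i lives at
 * page + i * sizeof(struct pblk_sec_meta).
 */
static inline void *pblk_packed_meta_slot_sketch(void *last_page, int i)
{
	/* Address of the packed metadata slot for the i-th ppa */
	return last_page + i * sizeof(struct pblk_sec_meta);
}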
v4.17
 
   1/*
   2 * Copyright (C) 2016 CNEX Labs
   3 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
   4 *                  Matias Bjorling <matias@cnexlabs.com>
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License version
   8 * 2 as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License for more details.
  14 *
  15 * pblk-core.c - pblk's core functionality
  16 *
  17 */
  18
 
 
  19#include "pblk.h"
 
  20
  21static void pblk_line_mark_bb(struct work_struct *work)
  22{
  23	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
  24									ws);
  25	struct pblk *pblk = line_ws->pblk;
  26	struct nvm_tgt_dev *dev = pblk->dev;
  27	struct ppa_addr *ppa = line_ws->priv;
  28	int ret;
  29
  30	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
  31	if (ret) {
  32		struct pblk_line *line;
  33		int pos;
  34
  35		line = &pblk->lines[pblk_ppa_to_line(*ppa)];
  36		pos = pblk_ppa_to_pos(&dev->geo, *ppa);
  37
  38		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
  39				line->id, pos);
  40	}
  41
  42	kfree(ppa);
  43	mempool_free(line_ws, pblk->gen_ws_pool);
  44}
  45
  46static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
  47			 struct ppa_addr ppa_addr)
  48{
  49	struct nvm_tgt_dev *dev = pblk->dev;
  50	struct nvm_geo *geo = &dev->geo;
  51	struct ppa_addr *ppa;
  52	int pos = pblk_ppa_to_pos(geo, ppa_addr);
  53
  54	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
  55	atomic_long_inc(&pblk->erase_failed);
  56
  57	atomic_dec(&line->blk_in_line);
  58	if (test_and_set_bit(pos, line->blk_bitmap))
  59		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
  60							line->id, pos);
  61
  62	/* Not necessary to mark bad blocks on 2.0 spec. */
  63	if (geo->version == NVM_OCSSD_SPEC_20)
  64		return;
  65
  66	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
  67	if (!ppa)
  68		return;
  69
  70	*ppa = ppa_addr;
  71	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
  72						GFP_ATOMIC, pblk->bb_wq);
  73}
  74
  75static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
  76{
  77	struct nvm_tgt_dev *dev = pblk->dev;
  78	struct nvm_geo *geo = &dev->geo;
  79	struct nvm_chk_meta *chunk;
  80	struct pblk_line *line;
  81	int pos;
  82
  83	line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
  84	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
  85	chunk = &line->chks[pos];
  86
  87	atomic_dec(&line->left_seblks);
  88
  89	if (rqd->error) {
 
 
 
  90		chunk->state = NVM_CHK_ST_OFFLINE;
  91		pblk_mark_bb(pblk, line, rqd->ppa_addr);
  92	} else {
 
 
 
  93		chunk->state = NVM_CHK_ST_FREE;
  94	}
  95
 
 
 
  96	atomic_dec(&pblk->inflight_io);
  97}
  98
  99/* Erase completion assumes that only one block is erased at the time */
 100static void pblk_end_io_erase(struct nvm_rq *rqd)
 101{
 102	struct pblk *pblk = rqd->private;
 103
 104	__pblk_end_io_erase(pblk, rqd);
 105	mempool_free(rqd, pblk->e_rq_pool);
 106}
 107
 108/*
 109 * Get information for all chunks from the device.
 110 *
 111 * The caller is responsible for freeing the returned structure
 112 */
 113struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
 114{
 115	struct nvm_tgt_dev *dev = pblk->dev;
 116	struct nvm_geo *geo = &dev->geo;
 117	struct nvm_chk_meta *meta;
 118	struct ppa_addr ppa;
 119	unsigned long len;
 120	int ret;
 121
 122	ppa.ppa = 0;
 123
 124	len = geo->all_chunks * sizeof(*meta);
 125	meta = kzalloc(len, GFP_KERNEL);
 126	if (!meta)
 127		return ERR_PTR(-ENOMEM);
 128
 129	ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
 130	if (ret) {
 131		kfree(meta);
 132		return ERR_PTR(-EIO);
 133	}
 134
 135	return meta;
 136}
 137
 138struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
 139					      struct nvm_chk_meta *meta,
 140					      struct ppa_addr ppa)
 141{
 142	struct nvm_tgt_dev *dev = pblk->dev;
 143	struct nvm_geo *geo = &dev->geo;
 144	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
 145	int lun_off = ppa.m.pu * geo->num_chk;
 146	int chk_off = ppa.m.chk;
 147
 148	return meta + ch_off + lun_off + chk_off;
 149}
 150
 151void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
 152			   u64 paddr)
 153{
 154	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 155	struct list_head *move_list = NULL;
 156
 157	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
 158	 * table is modified with reclaimed sectors, a check is done to endure
 159	 * that newer updates are not overwritten.
 160	 */
 161	spin_lock(&line->lock);
 162	WARN_ON(line->state == PBLK_LINESTATE_FREE);
 163
 164	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
 165		WARN_ONCE(1, "pblk: double invalidate\n");
 166		spin_unlock(&line->lock);
 167		return;
 168	}
 169	le32_add_cpu(line->vsc, -1);
 170
 171	if (line->state == PBLK_LINESTATE_CLOSED)
 172		move_list = pblk_line_gc_list(pblk, line);
 173	spin_unlock(&line->lock);
 174
 175	if (move_list) {
 176		spin_lock(&l_mg->gc_lock);
 177		spin_lock(&line->lock);
 178		/* Prevent moving a line that has just been chosen for GC */
 179		if (line->state == PBLK_LINESTATE_GC) {
 180			spin_unlock(&line->lock);
 181			spin_unlock(&l_mg->gc_lock);
 182			return;
 183		}
 184		spin_unlock(&line->lock);
 185
 186		list_move_tail(&line->list, move_list);
 187		spin_unlock(&l_mg->gc_lock);
 188	}
 189}
 190
 191void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
 192{
 193	struct pblk_line *line;
 194	u64 paddr;
 195	int line_id;
 196
 197#ifdef CONFIG_NVM_DEBUG
 198	/* Callers must ensure that the ppa points to a device address */
 199	BUG_ON(pblk_addr_in_cache(ppa));
 200	BUG_ON(pblk_ppa_empty(ppa));
 201#endif
 202
 203	line_id = pblk_ppa_to_line(ppa);
 204	line = &pblk->lines[line_id];
 205	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
 206
 207	__pblk_map_invalidate(pblk, line, paddr);
 208}
 209
 210static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
 211				  unsigned int nr_secs)
 212{
 213	sector_t lba;
 214
 215	spin_lock(&pblk->trans_lock);
 216	for (lba = slba; lba < slba + nr_secs; lba++) {
 217		struct ppa_addr ppa;
 218
 219		ppa = pblk_trans_map_get(pblk, lba);
 220
 221		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
 222			pblk_map_invalidate(pblk, ppa);
 223
 224		pblk_ppa_set_empty(&ppa);
 225		pblk_trans_map_set(pblk, lba, ppa);
 226	}
 227	spin_unlock(&pblk->trans_lock);
 228}
 229
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 230/* Caller must guarantee that the request is a valid type */
 231struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
 232{
 233	mempool_t *pool;
 234	struct nvm_rq *rqd;
 235	int rq_size;
 236
 237	switch (type) {
 238	case PBLK_WRITE:
 239	case PBLK_WRITE_INT:
 240		pool = pblk->w_rq_pool;
 241		rq_size = pblk_w_rq_size;
 242		break;
 243	case PBLK_READ:
 244		pool = pblk->r_rq_pool;
 245		rq_size = pblk_g_rq_size;
 246		break;
 247	default:
 248		pool = pblk->e_rq_pool;
 249		rq_size = pblk_g_rq_size;
 250	}
 251
 252	rqd = mempool_alloc(pool, GFP_KERNEL);
 253	memset(rqd, 0, rq_size);
 254
 255	return rqd;
 256}
 257
 258/* Typically used on completion path. Cannot guarantee request consistency */
 259void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
 260{
 261	struct nvm_tgt_dev *dev = pblk->dev;
 262	mempool_t *pool;
 263
 264	switch (type) {
 265	case PBLK_WRITE:
 266		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
 
 267	case PBLK_WRITE_INT:
 268		pool = pblk->w_rq_pool;
 269		break;
 270	case PBLK_READ:
 271		pool = pblk->r_rq_pool;
 272		break;
 273	case PBLK_ERASE:
 274		pool = pblk->e_rq_pool;
 275		break;
 276	default:
 277		pr_err("pblk: trying to free unknown rqd type\n");
 278		return;
 279	}
 280
 281	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
 282	mempool_free(rqd, pool);
 283}
 284
 285void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
 286			 int nr_pages)
 287{
 288	struct bio_vec bv;
 289	int i;
 
 290
 291	WARN_ON(off + nr_pages != bio->bi_vcnt);
 292
 293	for (i = off; i < nr_pages + off; i++) {
 294		bv = bio->bi_io_vec[i];
 295		mempool_free(bv.bv_page, pblk->page_bio_pool);
 
 296	}
 297}
 298
 299int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
 300		       int nr_pages)
 301{
 302	struct request_queue *q = pblk->dev->q;
 303	struct page *page;
 304	int i, ret;
 305
 306	for (i = 0; i < nr_pages; i++) {
 307		page = mempool_alloc(pblk->page_bio_pool, flags);
 308
 309		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
 310		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
 311			pr_err("pblk: could not add page to bio\n");
 312			mempool_free(page, pblk->page_bio_pool);
 313			goto err;
 314		}
 315	}
 316
 317	return 0;
 318err:
 319	pblk_bio_free_pages(pblk, bio, 0, i - 1);
 320	return -1;
 321}
 322
 323static void pblk_write_kick(struct pblk *pblk)
 324{
 325	wake_up_process(pblk->writer_ts);
 326	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
 327}
 328
 329void pblk_write_timer_fn(struct timer_list *t)
 330{
 331	struct pblk *pblk = from_timer(pblk, t, wtimer);
 332
 333	/* kick the write thread every tick to flush outstanding data */
 334	pblk_write_kick(pblk);
 335}
 336
 337void pblk_write_should_kick(struct pblk *pblk)
 338{
 339	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
 340
 341	if (secs_avail >= pblk->min_write_pgs)
 342		pblk_write_kick(pblk);
 343}
 344
 345void pblk_end_io_sync(struct nvm_rq *rqd)
 346{
 347	struct completion *waiting = rqd->private;
 348
 349	complete(waiting);
 350}
 351
 352static void pblk_wait_for_meta(struct pblk *pblk)
 353{
 354	do {
 355		if (!atomic_read(&pblk->inflight_io))
 356			break;
 357
 358		schedule();
 359	} while (1);
 360}
 361
 362static void pblk_flush_writer(struct pblk *pblk)
 363{
 364	pblk_rb_flush(&pblk->rwb);
 365	do {
 366		if (!pblk_rb_sync_count(&pblk->rwb))
 367			break;
 368
 369		pblk_write_kick(pblk);
 370		schedule();
 371	} while (1);
 372}
 373
 374struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
 375{
 376	struct pblk_line_meta *lm = &pblk->lm;
 377	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 378	struct list_head *move_list = NULL;
 379	int vsc = le32_to_cpu(*line->vsc);
 
 
 380
 381	lockdep_assert_held(&line->lock);
 382
 383	if (!vsc) {
 
 
 
 
 
 
 384		if (line->gc_group != PBLK_LINEGC_FULL) {
 385			line->gc_group = PBLK_LINEGC_FULL;
 386			move_list = &l_mg->gc_full_list;
 387		}
 388	} else if (vsc < lm->high_thrs) {
 389		if (line->gc_group != PBLK_LINEGC_HIGH) {
 390			line->gc_group = PBLK_LINEGC_HIGH;
 391			move_list = &l_mg->gc_high_list;
 392		}
 393	} else if (vsc < lm->mid_thrs) {
 394		if (line->gc_group != PBLK_LINEGC_MID) {
 395			line->gc_group = PBLK_LINEGC_MID;
 396			move_list = &l_mg->gc_mid_list;
 397		}
 398	} else if (vsc < line->sec_in_line) {
 399		if (line->gc_group != PBLK_LINEGC_LOW) {
 400			line->gc_group = PBLK_LINEGC_LOW;
 401			move_list = &l_mg->gc_low_list;
 402		}
 403	} else if (vsc == line->sec_in_line) {
 404		if (line->gc_group != PBLK_LINEGC_EMPTY) {
 405			line->gc_group = PBLK_LINEGC_EMPTY;
 406			move_list = &l_mg->gc_empty_list;
 407		}
 408	} else {
 409		line->state = PBLK_LINESTATE_CORRUPT;
 
 
 
 410		line->gc_group = PBLK_LINEGC_NONE;
 411		move_list =  &l_mg->corrupt_list;
 412		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
 413						line->id, vsc,
 414						line->sec_in_line,
 415						lm->high_thrs, lm->mid_thrs);
 416	}
 417
 418	return move_list;
 419}
 420
 421void pblk_discard(struct pblk *pblk, struct bio *bio)
 422{
 423	sector_t slba = pblk_get_lba(bio);
 424	sector_t nr_secs = pblk_get_secs(bio);
 425
 426	pblk_invalidate_range(pblk, slba, nr_secs);
 427}
 428
 429void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
 430{
 431	atomic_long_inc(&pblk->write_failed);
 432#ifdef CONFIG_NVM_DEBUG
 433	pblk_print_failed_rqd(pblk, rqd, rqd->error);
 434#endif
 435}
 436
 437void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
 438{
 439	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
 440	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
 441		atomic_long_inc(&pblk->read_empty);
 442		return;
 443	}
 444
 445	switch (rqd->error) {
 446	case NVM_RSP_WARN_HIGHECC:
 447		atomic_long_inc(&pblk->read_high_ecc);
 448		break;
 449	case NVM_RSP_ERR_FAILECC:
 450	case NVM_RSP_ERR_FAILCRC:
 451		atomic_long_inc(&pblk->read_failed);
 452		break;
 453	default:
 454		pr_err("pblk: unknown read error:%d\n", rqd->error);
 455	}
 456#ifdef CONFIG_NVM_DEBUG
 457	pblk_print_failed_rqd(pblk, rqd, rqd->error);
 458#endif
 459}
 460
 461void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
 462{
 463	pblk->sec_per_write = sec_per_write;
 464}
 465
 466int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
 467{
 468	struct nvm_tgt_dev *dev = pblk->dev;
 469
 470#ifdef CONFIG_NVM_DEBUG
 471	int ret;
 472
 473	ret = pblk_check_io(pblk, rqd);
 474	if (ret)
 475		return ret;
 476#endif
 477
 478	atomic_inc(&pblk->inflight_io);
 
 
 
 
 
 
 
 
 
 
 
 
 479
 480	return nvm_submit_io(dev, rqd);
 
 
 
 
 
 
 481}
 482
 483int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
 484{
 485	struct nvm_tgt_dev *dev = pblk->dev;
 
 486
 487#ifdef CONFIG_NVM_DEBUG
 488	int ret;
 489
 490	ret = pblk_check_io(pblk, rqd);
 491	if (ret)
 492		return ret;
 493#endif
 494
 495	atomic_inc(&pblk->inflight_io);
 496
 497	return nvm_submit_io_sync(dev, rqd);
 498}
 
 499
 500static void pblk_bio_map_addr_endio(struct bio *bio)
 501{
 502	bio_put(bio);
 503}
 504
 505struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
 506			      unsigned int nr_secs, unsigned int len,
 507			      int alloc_type, gfp_t gfp_mask)
 508{
 509	struct nvm_tgt_dev *dev = pblk->dev;
 510	void *kaddr = data;
 511	struct page *page;
 512	struct bio *bio;
 513	int i, ret;
 514
 515	if (alloc_type == PBLK_KMALLOC_META)
 516		return bio_map_kern(dev->q, kaddr, len, gfp_mask);
 
 517
 518	bio = bio_kmalloc(gfp_mask, nr_secs);
 519	if (!bio)
 520		return ERR_PTR(-ENOMEM);
 521
 522	for (i = 0; i < nr_secs; i++) {
 523		page = vmalloc_to_page(kaddr);
 524		if (!page) {
 525			pr_err("pblk: could not map vmalloc bio\n");
 526			bio_put(bio);
 527			bio = ERR_PTR(-ENOMEM);
 528			goto out;
 529		}
 530
 531		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
 532		if (ret != PAGE_SIZE) {
 533			pr_err("pblk: could not add page to bio\n");
 534			bio_put(bio);
 535			bio = ERR_PTR(-ENOMEM);
 536			goto out;
 537		}
 538
 539		kaddr += PAGE_SIZE;
 540	}
 541
 542	bio->bi_end_io = pblk_bio_map_addr_endio;
 543out:
 544	return bio;
 545}
 546
 547int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 548		   unsigned long secs_to_flush)
 549{
 550	int max = pblk->sec_per_write;
 551	int min = pblk->min_write_pgs;
 552	int secs_to_sync = 0;
 553
 
 
 
 554	if (secs_avail >= max)
 555		secs_to_sync = max;
 556	else if (secs_avail >= min)
 557		secs_to_sync = min * (secs_avail / min);
 558	else if (secs_to_flush)
 559		secs_to_sync = min;
 560
 561	return secs_to_sync;
 562}
 563
 564void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
 565{
 566	u64 addr;
 567	int i;
 568
 569	spin_lock(&line->lock);
 570	addr = find_next_zero_bit(line->map_bitmap,
 571					pblk->lm.sec_per_line, line->cur_sec);
 572	line->cur_sec = addr - nr_secs;
 573
 574	for (i = 0; i < nr_secs; i++, line->cur_sec--)
 575		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
 576	spin_unlock(&line->lock);
 577}
 578
 579u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
 580{
 581	u64 addr;
 582	int i;
 583
 584	lockdep_assert_held(&line->lock);
 585
 586	/* logic error: ppa out-of-bounds. Prevent generating bad address */
 587	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
 588		WARN(1, "pblk: page allocation out of bounds\n");
 589		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
 590	}
 591
 592	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
 593					pblk->lm.sec_per_line, line->cur_sec);
 594	for (i = 0; i < nr_secs; i++, line->cur_sec++)
 595		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
 596
 597	return addr;
 598}
 599
 600u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
 601{
 602	u64 addr;
 603
 604	/* Lock needed in case a write fails and a recovery needs to remap
 605	 * failed write buffer entries
 606	 */
 607	spin_lock(&line->lock);
 608	addr = __pblk_alloc_page(pblk, line, nr_secs);
 609	line->left_msecs -= nr_secs;
 610	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
 611	spin_unlock(&line->lock);
 612
 613	return addr;
 614}
 615
 616u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
 617{
 618	u64 paddr;
 619
 620	spin_lock(&line->lock);
 621	paddr = find_next_zero_bit(line->map_bitmap,
 622					pblk->lm.sec_per_line, line->cur_sec);
 623	spin_unlock(&line->lock);
 624
 625	return paddr;
 626}
 627
 628/*
 629 * Submit emeta to one LUN in the raid line at the time to avoid a deadlock when
 630 * taking the per LUN semaphore.
 631 */
 632static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
 633				     void *emeta_buf, u64 paddr, int dir)
 634{
 635	struct nvm_tgt_dev *dev = pblk->dev;
 636	struct nvm_geo *geo = &dev->geo;
 637	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 638	struct pblk_line_meta *lm = &pblk->lm;
 639	void *ppa_list, *meta_list;
 640	struct bio *bio;
 
 
 
 
 
 
 
 
 
 
 
 
 641	struct nvm_rq rqd;
 642	dma_addr_t dma_ppa_list, dma_meta_list;
 643	int min = pblk->min_write_pgs;
 644	int left_ppas = lm->emeta_sec[0];
 645	int id = line->id;
 646	int rq_ppas, rq_len;
 647	int cmd_op, bio_op;
 648	int i, j;
 649	int ret;
 650
 651	if (dir == PBLK_WRITE) {
 652		bio_op = REQ_OP_WRITE;
 653		cmd_op = NVM_OP_PWRITE;
 654	} else if (dir == PBLK_READ) {
 655		bio_op = REQ_OP_READ;
 656		cmd_op = NVM_OP_PREAD;
 657	} else
 658		return -EINVAL;
 659
 660	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
 661							&dma_meta_list);
 662	if (!meta_list)
 663		return -ENOMEM;
 664
 665	ppa_list = meta_list + pblk_dma_meta_size;
 666	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
 667
 668next_rq:
 669	memset(&rqd, 0, sizeof(struct nvm_rq));
 
 
 
 670
 671	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
 672	rq_len = rq_ppas * geo->csecs;
 673
 674	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
 675					l_mg->emeta_alloc_type, GFP_KERNEL);
 676	if (IS_ERR(bio)) {
 677		ret = PTR_ERR(bio);
 678		goto free_rqd_dma;
 679	}
 680
 681	bio->bi_iter.bi_sector = 0; /* internal bio */
 682	bio_set_op_attrs(bio, bio_op, 0);
 
 
 683
 684	rqd.bio = bio;
 685	rqd.meta_list = meta_list;
 686	rqd.ppa_list = ppa_list;
 687	rqd.dma_meta_list = dma_meta_list;
 688	rqd.dma_ppa_list = dma_ppa_list;
 689	rqd.opcode = cmd_op;
 690	rqd.nr_ppas = rq_ppas;
 
 
 691
 692	if (dir == PBLK_WRITE) {
 693		struct pblk_sec_meta *meta_list = rqd.meta_list;
 694
 695		rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
 696		for (i = 0; i < rqd.nr_ppas; ) {
 697			spin_lock(&line->lock);
 698			paddr = __pblk_alloc_page(pblk, line, min);
 699			spin_unlock(&line->lock);
 700			for (j = 0; j < min; j++, i++, paddr++) {
 701				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
 702				rqd.ppa_list[i] =
 703					addr_to_gen_ppa(pblk, paddr, id);
 704			}
 705		}
 706	} else {
 707		for (i = 0; i < rqd.nr_ppas; ) {
 708			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
 709			int pos = pblk_ppa_to_pos(geo, ppa);
 710			int read_type = PBLK_READ_RANDOM;
 711
 712			if (pblk_io_aligned(pblk, rq_ppas))
 713				read_type = PBLK_READ_SEQUENTIAL;
 714			rqd.flags = pblk_set_read_mode(pblk, read_type);
 715
 716			while (test_bit(pos, line->blk_bitmap)) {
 717				paddr += min;
 718				if (pblk_boundary_paddr_checks(pblk, paddr)) {
 719					pr_err("pblk: corrupt emeta line:%d\n",
 720								line->id);
 721					bio_put(bio);
 722					ret = -EINTR;
 723					goto free_rqd_dma;
 724				}
 725
 726				ppa = addr_to_gen_ppa(pblk, paddr, id);
 727				pos = pblk_ppa_to_pos(geo, ppa);
 728			}
 
 729
 730			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
 731				pr_err("pblk: corrupt emeta line:%d\n",
 732								line->id);
 733				bio_put(bio);
 734				ret = -EINTR;
 735				goto free_rqd_dma;
 736			}
 737
 738			for (j = 0; j < min; j++, i++, paddr++)
 739				rqd.ppa_list[i] =
 740					addr_to_gen_ppa(pblk, paddr, line->id);
 741		}
 742	}
 743
 744	ret = pblk_submit_io_sync(pblk, &rqd);
 745	if (ret) {
 746		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
 747		bio_put(bio);
 748		goto free_rqd_dma;
 749	}
 750
 751	atomic_dec(&pblk->inflight_io);
 752
 753	if (rqd.error) {
 754		if (dir == PBLK_WRITE)
 755			pblk_log_write_err(pblk, &rqd);
 756		else
 757			pblk_log_read_err(pblk, &rqd);
 758	}
 759
 760	emeta_buf += rq_len;
 761	left_ppas -= rq_ppas;
 762	if (left_ppas)
 763		goto next_rq;
 764free_rqd_dma:
 765	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
 766	return ret;
 767}
 768
 769u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
 
 770{
 771	struct nvm_tgt_dev *dev = pblk->dev;
 772	struct nvm_geo *geo = &dev->geo;
 773	struct pblk_line_meta *lm = &pblk->lm;
 774	int bit;
 775
 776	/* This usually only happens on bad lines */
 777	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
 778	if (bit >= lm->blk_per_line)
 779		return -1;
 780
 781	return bit * geo->ws_opt;
 782}
 783
 784static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
 785				     u64 paddr, int dir)
 786{
 787	struct nvm_tgt_dev *dev = pblk->dev;
 788	struct pblk_line_meta *lm = &pblk->lm;
 789	struct bio *bio;
 790	struct nvm_rq rqd;
 791	__le64 *lba_list = NULL;
 792	int i, ret;
 793	int cmd_op, bio_op;
 794	int flags;
 
 
 
 
 795
 796	if (dir == PBLK_WRITE) {
 797		bio_op = REQ_OP_WRITE;
 798		cmd_op = NVM_OP_PWRITE;
 799		flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
 800		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
 801	} else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
 802		bio_op = REQ_OP_READ;
 803		cmd_op = NVM_OP_PREAD;
 804		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
 805	} else
 806		return -EINVAL;
 807
 808	memset(&rqd, 0, sizeof(struct nvm_rq));
 809
 810	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
 811							&rqd.dma_meta_list);
 812	if (!rqd.meta_list)
 813		return -ENOMEM;
 814
 815	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
 816	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
 817
 818	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
 819	if (IS_ERR(bio)) {
 820		ret = PTR_ERR(bio);
 821		goto free_ppa_list;
 822	}
 823
 824	bio->bi_iter.bi_sector = 0; /* internal bio */
 825	bio_set_op_attrs(bio, bio_op, 0);
 826
 827	rqd.bio = bio;
 828	rqd.opcode = cmd_op;
 829	rqd.flags = flags;
 830	rqd.nr_ppas = lm->smeta_sec;
 
 
 
 831
 832	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
 833		struct pblk_sec_meta *meta_list = rqd.meta_list;
 
 
 
 
 
 
 
 
 
 
 
 834
 835		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
 
 
 836
 837		if (dir == PBLK_WRITE) {
 838			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
 
 
 839
 840			meta_list[i].lba = lba_list[paddr] = addr_empty;
 841		}
 842	}
 843
 844	/*
 845	 * This I/O is sent by the write thread when a line is replace. Since
 846	 * the write thread is the only one sending write and erase commands,
 847	 * there is no need to take the LUN semaphore.
 848	 */
 849	ret = pblk_submit_io_sync(pblk, &rqd);
 850	if (ret) {
 851		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
 852		bio_put(bio);
 853		goto free_ppa_list;
 854	}
 855
 856	atomic_dec(&pblk->inflight_io);
 857
 858	if (rqd.error) {
 859		if (dir == PBLK_WRITE)
 860			pblk_log_write_err(pblk, &rqd);
 861		else if (dir == PBLK_READ)
 862			pblk_log_read_err(pblk, &rqd);
 863	}
 864
 865free_ppa_list:
 
 
 
 
 
 866	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
 867
 868	return ret;
 869}
 870
 871int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
 872{
 873	u64 bpaddr = pblk_line_smeta_start(pblk, line);
 874
 875	return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
 876}
 877
 878int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
 879			 void *emeta_buf)
 880{
 881	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
 882						line->emeta_ssec, PBLK_READ);
 883}
 884
 885static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
 886			    struct ppa_addr ppa)
 887{
 888	rqd->opcode = NVM_OP_ERASE;
 889	rqd->ppa_addr = ppa;
 890	rqd->nr_ppas = 1;
 891	rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
 892	rqd->bio = NULL;
 893}
 894
 895static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
 896{
 897	struct nvm_rq rqd;
 898	int ret = 0;
 899
 900	memset(&rqd, 0, sizeof(struct nvm_rq));
 
 901
 902	pblk_setup_e_rq(pblk, &rqd, ppa);
 903
 904	/* The write thread schedules erases so that it minimizes disturbances
 905	 * with writes. Thus, there is no need to take the LUN semaphore.
 906	 */
 907	ret = pblk_submit_io_sync(pblk, &rqd);
 908	if (ret) {
 909		struct nvm_tgt_dev *dev = pblk->dev;
 910		struct nvm_geo *geo = &dev->geo;
 911
 912		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
 913					pblk_ppa_to_line(ppa),
 914					pblk_ppa_to_pos(geo, ppa));
 915
 916		rqd.error = ret;
 917		goto out;
 918	}
 919
 920out:
 921	rqd.private = pblk;
 922	__pblk_end_io_erase(pblk, &rqd);
 923
 924	return ret;
 925}
 926
 927int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
 928{
 929	struct pblk_line_meta *lm = &pblk->lm;
 930	struct ppa_addr ppa;
 931	int ret, bit = -1;
 932
 933	/* Erase only good blocks, one at a time */
 934	do {
 935		spin_lock(&line->lock);
 936		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
 937								bit + 1);
 938		if (bit >= lm->blk_per_line) {
 939			spin_unlock(&line->lock);
 940			break;
 941		}
 942
 943		ppa = pblk->luns[bit].bppa; /* set ch and lun */
 944		ppa.a.blk = line->id;
 945
 946		atomic_dec(&line->left_eblks);
 947		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
 948		spin_unlock(&line->lock);
 949
 950		ret = pblk_blk_erase_sync(pblk, ppa);
 951		if (ret) {
 952			pr_err("pblk: failed to erase line %d\n", line->id);
 953			return ret;
 954		}
 955	} while (1);
 956
 957	return 0;
 958}
 959
 960static void pblk_line_setup_metadata(struct pblk_line *line,
 961				     struct pblk_line_mgmt *l_mg,
 962				     struct pblk_line_meta *lm)
 963{
 964	int meta_line;
 965
 966	lockdep_assert_held(&l_mg->free_lock);
 967
 968retry_meta:
 969	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
 970	if (meta_line == PBLK_DATA_LINES) {
 971		spin_unlock(&l_mg->free_lock);
 972		io_schedule();
 973		spin_lock(&l_mg->free_lock);
 974		goto retry_meta;
 975	}
 976
 977	set_bit(meta_line, &l_mg->meta_bitmap);
 978	line->meta_line = meta_line;
 979
 980	line->smeta = l_mg->sline_meta[meta_line];
 981	line->emeta = l_mg->eline_meta[meta_line];
 982
 983	memset(line->smeta, 0, lm->smeta_len);
 984	memset(line->emeta->buf, 0, lm->emeta_len[0]);
 985
 986	line->emeta->mem = 0;
 987	atomic_set(&line->emeta->sync, 0);
 988}
 989
 990/* For now lines are always assumed full lines. Thus, smeta former and current
 991 * lun bitmaps are omitted.
 992 */
 993static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
 994				  struct pblk_line *cur)
 995{
 996	struct nvm_tgt_dev *dev = pblk->dev;
 997	struct nvm_geo *geo = &dev->geo;
 998	struct pblk_line_meta *lm = &pblk->lm;
 999	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1000	struct pblk_emeta *emeta = line->emeta;
1001	struct line_emeta *emeta_buf = emeta->buf;
1002	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
1003	int nr_blk_line;
1004
1005	/* After erasing the line, new bad blocks might appear and we risk
1006	 * having an invalid line
1007	 */
1008	nr_blk_line = lm->blk_per_line -
1009			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
1010	if (nr_blk_line < lm->min_blk_line) {
1011		spin_lock(&l_mg->free_lock);
1012		spin_lock(&line->lock);
1013		line->state = PBLK_LINESTATE_BAD;
 
 
1014		spin_unlock(&line->lock);
1015
1016		list_add_tail(&line->list, &l_mg->bad_list);
1017		spin_unlock(&l_mg->free_lock);
1018
1019		pr_debug("pblk: line %d is bad\n", line->id);
1020
1021		return 0;
1022	}
1023
1024	/* Run-time metadata */
1025	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
1026
1027	/* Mark LUNs allocated in this line (all for now) */
1028	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
1029
1030	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1031	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
1032	smeta_buf->header.id = cpu_to_le32(line->id);
1033	smeta_buf->header.type = cpu_to_le16(line->type);
1034	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
1035	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;
1036
1037	/* Start metadata */
1038	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1039	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
1040
1041	/* Fill metadata among lines */
1042	if (cur) {
1043		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
1044		smeta_buf->prev_id = cpu_to_le32(cur->id);
1045		cur->emeta->buf->next_id = cpu_to_le32(line->id);
1046	} else {
1047		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
1048	}
1049
1050	/* All smeta must be set at this point */
1051	smeta_buf->header.crc = cpu_to_le32(
1052			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
1053	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
1054
1055	/* End metadata */
1056	memcpy(&emeta_buf->header, &smeta_buf->header,
1057						sizeof(struct line_header));
1058
1059	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1060	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1061	emeta_buf->header.crc = cpu_to_le32(
1062			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1063
1064	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
1065	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
1066	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
1067	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
1068	emeta_buf->crc = cpu_to_le32(0);
1069	emeta_buf->prev_id = smeta_buf->prev_id;
1070
1071	return 1;
1072}
1073
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1074/* For now lines are always assumed full lines. Thus, smeta former and current
1075 * lun bitmaps are omitted.
1076 */
1077static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
1078			     int init)
1079{
1080	struct nvm_tgt_dev *dev = pblk->dev;
1081	struct nvm_geo *geo = &dev->geo;
1082	struct pblk_line_meta *lm = &pblk->lm;
1083	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1084	u64 off;
1085	int bit = -1;
1086	int emeta_secs;
1087
1088	line->sec_in_line = lm->sec_per_line;
1089
1090	/* Capture bad block information on line mapping bitmaps */
1091	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
1092					bit + 1)) < lm->blk_per_line) {
1093		off = bit * geo->ws_opt;
1094		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
1095							lm->sec_per_line);
1096		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
1097							lm->sec_per_line);
1098		line->sec_in_line -= geo->clba;
1099	}
1100
1101	/* Mark smeta metadata sectors as bad sectors */
1102	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1103	off = bit * geo->ws_opt;
1104	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
1105	line->sec_in_line -= lm->smeta_sec;
1106	line->smeta_ssec = off;
1107	line->cur_sec = off + lm->smeta_sec;
1108
1109	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
1110		pr_debug("pblk: line smeta I/O failed. Retry\n");
1111		return 1;
1112	}
1113
1114	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
1115
1116	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
1117	 * blocks to make sure that there are enough sectors to store emeta
1118	 */
1119	emeta_secs = lm->emeta_sec[0];
1120	off = lm->sec_per_line;
1121	while (emeta_secs) {
1122		off -= geo->ws_opt;
1123		if (!test_bit(off, line->invalid_bitmap)) {
1124			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
1125			emeta_secs -= geo->ws_opt;
1126		}
1127	}
1128
1129	line->emeta_ssec = off;
1130	line->sec_in_line -= lm->emeta_sec[0];
1131	line->nr_valid_lbas = 0;
1132	line->left_msecs = line->sec_in_line;
1133	*line->vsc = cpu_to_le32(line->sec_in_line);
1134
1135	if (lm->sec_per_line - line->sec_in_line !=
1136		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
1137		spin_lock(&line->lock);
1138		line->state = PBLK_LINESTATE_BAD;
 
 
1139		spin_unlock(&line->lock);
1140
1141		list_add_tail(&line->list, &l_mg->bad_list);
1142		pr_err("pblk: unexpected line %d is bad\n", line->id);
1143
1144		return 0;
1145	}
1146
1147	return 1;
1148}
1149
1150static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
1151{
1152	struct pblk_line_meta *lm = &pblk->lm;
1153	struct nvm_tgt_dev *dev = pblk->dev;
1154	struct nvm_geo *geo = &dev->geo;
1155	int blk_to_erase = atomic_read(&line->blk_in_line);
1156	int i;
1157
1158	for (i = 0; i < lm->blk_per_line; i++) {
1159		struct pblk_lun *rlun = &pblk->luns[i];
1160		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1161		int state = line->chks[pos].state;
1162
1163		/* Free chunks should not be erased */
1164		if (state & NVM_CHK_ST_FREE) {
1165			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
1166							line->erase_bitmap);
1167			blk_to_erase--;
1168		}
1169	}
1170
1171	return blk_to_erase;
1172}
1173
1174static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
1175{
1176	struct pblk_line_meta *lm = &pblk->lm;
 
1177	int blk_to_erase;
1178
1179	line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
1180	if (!line->map_bitmap)
1181		return -ENOMEM;
1182
1183	/* will be initialized using bb info from map_bitmap */
1184	line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_ATOMIC);
1185	if (!line->invalid_bitmap) {
1186		kfree(line->map_bitmap);
1187		return -ENOMEM;
1188	}
1189
1190	/* Bad blocks do not need to be erased */
1191	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
1192
1193	spin_lock(&line->lock);
1194
1195	/* If we have not written to this line, we need to mark up free chunks
1196	 * as already erased
1197	 */
1198	if (line->state == PBLK_LINESTATE_NEW) {
1199		blk_to_erase = pblk_prepare_new_line(pblk, line);
1200		line->state = PBLK_LINESTATE_FREE;
 
 
1201	} else {
1202		blk_to_erase = atomic_read(&line->blk_in_line);
 
 
 
 
 
1203	}
1204
1205	if (line->state != PBLK_LINESTATE_FREE) {
1206		kfree(line->map_bitmap);
1207		kfree(line->invalid_bitmap);
1208		spin_unlock(&line->lock);
1209		WARN(1, "pblk: corrupted line %d, state %d\n",
1210							line->id, line->state);
1211		return -EAGAIN;
 
1212	}
1213
1214	line->state = PBLK_LINESTATE_OPEN;
 
 
1215
1216	atomic_set(&line->left_eblks, blk_to_erase);
1217	atomic_set(&line->left_seblks, blk_to_erase);
1218
1219	line->meta_distance = lm->meta_distance;
1220	spin_unlock(&line->lock);
1221
1222	kref_init(&line->ref);
 
1223
1224	return 0;
1225}
1226
 
1227int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
1228{
1229	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1230	int ret;
1231
1232	spin_lock(&l_mg->free_lock);
1233	l_mg->data_line = line;
1234	list_del(&line->list);
1235
1236	ret = pblk_line_prepare(pblk, line);
1237	if (ret) {
1238		list_add(&line->list, &l_mg->free_list);
1239		spin_unlock(&l_mg->free_lock);
1240		return ret;
1241	}
1242	spin_unlock(&l_mg->free_lock);
1243
1244	pblk_rl_free_lines_dec(&pblk->rl, line, true);
 
 
1245
1246	if (!pblk_line_init_bb(pblk, line, 0)) {
1247		list_add(&line->list, &l_mg->free_list);
1248		return -EINTR;
1249	}
1250
 
1251	return 0;
 
 
 
 
 
 
 
1252}
1253
1254void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1255{
1256	kfree(line->map_bitmap);
 
 
1257	line->map_bitmap = NULL;
1258	line->smeta = NULL;
1259	line->emeta = NULL;
1260}
1261
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1262struct pblk_line *pblk_line_get(struct pblk *pblk)
1263{
1264	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1265	struct pblk_line_meta *lm = &pblk->lm;
1266	struct pblk_line *line;
1267	int ret, bit;
1268
1269	lockdep_assert_held(&l_mg->free_lock);
1270
1271retry:
1272	if (list_empty(&l_mg->free_list)) {
1273		pr_err("pblk: no free lines\n");
1274		return NULL;
1275	}
1276
1277	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
1278	list_del(&line->list);
1279	l_mg->nr_free_lines--;
1280
1281	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
1282	if (unlikely(bit >= lm->blk_per_line)) {
1283		spin_lock(&line->lock);
1284		line->state = PBLK_LINESTATE_BAD;
 
 
1285		spin_unlock(&line->lock);
1286
1287		list_add_tail(&line->list, &l_mg->bad_list);
1288
1289		pr_debug("pblk: line %d is bad\n", line->id);
1290		goto retry;
1291	}
1292
1293	ret = pblk_line_prepare(pblk, line);
1294	if (ret) {
1295		if (ret == -EAGAIN) {
 
 
 
 
1296			list_add(&line->list, &l_mg->corrupt_list);
1297			goto retry;
1298		} else {
1299			pr_err("pblk: failed to prepare line %d\n", line->id);
1300			list_add(&line->list, &l_mg->free_list);
1301			l_mg->nr_free_lines++;
1302			return NULL;
1303		}
1304	}
1305
1306	return line;
1307}
1308
1309static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1310					 struct pblk_line *line)
1311{
1312	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1313	struct pblk_line *retry_line;
1314
1315retry:
1316	spin_lock(&l_mg->free_lock);
1317	retry_line = pblk_line_get(pblk);
1318	if (!retry_line) {
1319		l_mg->data_line = NULL;
1320		spin_unlock(&l_mg->free_lock);
1321		return NULL;
1322	}
1323
 
 
1324	retry_line->smeta = line->smeta;
1325	retry_line->emeta = line->emeta;
1326	retry_line->meta_line = line->meta_line;
1327
1328	pblk_line_free(pblk, line);
 
1329	l_mg->data_line = retry_line;
1330	spin_unlock(&l_mg->free_lock);
1331
1332	pblk_rl_free_lines_dec(&pblk->rl, line, false);
1333
1334	if (pblk_line_erase(pblk, retry_line))
1335		goto retry;
1336
1337	return retry_line;
1338}
1339
1340static void pblk_set_space_limit(struct pblk *pblk)
1341{
1342	struct pblk_rl *rl = &pblk->rl;
1343
1344	atomic_set(&rl->rb_space, 0);
1345}
1346
1347struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1348{
1349	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1350	struct pblk_line *line;
1351
1352	spin_lock(&l_mg->free_lock);
1353	line = pblk_line_get(pblk);
1354	if (!line) {
1355		spin_unlock(&l_mg->free_lock);
1356		return NULL;
1357	}
1358
1359	line->seq_nr = l_mg->d_seq_nr++;
1360	line->type = PBLK_LINETYPE_DATA;
1361	l_mg->data_line = line;
1362
1363	pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1364
1365	/* Allocate next line for preparation */
1366	l_mg->data_next = pblk_line_get(pblk);
1367	if (!l_mg->data_next) {
1368		/* If we cannot get a new line, we need to stop the pipeline.
1369		 * Only allow as many writes in as we can store safely and then
1370		 * fail gracefully
1371		 */
1372		pblk_set_space_limit(pblk);
1373
1374		l_mg->data_next = NULL;
1375	} else {
1376		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1377		l_mg->data_next->type = PBLK_LINETYPE_DATA;
1378	}
1379	spin_unlock(&l_mg->free_lock);
1380
 
 
 
1381	if (pblk_line_erase(pblk, line)) {
1382		line = pblk_line_retry(pblk, line);
1383		if (!line)
1384			return NULL;
1385	}
1386
1387retry_setup:
1388	if (!pblk_line_init_metadata(pblk, line, NULL)) {
1389		line = pblk_line_retry(pblk, line);
1390		if (!line)
1391			return NULL;
1392
1393		goto retry_setup;
1394	}
1395
1396	if (!pblk_line_init_bb(pblk, line, 1)) {
1397		line = pblk_line_retry(pblk, line);
1398		if (!line)
1399			return NULL;
1400
1401		goto retry_setup;
1402	}
1403
1404	pblk_rl_free_lines_dec(&pblk->rl, line, true);
1405
1406	return line;
1407}
1408
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1409static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1410{
1411	lockdep_assert_held(&pblk->l_mg.free_lock);
1412
1413	pblk_set_space_limit(pblk);
1414	pblk->state = PBLK_STATE_STOPPING;
 
1415}
1416
1417static void pblk_line_close_meta_sync(struct pblk *pblk)
1418{
1419	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1420	struct pblk_line_meta *lm = &pblk->lm;
1421	struct pblk_line *line, *tline;
1422	LIST_HEAD(list);
1423
1424	spin_lock(&l_mg->close_lock);
1425	if (list_empty(&l_mg->emeta_list)) {
1426		spin_unlock(&l_mg->close_lock);
1427		return;
1428	}
1429
1430	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1431	spin_unlock(&l_mg->close_lock);
1432
1433	list_for_each_entry_safe(line, tline, &list, list) {
1434		struct pblk_emeta *emeta = line->emeta;
1435
1436		while (emeta->mem < lm->emeta_len[0]) {
1437			int ret;
1438
1439			ret = pblk_submit_meta_io(pblk, line);
1440			if (ret) {
1441				pr_err("pblk: sync meta line %d failed (%d)\n",
1442							line->id, ret);
1443				return;
1444			}
1445		}
1446	}
1447
1448	pblk_wait_for_meta(pblk);
1449	flush_workqueue(pblk->close_wq);
1450}
1451
1452void pblk_pipeline_stop(struct pblk *pblk)
1453{
1454	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1455	int ret;
1456
1457	spin_lock(&l_mg->free_lock);
1458	if (pblk->state == PBLK_STATE_RECOVERING ||
1459					pblk->state == PBLK_STATE_STOPPED) {
1460		spin_unlock(&l_mg->free_lock);
1461		return;
1462	}
1463	pblk->state = PBLK_STATE_RECOVERING;
 
1464	spin_unlock(&l_mg->free_lock);
1465
1466	pblk_flush_writer(pblk);
1467	pblk_wait_for_meta(pblk);
1468
1469	ret = pblk_recov_pad(pblk);
1470	if (ret) {
1471		pr_err("pblk: could not close data on teardown(%d)\n", ret);
1472		return;
1473	}
1474
1475	flush_workqueue(pblk->bb_wq);
1476	pblk_line_close_meta_sync(pblk);
 
 
 
 
 
1477
1478	spin_lock(&l_mg->free_lock);
1479	pblk->state = PBLK_STATE_STOPPED;
 
1480	l_mg->data_line = NULL;
1481	l_mg->data_next = NULL;
1482	spin_unlock(&l_mg->free_lock);
1483}
1484
 
 
 
 
 
 
1485struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1486{
1487	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1488	struct pblk_line *cur, *new = NULL;
1489	unsigned int left_seblks;
1490
1491	cur = l_mg->data_line;
1492	new = l_mg->data_next;
1493	if (!new)
1494		goto out;
 
 
 
1495	l_mg->data_line = new;
1496
1497	spin_lock(&l_mg->free_lock);
1498	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1499	spin_unlock(&l_mg->free_lock);
1500
1501retry_erase:
1502	left_seblks = atomic_read(&new->left_seblks);
1503	if (left_seblks) {
1504		/* If line is not fully erased, erase it */
1505		if (atomic_read(&new->left_eblks)) {
1506			if (pblk_line_erase(pblk, new))
1507				goto out;
1508		} else {
1509			io_schedule();
1510		}
1511		goto retry_erase;
1512	}
1513
 
 
 
1514retry_setup:
1515	if (!pblk_line_init_metadata(pblk, new, cur)) {
1516		new = pblk_line_retry(pblk, new);
1517		if (!new)
1518			goto out;
1519
1520		goto retry_setup;
1521	}
1522
1523	if (!pblk_line_init_bb(pblk, new, 1)) {
1524		new = pblk_line_retry(pblk, new);
1525		if (!new)
1526			goto out;
1527
1528		goto retry_setup;
1529	}
1530
1531	pblk_rl_free_lines_dec(&pblk->rl, new, true);
1532
1533	/* Allocate next line for preparation */
1534	spin_lock(&l_mg->free_lock);
1535	l_mg->data_next = pblk_line_get(pblk);
1536	if (!l_mg->data_next) {
1537		/* If we cannot get a new line, we need to stop the pipeline.
1538		 * Only allow as many writes in as we can store safely and then
1539		 * fail gracefully
1540		 */
1541		pblk_stop_writes(pblk, new);
1542		l_mg->data_next = NULL;
1543	} else {
1544		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1545		l_mg->data_next->type = PBLK_LINETYPE_DATA;
1546	}
1547	spin_unlock(&l_mg->free_lock);
1548
1549out:
1550	return new;
1551}
1552
1553void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
1554{
1555	kfree(line->map_bitmap);
1556	kfree(line->invalid_bitmap);
1557
1558	*line->vsc = cpu_to_le32(EMPTY_ENTRY);
1559
1560	line->map_bitmap = NULL;
1561	line->invalid_bitmap = NULL;
1562	line->smeta = NULL;
1563	line->emeta = NULL;
1564}
1565
1566static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1567{
1568	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1569	struct pblk_gc *gc = &pblk->gc;
1570
1571	spin_lock(&line->lock);
1572	WARN_ON(line->state != PBLK_LINESTATE_GC);
 
 
 
 
 
 
 
 
1573	line->state = PBLK_LINESTATE_FREE;
 
 
1574	line->gc_group = PBLK_LINEGC_NONE;
1575	pblk_line_free(pblk, line);
 
 
 
 
 
 
1576	spin_unlock(&line->lock);
1577
1578	atomic_dec(&gc->pipeline_gc);
1579
1580	spin_lock(&l_mg->free_lock);
1581	list_add_tail(&line->list, &l_mg->free_list);
1582	l_mg->nr_free_lines++;
1583	spin_unlock(&l_mg->free_lock);
1584
1585	pblk_rl_free_lines_inc(&pblk->rl, line);
1586}
1587
1588static void pblk_line_put_ws(struct work_struct *work)
1589{
1590	struct pblk_line_ws *line_put_ws = container_of(work,
1591						struct pblk_line_ws, ws);
1592	struct pblk *pblk = line_put_ws->pblk;
1593	struct pblk_line *line = line_put_ws->line;
1594
1595	__pblk_line_put(pblk, line);
1596	mempool_free(line_put_ws, pblk->gen_ws_pool);
1597}
1598
1599void pblk_line_put(struct kref *ref)
1600{
1601	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1602	struct pblk *pblk = line->pblk;
1603
1604	__pblk_line_put(pblk, line);
1605}
1606
1607void pblk_line_put_wq(struct kref *ref)
1608{
1609	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1610	struct pblk *pblk = line->pblk;
1611	struct pblk_line_ws *line_put_ws;
1612
1613	line_put_ws = mempool_alloc(pblk->gen_ws_pool, GFP_ATOMIC);
1614	if (!line_put_ws)
1615		return;
1616
1617	line_put_ws->pblk = pblk;
1618	line_put_ws->line = line;
1619	line_put_ws->priv = NULL;
1620
1621	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1622	queue_work(pblk->r_end_wq, &line_put_ws->ws);
1623}
1624
1625int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1626{
1627	struct nvm_rq *rqd;
1628	int err;
1629
1630	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1631
1632	pblk_setup_e_rq(pblk, rqd, ppa);
1633
1634	rqd->end_io = pblk_end_io_erase;
1635	rqd->private = pblk;
1636
 
 
 
1637	/* The write thread schedules erases so that it minimizes disturbances
1638	 * with writes. Thus, there is no need to take the LUN semaphore.
1639	 */
1640	err = pblk_submit_io(pblk, rqd);
1641	if (err) {
1642		struct nvm_tgt_dev *dev = pblk->dev;
1643		struct nvm_geo *geo = &dev->geo;
1644
1645		pr_err("pblk: could not async erase line:%d,blk:%d\n",
1646					pblk_ppa_to_line(ppa),
1647					pblk_ppa_to_pos(geo, ppa));
1648	}
1649
1650	return err;
1651}
1652
1653struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1654{
1655	return pblk->l_mg.data_line;
1656}
1657
1658/* For now, always erase next line */
1659struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1660{
1661	return pblk->l_mg.data_next;
1662}
1663
1664int pblk_line_is_full(struct pblk_line *line)
1665{
1666	return (line->left_msecs == 0);
1667}
1668
1669static void pblk_line_should_sync_meta(struct pblk *pblk)
1670{
1671	if (pblk_rl_is_limit(&pblk->rl))
1672		pblk_line_close_meta_sync(pblk);
1673}
1674
1675void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1676{
1677	struct nvm_tgt_dev *dev = pblk->dev;
1678	struct nvm_geo *geo = &dev->geo;
1679	struct pblk_line_meta *lm = &pblk->lm;
1680	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1681	struct list_head *move_list;
1682	int i;
1683
1684#ifdef CONFIG_NVM_DEBUG
1685	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1686				"pblk: corrupt closed line %d\n", line->id);
1687#endif
1688
1689	spin_lock(&l_mg->free_lock);
1690	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1691	spin_unlock(&l_mg->free_lock);
1692
1693	spin_lock(&l_mg->gc_lock);
1694	spin_lock(&line->lock);
1695	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1696	line->state = PBLK_LINESTATE_CLOSED;
1697	move_list = pblk_line_gc_list(pblk, line);
1698
1699	list_add_tail(&line->list, move_list);
1700
1701	kfree(line->map_bitmap);
1702	line->map_bitmap = NULL;
1703	line->smeta = NULL;
1704	line->emeta = NULL;
1705
1706	for (i = 0; i < lm->blk_per_line; i++) {
1707		struct pblk_lun *rlun = &pblk->luns[i];
1708		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1709		int state = line->chks[pos].state;
1710
1711		if (!(state & NVM_CHK_ST_OFFLINE))
1712			state = NVM_CHK_ST_CLOSED;
1713	}
1714
1715	spin_unlock(&line->lock);
1716	spin_unlock(&l_mg->gc_lock);
 
 
 
1717}
1718
1719void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1720{
1721	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1722	struct pblk_line_meta *lm = &pblk->lm;
1723	struct pblk_emeta *emeta = line->emeta;
1724	struct line_emeta *emeta_buf = emeta->buf;
1725	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1726
1727	/* No need for exact vsc value; avoid a big line lock and take aprox. */
1728	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1729	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1730
1731	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1732	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1733	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1734
 
 
 
 
 
 
 
 
 
 
 
 
1735	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1736	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1737
1738	spin_lock(&l_mg->close_lock);
1739	spin_lock(&line->lock);
 
 
 
 
 
 
 
1740	list_add_tail(&line->list, &l_mg->emeta_list);
1741	spin_unlock(&line->lock);
1742	spin_unlock(&l_mg->close_lock);
1743
1744	pblk_line_should_sync_meta(pblk);
1745}
1746
 
 
 
 
 
 
 
 
 
 
 
 
1747void pblk_line_close_ws(struct work_struct *work)
1748{
1749	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1750									ws);
1751	struct pblk *pblk = line_ws->pblk;
1752	struct pblk_line *line = line_ws->line;
 
 
 
 
 
 
 
1753
1754	pblk_line_close(pblk, line);
1755	mempool_free(line_ws, pblk->gen_ws_pool);
1756}
1757
1758void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1759		      void (*work)(struct work_struct *), gfp_t gfp_mask,
1760		      struct workqueue_struct *wq)
1761{
1762	struct pblk_line_ws *line_ws;
1763
1764	line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);
1765
1766	line_ws->pblk = pblk;
1767	line_ws->line = line;
1768	line_ws->priv = priv;
1769
1770	INIT_WORK(&line_ws->ws, work);
1771	queue_work(wq, &line_ws->ws);
1772}
1773
1774static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
1775			     int nr_ppas, int pos)
1776{
1777	struct pblk_lun *rlun = &pblk->luns[pos];
1778	int ret;
1779
1780	/*
1781	 * Only send one inflight I/O per LUN. Since we map at a page
1782	 * granularity, all ppas in the I/O will map to the same LUN
1783	 */
1784#ifdef CONFIG_NVM_DEBUG
1785	int i;
1786
1787	for (i = 1; i < nr_ppas; i++)
1788		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
1789				ppa_list[0].a.ch != ppa_list[i].a.ch);
1790#endif
1791
1792	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1793	if (ret == -ETIME || ret == -EINTR)
1794		pblk_err(pblk, "taking lun semaphore timed out: err %d\n", -ret);
1795}
1796
1797void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1798{
1799	struct nvm_tgt_dev *dev = pblk->dev;
1800	struct nvm_geo *geo = &dev->geo;
1801	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1802
1803	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1804}
1805
1806void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1807		  unsigned long *lun_bitmap)
1808{
1809	struct nvm_tgt_dev *dev = pblk->dev;
1810	struct nvm_geo *geo = &dev->geo;
1811	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1812
1813	/* If the LUN has been locked for this same request, do not attempt to
1814	 * lock it again
1815	 */
1816	if (test_and_set_bit(pos, lun_bitmap))
1817		return;
1818
1819	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
1820}
1821
1822void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
1823{
1824	struct nvm_tgt_dev *dev = pblk->dev;
1825	struct nvm_geo *geo = &dev->geo;
1826	struct pblk_lun *rlun;
1827	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
1828
1829#ifdef CONFIG_NVM_DEBUG
1830	int i;
1831
1832	for (i = 1; i < nr_ppas; i++)
1833		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
1834				ppa_list[0].a.ch != ppa_list[i].a.ch);
1835#endif
1836
1837	rlun = &pblk->luns[pos];
1838	up(&rlun->wr_sem);
1839}
1840
1841void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1842		unsigned long *lun_bitmap)
1843{
1844	struct nvm_tgt_dev *dev = pblk->dev;
1845	struct nvm_geo *geo = &dev->geo;
1846	struct pblk_lun *rlun;
1847	int num_lun = geo->all_luns;
1848	int bit = -1;
1849
1850	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
1851		rlun = &pblk->luns[bit];
1852		up(&rlun->wr_sem);
1853	}
1854}
1855
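/*
 * Editorial sketch: the lun_bitmap protocol that pairs pblk_down_rq()
 * with pblk_up_rq(). Each set bit records a LUN semaphore taken for
 * this request, so a multi-page request never double-acquires and
 * releases exactly what it took. The function below is illustrative,
 * not a pblk entry point.
 */
static void example_write_request(struct pblk *pblk,
				  struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_geo *geo = &pblk->dev->geo;
	unsigned long *lun_bitmap;

	lun_bitmap = kcalloc(BITS_TO_LONGS(geo->all_luns),
				sizeof(unsigned long), GFP_KERNEL);
	if (!lun_bitmap)
		return;

	pblk_down_rq(pblk, ppa_list, nr_ppas, lun_bitmap);
	/* ... submit the write and wait for it to complete ... */
	pblk_up_rq(pblk, ppa_list, nr_ppas, lun_bitmap);

	kfree(lun_bitmap);
}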
1856void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1857{
1858	struct ppa_addr ppa_l2p;
1859
1860	/* logic error: lba out-of-bounds. Ignore update */
1861	if (!(lba < pblk->rl.nr_secs)) {
1862		WARN(1, "pblk: corrupted L2P map request\n");
1863		return;
1864	}
1865
1866	spin_lock(&pblk->trans_lock);
1867	ppa_l2p = pblk_trans_map_get(pblk, lba);
1868
1869	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
1870		pblk_map_invalidate(pblk, ppa_l2p);
1871
1872	pblk_trans_map_set(pblk, lba, ppa);
1873	spin_unlock(&pblk->trans_lock);
1874}
1875
1876void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1877{
1878
1879#ifdef CONFIG_NVM_DEBUG
1880	/* Callers must ensure that the ppa points to a cache address */
1881	BUG_ON(!pblk_addr_in_cache(ppa));
1882	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1883#endif
1884
1885	pblk_update_map(pblk, lba, ppa);
1886}
1887
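/*
 * Editorial sketch: forming the cache address fed to
 * pblk_update_map_cache(). pblk encodes the write buffer position in
 * the ppa itself; 'ring_pos' and 'lba' below are illustrative
 * stand-ins for the writer's context, and pblk_cacheline_to_addr() is
 * the pblk.h helper that builds such an address.
 */
struct ppa_addr cache_ppa = pblk_cacheline_to_addr(ring_pos);

pblk_update_map_cache(pblk, lba, cache_ppa);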
1888int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
1889		       struct pblk_line *gc_line, u64 paddr_gc)
1890{
1891	struct ppa_addr ppa_l2p, ppa_gc;
1892	int ret = 1;
1893
1894#ifdef CONFIG_NVM_DEBUG
1895	/* Callers must ensure that the ppa points to a cache address */
1896	BUG_ON(!pblk_addr_in_cache(ppa_new));
1897	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
1898#endif
1899
1900	/* logic error: lba out-of-bounds. Ignore update */
1901	if (!(lba < pblk->rl.nr_secs)) {
1902		WARN(1, "pblk: corrupted L2P map request\n");
1903		return 0;
1904	}
1905
1906	spin_lock(&pblk->trans_lock);
1907	ppa_l2p = pblk_trans_map_get(pblk, lba);
1908	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
1909
1910	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
1911		spin_lock(&gc_line->lock);
1912		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
1913						"pblk: corrupted GC update");
1914		spin_unlock(&gc_line->lock);
1915
1916		ret = 0;
1917		goto out;
1918	}
1919
1920	pblk_trans_map_set(pblk, lba, ppa_new);
1921out:
1922	spin_unlock(&pblk->trans_lock);
1923	return ret;
1924}
1925
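/*
 * Editorial sketch: consuming the return value. When the GC writer
 * loses the race (a user write refreshed the lba while it was being
 * moved), the buffered copy is turned into padding by clearing its
 * lba, roughly as pblk's ring buffer GC write path does. 'entry' and
 * its fields are illustrative here.
 */
if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, gc_line, paddr_gc))
	entry->w_ctx.lba = ADDR_EMPTY;	/* drop the stale GC copy */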
1926void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
1927			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
1928{
1929	struct ppa_addr ppa_l2p;
1930
1931#ifdef CONFIG_NVM_DEBUG
1932	/* Callers must ensure that the ppa points to a device address */
1933	BUG_ON(pblk_addr_in_cache(ppa_mapped));
1934#endif
1935	/* Invalidate and discard padded entries */
1936	if (lba == ADDR_EMPTY) {
1937		atomic64_inc(&pblk->pad_wa);
1938#ifdef CONFIG_NVM_DEBUG
1939		atomic_long_inc(&pblk->padded_wb);
1940#endif
1941		if (!pblk_ppa_empty(ppa_mapped))
1942			pblk_map_invalidate(pblk, ppa_mapped);
1943		return;
1944	}
1945
1946	/* logic error: lba out-of-bounds. Ignore update */
1947	if (!(lba < pblk->rl.nr_secs)) {
1948		WARN(1, "pblk: corrupted L2P map request\n");
1949		return;
1950	}
1951
1952	spin_lock(&pblk->trans_lock);
1953	ppa_l2p = pblk_trans_map_get(pblk, lba);
1954
1955	/* Do not update L2P if the cacheline has been updated. In this case,
1956	 * the mapped ppa must be invalidated
1957	 */
1958	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
1959		if (!pblk_ppa_empty(ppa_mapped))
1960			pblk_map_invalidate(pblk, ppa_mapped);
1961		goto out;
1962	}
1963
1964#ifdef CONFIG_NVM_DEBUG
1965	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
1966#endif
1967
1968	pblk_trans_map_set(pblk, lba, ppa_mapped);
1969out:
1970	spin_unlock(&pblk->trans_lock);
1971}
1972
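/*
 * Editorial sketch: where the device-side update comes from. As the
 * write buffer drains, each persisted entry hands its lba, the device
 * ppa it was mapped to, and its original cacheline to
 * pblk_update_map_dev(); the fragment below illustrates that call
 * site with assumed names.
 */
pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa, entry->cacheline);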
1973void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
1974			 sector_t blba, int nr_secs)
1975{
1976	int i;
1977
1978	spin_lock(&pblk->trans_lock);
1979	for (i = 0; i < nr_secs; i++) {
1980		struct ppa_addr ppa;
1981
1982		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
1983
1984		/* If the L2P entry maps to a line, the reference is valid */
1985		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
1986			struct pblk_line *line = pblk_ppa_to_line(pblk,
1987										ppa);
1988
1989			kref_get(&line->ref);
1990		}
1991	}
1992	spin_unlock(&pblk->trans_lock);
1993}
1994
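/*
 * Editorial sketch: the matching release for the kref_get() taken in
 * pblk_lookup_l2p_seq() above. Once the read completes, one line
 * reference is dropped per mapped sector; the loop below is
 * illustrative of what pblk's read completion does.
 */
for (i = 0; i < nr_secs; i++) {
	struct ppa_addr ppa = ppas[i];

	if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
		struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);

		kref_put(&line->ref, pblk_line_put);
	}
}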
1995void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
1996			  u64 *lba_list, int nr_secs)
1997{
1998	u64 lba;
1999	int i;
2000
2001	spin_lock(&pblk->trans_lock);
2002	for (i = 0; i < nr_secs; i++) {
2003		lba = lba_list[i];
2004		if (lba != ADDR_EMPTY) {
2005			/* logic error: lba out-of-bounds. Ignore update */
2006			if (!(lba < pblk->rl.nr_secs)) {
2007				WARN(1, "pblk: corrupted L2P map request\n");
2008				continue;
2009			}
2010			ppas[i] = pblk_trans_map_get(pblk, lba);
2011		}
2012	}
2013	spin_unlock(&pblk->trans_lock);
2014}
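/*
 * Usage sketch: GC resolves a victim line's lba list to current ppas
 * in a single trans_lock hold; entries already invalidated arrive as
 * ADDR_EMPTY and are simply skipped. Names below are illustrative.
 */
pblk_lookup_l2p_rand(pblk, ppas, gc_rq->lba_list, gc_rq->nr_secs);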