// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"
#include "pblk-trace.h"

static unsigned int write_buffer_size;

module_param(write_buffer_size, uint, 0644);
MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
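
/* Editorial note (not part of the original source): since the parameter is
 * 0644 it can be set at load time (e.g. "modprobe pblk write_buffer_size=1024"
 * if pblk is built as a module) or later through
 * /sys/module/pblk/parameters/write_buffer_size. pblk_rwb_init() below
 * silently ignores values below the geometry-derived minimum.
 */
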
struct pblk_global_caches {
	struct kmem_cache	*ws;
	struct kmem_cache	*rec;
	struct kmem_cache	*g_rq;
	struct kmem_cache	*w_rq;

	struct kref		kref;

	struct mutex		mutex; /* Ensures consistency between
					* caches and kref
					*/
};

static struct pblk_global_caches pblk_caches = {
	.mutex = __MUTEX_INITIALIZER(pblk_caches.mutex),
	.kref = KREF_INIT(0),
};

struct bio_set pblk_bio_set;

static blk_qc_t pblk_submit_bio(struct bio *bio)
{
	struct pblk *pblk = bio->bi_disk->queue->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(&bio);
		pblk_submit_read(pblk, bio);
	} else {
		/* Prevent deadlock in the case of a modest LUN configuration
		 * and large user I/Os. Unless stalled, the rate limiter
		 * leaves at least 256KB available for user I/O.
		 */
		if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
			blk_queue_split(&bio);

		pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
	}

	return BLK_QC_T_NONE;
}
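
/* Editorial note (not part of the original source): a REQ_OP_DISCARD bio that
 * also carries REQ_PREFLUSH is deliberately not completed in the discard
 * branch above; it falls through to the write path so the flush point is
 * ordered against data still sitting in the write buffer before the bio is
 * ended.
 */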

static const struct block_device_operations pblk_bops = {
	.owner		= THIS_MODULE,
	.submit_bio	= pblk_submit_bio,
};

static size_t pblk_trans_map_size(struct pblk *pblk)
{
	int entry_size = 8;

	if (pblk->addrf_len < 32)
		entry_size = 4;

	return entry_size * pblk->capacity;
}
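
/* Editorial example (not part of the original source): with a packed address
 * format of fewer than 32 bits each L2P entry takes 4 bytes, so a device
 * exposing 2^28 sectors (1 TiB at 4 KiB sectors) needs 2^28 * 4 B = 1 GiB of
 * translation table, which is why the table is vmalloc'ed rather than
 * kmalloc'ed in pblk_l2p_init() below.
 */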

#ifdef CONFIG_NVM_PBLK_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
	size_t map_size;
	u32 crc = ~(u32)0;

	map_size = pblk_trans_map_size(pblk);
	crc = crc32_le(crc, pblk->trans_map, map_size);
	return crc;
}
#endif

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}

static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
{
	struct pblk_line *line = NULL;

	if (factory_init) {
		guid_gen(&pblk->instance_uuid);
	} else {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pblk_err(pblk, "could not recover l2p table\n");
			return -EFAULT;
		}
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	/* Free full lines directly as GC has not been started yet */
	pblk_gc_free_full_lines(pblk);

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line)
			return -EFAULT;
	}

	return 0;
}

static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
{
	sector_t i;
	struct ppa_addr ppa;
	size_t map_size;
	int ret = 0;

	map_size = pblk_trans_map_size(pblk);
	pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN |
				    __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM);
	if (!pblk->trans_map) {
		pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
				map_size);
		return -ENOMEM;
	}

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->capacity; i++)
		pblk_trans_map_set(pblk, i, ppa);

	ret = pblk_l2p_recover(pblk, factory_init);
	if (ret)
		vfree(pblk->trans_map);

	return ret;
}

static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pblk_err(pblk, "write buffer error on tear down\n");

	pblk_rb_free(&pblk->rwb);
}

static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	unsigned long buffer_size;
	int pgs_in_buffer, threshold;

	threshold = geo->mw_cunits * geo->all_luns;
	pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
								* geo->all_luns;

	if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
		buffer_size = write_buffer_size;
	else
		buffer_size = pgs_in_buffer;

	return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
}
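
/* Editorial example (not part of the original source), with an assumed
 * geometry of mw_cunits = 8, ws_opt = 8, all_luns = 64 and csecs = 4096:
 * threshold = 8 * 64 = 512 entries, and
 * pgs_in_buffer = (max(8, 8) + 8) * 64 = 1024 entries, i.e. ~4 MiB of
 * buffered user data. A write_buffer_size module parameter below 1024 would
 * be ignored here.
 */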

static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
			     struct nvm_addrf_12 *dst)
{
	struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->num_ch);
	if (1 << power_len != geo->num_ch) {
		pblk_err(pblk, "supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	dst->ch_len = power_len;

	power_len = get_count_order(geo->num_lun);
	if (1 << power_len != geo->num_lun) {
		pblk_err(pblk, "supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	dst->lun_len = power_len;

	dst->blk_len = src->blk_len;
	dst->pg_len = src->pg_len;
	dst->pln_len = src->pln_len;
	dst->sec_len = src->sec_len;

	dst->sec_offset = 0;
	dst->pln_offset = dst->sec_len;
	dst->ch_offset = dst->pln_offset + dst->pln_len;
	dst->lun_offset = dst->ch_offset + dst->ch_len;
	dst->pg_offset = dst->lun_offset + dst->lun_len;
	dst->blk_offset = dst->pg_offset + dst->pg_len;

	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
	dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
	dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
	dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;

	return dst->blk_offset + src->blk_len;
}
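
/* Editorial example (not part of the original source), using assumed field
 * widths: for a 1.2 geometry with num_ch = 8 (ch_len = 3), num_lun = 4
 * (lun_len = 2) and device-reported sec/pln/pg/blk widths of 2/1/9/10 bits,
 * the generated layout from bit 0 upward is:
 *   sec[1:0] pln[2] ch[5:3] lun[7:6] pg[16:8] blk[26:17]
 * and pblk_set_addrf_12() returns 27, the total address width in bits.
 */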

static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst,
			     struct pblk_addrf *udst)
{
	struct nvm_addrf *src = &geo->addrf;

	adst->ch_len = get_count_order(geo->num_ch);
	adst->lun_len = get_count_order(geo->num_lun);
	adst->chk_len = src->chk_len;
	adst->sec_len = src->sec_len;

	adst->sec_offset = 0;
	adst->ch_offset = adst->sec_len;
	adst->lun_offset = adst->ch_offset + adst->ch_len;
	adst->chk_offset = adst->lun_offset + adst->lun_len;

	adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset;
	adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset;
	adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset;
	adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset;

	udst->sec_stripe = geo->ws_opt;
	udst->ch_stripe = geo->num_ch;
	udst->lun_stripe = geo->num_lun;

	udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe;
	udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe;

	return adst->chk_offset + adst->chk_len;
}
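
/* Editorial example (not part of the original source): for an assumed 2.0
 * geometry with ws_opt = 8, num_ch = 4 and num_lun = 8, the stripe units are
 * sec_stripe = 8, sec_lun_stripe = 8 * 4 = 32 and
 * sec_ws_stripe = 32 * 8 = 256, i.e. logical sectors advance eight at a time
 * across channels first, then across LUNs, wrapping every 256 sectors.
 */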

static int pblk_set_addrf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int mod;

	switch (geo->version) {
	case NVM_OCSSD_SPEC_12:
		div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
		if (mod) {
			pblk_err(pblk, "bad configuration of sectors/pages\n");
			return -EINVAL;
		}

		pblk->addrf_len = pblk_set_addrf_12(pblk, geo,
							(void *)&pblk->addrf);
		break;
	case NVM_OCSSD_SPEC_20:
		pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
							&pblk->uaddrf);
		break;
	default:
		pblk_err(pblk, "OCSSD revision not supported (%d)\n",
								geo->version);
		return -EINVAL;
	}

	return 0;
}

static int pblk_create_global_caches(void)
{
	pblk_caches.ws = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_caches.ws)
		return -ENOMEM;

	pblk_caches.rec = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_caches.rec)
		goto fail_destroy_ws;

	pblk_caches.g_rq = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
	if (!pblk_caches.g_rq)
		goto fail_destroy_rec;

	pblk_caches.w_rq = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_caches.w_rq)
		goto fail_destroy_g_rq;

	return 0;

fail_destroy_g_rq:
	kmem_cache_destroy(pblk_caches.g_rq);
fail_destroy_rec:
	kmem_cache_destroy(pblk_caches.rec);
fail_destroy_ws:
	kmem_cache_destroy(pblk_caches.ws);

	return -ENOMEM;
}

static int pblk_get_global_caches(void)
{
	int ret = 0;

	mutex_lock(&pblk_caches.mutex);

	if (kref_get_unless_zero(&pblk_caches.kref))
		goto out;

	ret = pblk_create_global_caches();
	if (!ret)
		kref_init(&pblk_caches.kref);

out:
	mutex_unlock(&pblk_caches.mutex);
	return ret;
}
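
/* Editorial note (not part of the original source): the caches act as a
 * refcounted singleton. The kref starts at 0, so the first caller's
 * kref_get_unless_zero() fails, the caches get created, and kref_init()
 * (re)sets the count to 1; later callers only take a reference. The mutex
 * makes the "check kref, maybe create" sequence atomic against a concurrent
 * pblk_put_global_caches().
 */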

static void pblk_destroy_global_caches(struct kref *ref)
{
	struct pblk_global_caches *c;

	c = container_of(ref, struct pblk_global_caches, kref);

	kmem_cache_destroy(c->ws);
	kmem_cache_destroy(c->rec);
	kmem_cache_destroy(c->g_rq);
	kmem_cache_destroy(c->w_rq);
}

static void pblk_put_global_caches(void)
{
	mutex_lock(&pblk_caches.mutex);
	kref_put(&pblk_caches.kref, pblk_destroy_global_caches);
	mutex_unlock(&pblk_caches.mutex);
}

static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ret, max_write_ppas;

	atomic64_set(&pblk->user_wa, 0);
	atomic64_set(&pblk->pad_wa, 0);
	atomic64_set(&pblk->gc_wa, 0);
	pblk->user_rst_wa = 0;
	pblk->pad_rst_wa = 0;
	pblk->gc_rst_wa = 0;

	atomic64_set(&pblk->nr_flush, 0);
	pblk->nr_flush_rst = 0;

	pblk->min_write_pgs = geo->ws_opt;
	pblk->min_write_pgs_data = pblk->min_write_pgs;
	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
	pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
		queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	pblk->oob_meta_size = geo->sos;
	if (!pblk_is_oob_meta_supported(pblk)) {
		/* Drives without the OOB metadata feature need so-called
		 * packed metadata in order to support recovery. Packed
		 * metadata stores the same information as OOB metadata
		 * (the L2P table mapping), but in the form of a single
		 * page at the end of every write request.
		 */
		if (pblk->min_write_pgs
			* sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
			/* We want to keep all packed metadata on a single
			 * page per write request, so we need to ensure
			 * that it fits.
			 *
			 * This is more of a sanity check, since no device
			 * has such a large minimal write size (above 1
			 * megabyte).
			 */
			pblk_err(pblk, "unsupported minimum write size\n");
			return -EINVAL;
		}
		/* The packed metadata approach makes a simplification:
		 * on the read path we always issue requests whose size
		 * equals max_write_pgs, with all pages filled with user
		 * payload except the last one, which holds the packed
		 * metadata.
		 */
		pblk->max_write_pgs = pblk->min_write_pgs;
		pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
	}
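
	/* Editorial example (not part of the original source): assuming
	 * ws_opt = 8 and sizeof(struct pblk_sec_meta) = 16, the packed
	 * metadata for one minimal write is 8 * 16 = 128 bytes, comfortably
	 * below a 4 KiB PAGE_SIZE; the -EINVAL branch above would only
	 * trigger for a minimal write size of more than 256 sectors.
	 */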

	pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
								GFP_KERNEL);
	if (!pblk->pad_dist)
		return -ENOMEM;

	if (pblk_get_global_caches())
		goto fail_free_pad_dist;

	/* Internal bios can be at most the sectors signaled by the device. */
	ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
	if (ret)
		goto free_global_caches;

	ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
				     pblk_caches.ws);
	if (ret)
		goto free_page_bio_pool;

	ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
				     pblk_caches.rec);
	if (ret)
		goto free_gen_ws_pool;

	ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
				     pblk_caches.g_rq);
	if (ret)
		goto free_rec_pool;

	ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
				     pblk_caches.g_rq);
	if (ret)
		goto free_r_rq_pool;

	ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
				     pblk_caches.w_rq);
	if (ret)
		goto free_e_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_w_rq_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->r_end_wq)
		goto free_bb_wq;

	if (pblk_set_addrf(pblk))
		goto free_r_end_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	INIT_LIST_HEAD(&pblk->resubmit_list);

	return 0;

free_r_end_wq:
	destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
	mempool_exit(&pblk->w_rq_pool);
free_e_rq_pool:
	mempool_exit(&pblk->e_rq_pool);
free_r_rq_pool:
	mempool_exit(&pblk->r_rq_pool);
free_rec_pool:
	mempool_exit(&pblk->rec_pool);
free_gen_ws_pool:
	mempool_exit(&pblk->gen_ws_pool);
free_page_bio_pool:
	mempool_exit(&pblk->page_bio_pool);
free_global_caches:
	pblk_put_global_caches();
fail_free_pad_dist:
	kfree(pblk->pad_dist);
	return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->close_wq)
		destroy_workqueue(pblk->close_wq);

	if (pblk->r_end_wq)
		destroy_workqueue(pblk->r_end_wq);

	if (pblk->bb_wq)
		destroy_workqueue(pblk->bb_wq);

	mempool_exit(&pblk->page_bio_pool);
	mempool_exit(&pblk->gen_ws_pool);
	mempool_exit(&pblk->rec_pool);
	mempool_exit(&pblk->r_rq_pool);
	mempool_exit(&pblk->e_rq_pool);
	mempool_exit(&pblk->w_rq_pool);

	pblk_put_global_caches();
	kfree(pblk->pad_dist);
}

static void pblk_line_mg_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);
	kfree(l_mg->vsc_list);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		kfree(l_mg->sline_meta[i]);
		kvfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

	mempool_destroy(l_mg->bitmap_pool);
	kmem_cache_destroy(l_mg->bitmap_cache);
}

static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
				struct pblk_line *line)
{
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	kfree(line->blk_bitmap);
	kfree(line->erase_bitmap);
	kfree(line->chks);

	kvfree(w_err_gc->lba_list);
	kfree(w_err_gc);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(line);
		pblk_line_meta_free(l_mg, line);
	}

	pblk_line_mg_free(pblk);

	kfree(pblk->luns);
	kfree(pblk->lines);
}

static int pblk_luns_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i;

	/* TODO: Implement unbalanced LUN support */
	if (geo->num_lun < 0) {
		pblk_err(pblk, "unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
								GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->all_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->num_ch;
		int lun_raw = i / geo->num_ch;
		int lunid = lun_raw + ch * geo->num_lun;

		rlun = &pblk->luns[i];
		rlun->bppa = dev->luns[lunid];

		sema_init(&rlun->wr_sem, 1);
	}

	return 0;
}
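
/* Editorial example (not part of the original source): with an assumed
 * num_ch = 4 and num_lun = 2 (all_luns = 8), the loop above maps i = 0..7 to
 * lunid = 0, 2, 4, 6, 1, 3, 5, 7. pblk's LUN array therefore walks
 * channel-first, so consecutive writes stripe across all channels before
 * reusing a channel's next LUN.
 */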

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len +
			sizeof(struct wa_counters), geo->csecs);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->csecs);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->csecs);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}
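
/* Editorial note (not part of the original source): emeta is laid out as
 * three sector-aligned regions: [1] the line_emeta header plus the bad-block
 * bitmap and write-amplification counters, [2] one u64 lba per data sector in
 * the line (lba_list), and [3] one u32 valid-sector count per line
 * (vsc_list). emeta_sec[0]/emeta_len[0] hold the rounded-up total, computed
 * iteratively by the caller in pblk_line_meta_init().
 */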

static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;
	int sec_meta, blk_meta, clba;
	int minimum;

	if (geo->op == NVM_TARGET_DEFAULT_OP)
		pblk->op = PBLK_DEFAULT_OP;
	else
		pblk->op = geo->op;

	minimum = pblk_get_min_chks(pblk);
	provisioned = nr_free_chks;
	provisioned *= (100 - pblk->op);
	sector_div(provisioned, 100);

	if ((nr_free_chks - provisioned) < minimum) {
		if (geo->op != NVM_TARGET_DEFAULT_OP) {
			pblk_err(pblk, "OP too small to create a sane instance\n");
			return -EINTR;
		}

		/* If the user did not specify an OP value, and PBLK_DEFAULT_OP
		 * is not enough, calculate and set a sane value
		 */

		provisioned = nr_free_chks - minimum;
		pblk->op = (100 * minimum) / nr_free_chks;
		pblk_info(pblk, "Default OP insufficient, adjusting OP to %d\n",
				pblk->op);
	}

	pblk->op_blks = nr_free_chks - provisioned;

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_chks;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
	pblk->capacity = (provisioned - blk_meta) * clba;

	atomic_set(&pblk->rl.free_blocks, nr_free_chks);
	atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);

	return 0;
}
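
/* Editorial example (not part of the original source): assuming
 * nr_free_chks = 1000 and the default 11% over-provisioning
 * (PBLK_DEFAULT_OP), provisioned = 1000 * 89 / 100 = 890 chunks and
 * op_blks = 110. If fewer than pblk_get_min_chks() chunks were left over,
 * pblk would instead derive the smallest workable OP from that minimum.
 */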

static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
				   struct nvm_chk_meta *meta)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int i, nr_bad_chks = 0;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		struct nvm_chk_meta *chunk;
		struct nvm_chk_meta *chunk_meta;
		struct ppa_addr ppa;
		int pos;

		ppa = rlun->bppa;
		pos = pblk_ppa_to_pos(geo, ppa);
		chunk = &line->chks[pos];

		ppa.m.chk = line->id;
		chunk_meta = pblk_chunk_get_off(pblk, meta, ppa);

		chunk->state = chunk_meta->state;
		chunk->type = chunk_meta->type;
		chunk->wi = chunk_meta->wi;
		chunk->slba = chunk_meta->slba;
		chunk->cnlb = chunk_meta->cnlb;
		chunk->wp = chunk_meta->wp;

		trace_pblk_chunk_state(pblk_disk_name(pblk), &ppa,
					chunk->state);

		if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
			WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
			continue;
		}

		if (!(chunk->state & NVM_CHK_ST_OFFLINE))
			continue;

		set_bit(pos, line->blk_bitmap);
		nr_bad_chks++;
	}

	return nr_bad_chks;
}

static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
				 void *chunk_meta, int line_id)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	long nr_bad_chks, chk_in_line;

	line->pblk = pblk;
	line->id = line_id;
	line->type = PBLK_LINETYPE_FREE;
	line->state = PBLK_LINESTATE_NEW;
	line->gc_group = PBLK_LINEGC_NONE;
	line->vsc = &l_mg->vsc_list[line_id];
	spin_lock_init(&line->lock);

	nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);

	chk_in_line = lm->blk_per_line - nr_bad_chks;
	if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
					chk_in_line < lm->min_blk_line) {
		line->state = PBLK_LINESTATE_BAD;
		list_add_tail(&line->list, &l_mg->bad_list);
		return 0;
	}

	atomic_set(&line->blk_in_line, chk_in_line);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;

	return chk_in_line;
}

static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap)
		goto free_blk_bitmap;

	line->chks = kmalloc_array(lm->blk_per_line,
				   sizeof(struct nvm_chk_meta), GFP_KERNEL);
	if (!line->chks)
		goto free_erase_bitmap;

	line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
	if (!line->w_err_gc)
		goto free_chks;

	return 0;

free_chks:
	kfree(line->chks);
free_erase_bitmap:
	kfree(line->erase_bitmap);
free_blk_bitmap:
	kfree(line->blk_bitmap);
	return -ENOMEM;
}

static int pblk_line_mg_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i, bb_distance;

	l_mg->nr_lines = geo->num_chk;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);
	INIT_LIST_HEAD(&l_mg->gc_werr_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_werr_list;
	l_mg->gc_lists[1] = &l_mg->gc_high_list;
	l_mg->gc_lists[2] = &l_mg->gc_mid_list;
	l_mg->gc_lists[3] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template)
		goto fail_free_vsc_list;

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux)
		goto fail_free_bb_template;

	/* smeta is always small enough to fit on a kmalloc memory allocation;
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
			lm->sec_bitmap_len, 0, 0, NULL);
	if (!l_mg->bitmap_cache)
		goto fail_free_smeta;

	/* the bitmap pool is used for both valid and map bitmaps */
	l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
				l_mg->bitmap_cache);
	if (!l_mg->bitmap_pool)
		goto fail_destroy_bitmap_cache;

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		emeta->buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
		if (!emeta->buf) {
			kfree(emeta);
			goto fail_free_emeta;
		}

		emeta->nr_entries = lm->emeta_sec[0];
		l_mg->eline_meta[i] = emeta;
	}

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	bb_distance = (geo->all_luns) * geo->ws_opt;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->ws_opt);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		kvfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

	mempool_destroy(l_mg->bitmap_pool);
fail_destroy_bitmap_cache:
	kmem_cache_destroy(l_mg->bitmap_cache);
fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_vsc_list:
	kfree(l_mg->vsc_list);
fail:
	return -ENOMEM;
}

static int pblk_line_meta_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int smeta_len, emeta_len;
	int i;

	lm->sec_per_line = geo->clba * geo->all_luns;
	lm->blk_per_line = geo->all_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->mid_thrs = lm->sec_per_line / 2;
	lm->high_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->ws_opt;
	lm->smeta_len = lm->smeta_sec * geo->csecs;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->ws_opt;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

	lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;

	lm->min_blk_line = 1;
	if (geo->all_luns > 1)
		lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
					lm->emeta_sec[0], geo->clba);

	if (lm->min_blk_line > lm->blk_per_line) {
		pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		return -EINVAL;
	}

	return 0;
}
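
/* Editorial example (not part of the original source): assuming ws_opt = 8,
 * csecs = 4096 and a 64-LUN line, one smeta "page" is 8 * 4096 = 32 KiB,
 * which already exceeds sizeof(struct line_smeta) plus an 8-byte LUN bitmap,
 * so the add_smeta_page loop terminates with i = 1 (smeta_sec = 8). emeta
 * typically needs several iterations, since its lba_list grows with the
 * number of data sectors in the line.
 */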

static int pblk_lines_init(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	void *chunk_meta;
	int nr_free_chks = 0;
	int i, ret;

	ret = pblk_line_meta_init(pblk);
	if (ret)
		return ret;

	ret = pblk_line_mg_init(pblk);
	if (ret)
		return ret;

	ret = pblk_luns_init(pblk);
	if (ret)
		goto fail_free_meta;

	chunk_meta = pblk_get_chunk_meta(pblk);
	if (IS_ERR(chunk_meta)) {
		ret = PTR_ERR(chunk_meta);
		goto fail_free_luns;
	}

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_chunk_meta;
	}

	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		ret = pblk_alloc_line_meta(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);

		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
								line->state);
	}

	if (!nr_free_chks) {
		pblk_err(pblk, "too many bad blocks to create a sane instance\n");
		ret = -EINTR;
		goto fail_free_lines;
	}

	ret = pblk_set_provision(pblk, nr_free_chks);
	if (ret)
		goto fail_free_lines;

	vfree(chunk_meta);
	return 0;

fail_free_lines:
	while (--i >= 0)
		pblk_line_meta_free(l_mg, &pblk->lines[i]);
	kfree(pblk->lines);
fail_free_chunk_meta:
	vfree(chunk_meta);
fail_free_luns:
	kfree(pblk->luns);
fail_free_meta:
	pblk_line_mg_free(pblk);

	return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		int err = PTR_ERR(pblk->writer_ts);

		if (err != -EINTR)
			pblk_err(pblk, "could not allocate writer kthread (%d)\n",
					err);
		return err;
	}

	timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	/* The pipeline must be stopped and the write buffer emptied before the
	 * write thread is stopped
	 */
	WARN(pblk_rb_read_count(&pblk->rwb),
			"Stopping not fully persisted write buffer\n");

	WARN(pblk_rb_sync_count(&pblk->rwb),
			"Stopping not fully synced write buffer\n");

	del_timer_sync(&pblk->wtimer);
	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
}

static void pblk_free(struct pblk *pblk)
{
	pblk_lines_free(pblk);
	pblk_l2p_free(pblk);
	pblk_rwb_free(pblk);
	pblk_core_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk, bool graceful)
{
	if (graceful)
		__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rl_free(&pblk->rl);

	pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
}

static void pblk_exit(void *private, bool graceful)
{
	struct pblk *pblk = private;

	pblk_gc_exit(pblk, graceful);
	pblk_tear_down(pblk, graceful);

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	pblk_free(pblk);
}

static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}
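
/* Editorial note (not part of the original source): pblk->capacity counts
 * device-sized sectors (typically 4 KiB), while the block layer expects
 * 512-byte sectors; NR_PHY_IN_LOG (assumed 4096 / 512 = 8, per pblk.h)
 * converts between the two.
 */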

static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;
	pblk->state = PBLK_STATE_RUNNING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	pblk->gc.gc_enabled = 0;

	if (!(geo->version == NVM_OCSSD_SPEC_12 ||
					geo->version == NVM_OCSSD_SPEC_20)) {
		pblk_err(pblk, "OCSSD version not supported (%u)\n",
							geo->version);
		kfree(pblk);
		return ERR_PTR(-EINVAL);
	}

	if (geo->ext) {
		pblk_err(pblk, "extended metadata not supported\n");
		kfree(pblk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_init(&pblk->resubmit_lock);
	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
	atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_core_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize core\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize lines\n");
		goto fail_free_core;
	}

	ret = pblk_rwb_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize write buffer\n");
		goto fail_free_lines;
	}

	ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
	if (ret) {
		pblk_err(pblk, "could not initialize maps\n");
		goto fail_free_rwb;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		if (ret != -EINTR)
			pblk_err(pblk, "could not initialize write thread\n");
		goto fail_free_l2p;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->clba * geo->csecs;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);

	pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->all_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->capacity,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);

	/* Check if we need to start GC */
	pblk_gc_should_kick(pblk);

	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_rwb:
	pblk_rwb_free(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.bops		= &pblk_bops,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
	.owner		= THIS_MODULE,
};
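
/* Editorial example (not part of the original source): a pblk target is
 * typically instantiated from userspace through the LightNVM ioctls, e.g.
 * with nvme-cli's lnvm plugin (command shown for illustration):
 *   nvme lnvm create -d nvme0n1 -n pblk0 -t pblk -b 0 -e 63
 * which ends up calling pblk_init() above with the chosen LUN range.
 */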

static int __init pblk_module_init(void)
{
	int ret;

	ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret)
		return ret;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_exit(&pblk_bio_set);
	return ret;
}

static void pblk_module_exit(void)
{
	bioset_exit(&pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
   4 * Copyright (C) 2016 CNEX Labs
   5 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
   6 *                  Matias Bjorling <matias@cnexlabs.com>
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License version
  10 * 2 as published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful, but
  13 * WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * General Public License for more details.
  16 *
  17 * Implementation of a physical block-device target for Open-channel SSDs.
  18 *
  19 * pblk-init.c - pblk's initialization.
  20 */
  21
  22#include "pblk.h"
  23#include "pblk-trace.h"
  24
  25static unsigned int write_buffer_size;
  26
  27module_param(write_buffer_size, uint, 0644);
  28MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
  29
  30struct pblk_global_caches {
  31	struct kmem_cache	*ws;
  32	struct kmem_cache	*rec;
  33	struct kmem_cache	*g_rq;
  34	struct kmem_cache	*w_rq;
  35
  36	struct kref		kref;
  37
  38	struct mutex		mutex; /* Ensures consistency between
  39					* caches and kref
  40					*/
  41};
  42
  43static struct pblk_global_caches pblk_caches = {
  44	.mutex = __MUTEX_INITIALIZER(pblk_caches.mutex),
  45	.kref = KREF_INIT(0),
  46};
  47
  48struct bio_set pblk_bio_set;
  49
  50static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
  51{
  52	struct pblk *pblk = q->queuedata;
  53
  54	if (bio_op(bio) == REQ_OP_DISCARD) {
  55		pblk_discard(pblk, bio);
  56		if (!(bio->bi_opf & REQ_PREFLUSH)) {
  57			bio_endio(bio);
  58			return BLK_QC_T_NONE;
  59		}
  60	}
  61
  62	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
  63	 * constraint. Writes can be of arbitrary size.
  64	 */
  65	if (bio_data_dir(bio) == READ) {
  66		blk_queue_split(q, &bio);
  67		pblk_submit_read(pblk, bio);
  68	} else {
  69		/* Prevent deadlock in the case of a modest LUN configuration
  70		 * and large user I/Os. Unless stalled, the rate limiter
  71		 * leaves at least 256KB available for user I/O.
  72		 */
  73		if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
  74			blk_queue_split(q, &bio);
  75
  76		pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
  77	}
  78
  79	return BLK_QC_T_NONE;
  80}
  81
 
 
 
 
 
 
  82static size_t pblk_trans_map_size(struct pblk *pblk)
  83{
  84	int entry_size = 8;
  85
  86	if (pblk->addrf_len < 32)
  87		entry_size = 4;
  88
  89	return entry_size * pblk->capacity;
  90}
  91
  92#ifdef CONFIG_NVM_PBLK_DEBUG
  93static u32 pblk_l2p_crc(struct pblk *pblk)
  94{
  95	size_t map_size;
  96	u32 crc = ~(u32)0;
  97
  98	map_size = pblk_trans_map_size(pblk);
  99	crc = crc32_le(crc, pblk->trans_map, map_size);
 100	return crc;
 101}
 102#endif
 103
 104static void pblk_l2p_free(struct pblk *pblk)
 105{
 106	vfree(pblk->trans_map);
 107}
 108
 109static int pblk_l2p_recover(struct pblk *pblk, bool factory_init)
 110{
 111	struct pblk_line *line = NULL;
 112
 113	if (factory_init) {
 114		guid_gen(&pblk->instance_uuid);
 115	} else {
 116		line = pblk_recov_l2p(pblk);
 117		if (IS_ERR(line)) {
 118			pblk_err(pblk, "could not recover l2p table\n");
 119			return -EFAULT;
 120		}
 121	}
 122
 123#ifdef CONFIG_NVM_PBLK_DEBUG
 124	pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
 125#endif
 126
 127	/* Free full lines directly as GC has not been started yet */
 128	pblk_gc_free_full_lines(pblk);
 129
 130	if (!line) {
 131		/* Configure next line for user data */
 132		line = pblk_line_get_first_data(pblk);
 133		if (!line)
 134			return -EFAULT;
 135	}
 136
 137	return 0;
 138}
 139
 140static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
 141{
 142	sector_t i;
 143	struct ppa_addr ppa;
 144	size_t map_size;
 145	int ret = 0;
 146
 147	map_size = pblk_trans_map_size(pblk);
 148	pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN
 149					| __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM,
 150					PAGE_KERNEL);
 151	if (!pblk->trans_map) {
 152		pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
 153				map_size);
 154		return -ENOMEM;
 155	}
 156
 157	pblk_ppa_set_empty(&ppa);
 158
 159	for (i = 0; i < pblk->capacity; i++)
 160		pblk_trans_map_set(pblk, i, ppa);
 161
 162	ret = pblk_l2p_recover(pblk, factory_init);
 163	if (ret)
 164		vfree(pblk->trans_map);
 165
 166	return ret;
 167}
 168
 169static void pblk_rwb_free(struct pblk *pblk)
 170{
 171	if (pblk_rb_tear_down_check(&pblk->rwb))
 172		pblk_err(pblk, "write buffer error on tear down\n");
 173
 174	pblk_rb_free(&pblk->rwb);
 175}
 176
 177static int pblk_rwb_init(struct pblk *pblk)
 178{
 179	struct nvm_tgt_dev *dev = pblk->dev;
 180	struct nvm_geo *geo = &dev->geo;
 181	unsigned long buffer_size;
 182	int pgs_in_buffer, threshold;
 183
 184	threshold = geo->mw_cunits * geo->all_luns;
 185	pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
 186								* geo->all_luns;
 187
 188	if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
 189		buffer_size = write_buffer_size;
 190	else
 191		buffer_size = pgs_in_buffer;
 192
 193	return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
 194}
 195
 196static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
 197			     struct nvm_addrf_12 *dst)
 198{
 199	struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf;
 200	int power_len;
 201
 202	/* Re-calculate channel and lun format to adapt to configuration */
 203	power_len = get_count_order(geo->num_ch);
 204	if (1 << power_len != geo->num_ch) {
 205		pblk_err(pblk, "supports only power-of-two channel config.\n");
 206		return -EINVAL;
 207	}
 208	dst->ch_len = power_len;
 209
 210	power_len = get_count_order(geo->num_lun);
 211	if (1 << power_len != geo->num_lun) {
 212		pblk_err(pblk, "supports only power-of-two LUN config.\n");
 213		return -EINVAL;
 214	}
 215	dst->lun_len = power_len;
 216
 217	dst->blk_len = src->blk_len;
 218	dst->pg_len = src->pg_len;
 219	dst->pln_len = src->pln_len;
 220	dst->sec_len = src->sec_len;
 221
 222	dst->sec_offset = 0;
 223	dst->pln_offset = dst->sec_len;
 224	dst->ch_offset = dst->pln_offset + dst->pln_len;
 225	dst->lun_offset = dst->ch_offset + dst->ch_len;
 226	dst->pg_offset = dst->lun_offset + dst->lun_len;
 227	dst->blk_offset = dst->pg_offset + dst->pg_len;
 228
 229	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
 230	dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
 231	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
 232	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
 233	dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
 234	dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
 235
 236	return dst->blk_offset + src->blk_len;
 237}
 238
 239static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst,
 240			     struct pblk_addrf *udst)
 241{
 242	struct nvm_addrf *src = &geo->addrf;
 243
 244	adst->ch_len = get_count_order(geo->num_ch);
 245	adst->lun_len = get_count_order(geo->num_lun);
 246	adst->chk_len = src->chk_len;
 247	adst->sec_len = src->sec_len;
 248
 249	adst->sec_offset = 0;
 250	adst->ch_offset = adst->sec_len;
 251	adst->lun_offset = adst->ch_offset + adst->ch_len;
 252	adst->chk_offset = adst->lun_offset + adst->lun_len;
 253
 254	adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset;
 255	adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset;
 256	adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset;
 257	adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset;
 258
 259	udst->sec_stripe = geo->ws_opt;
 260	udst->ch_stripe = geo->num_ch;
 261	udst->lun_stripe = geo->num_lun;
 262
 263	udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe;
 264	udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe;
 265
 266	return adst->chk_offset + adst->chk_len;
 267}
 268
 269static int pblk_set_addrf(struct pblk *pblk)
 270{
 271	struct nvm_tgt_dev *dev = pblk->dev;
 272	struct nvm_geo *geo = &dev->geo;
 273	int mod;
 274
 275	switch (geo->version) {
 276	case NVM_OCSSD_SPEC_12:
 277		div_u64_rem(geo->clba, pblk->min_write_pgs, &mod);
 278		if (mod) {
 279			pblk_err(pblk, "bad configuration of sectors/pages\n");
 280			return -EINVAL;
 281		}
 282
 283		pblk->addrf_len = pblk_set_addrf_12(pblk, geo,
 284							(void *)&pblk->addrf);
 285		break;
 286	case NVM_OCSSD_SPEC_20:
 287		pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf,
 288							&pblk->uaddrf);
 289		break;
 290	default:
 291		pblk_err(pblk, "OCSSD revision not supported (%d)\n",
 292								geo->version);
 293		return -EINVAL;
 294	}
 295
 296	return 0;
 297}
 298
 299static int pblk_create_global_caches(void)
 300{
 301
 302	pblk_caches.ws = kmem_cache_create("pblk_blk_ws",
 303				sizeof(struct pblk_line_ws), 0, 0, NULL);
 304	if (!pblk_caches.ws)
 305		return -ENOMEM;
 306
 307	pblk_caches.rec = kmem_cache_create("pblk_rec",
 308				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
 309	if (!pblk_caches.rec)
 310		goto fail_destroy_ws;
 311
 312	pblk_caches.g_rq = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
 313				0, 0, NULL);
 314	if (!pblk_caches.g_rq)
 315		goto fail_destroy_rec;
 316
 317	pblk_caches.w_rq = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
 318				0, 0, NULL);
 319	if (!pblk_caches.w_rq)
 320		goto fail_destroy_g_rq;
 321
 322	return 0;
 323
 324fail_destroy_g_rq:
 325	kmem_cache_destroy(pblk_caches.g_rq);
 326fail_destroy_rec:
 327	kmem_cache_destroy(pblk_caches.rec);
 328fail_destroy_ws:
 329	kmem_cache_destroy(pblk_caches.ws);
 330
 331	return -ENOMEM;
 332}
 333
 334static int pblk_get_global_caches(void)
 335{
 336	int ret = 0;
 337
 338	mutex_lock(&pblk_caches.mutex);
 339
 340	if (kref_get_unless_zero(&pblk_caches.kref))
 341		goto out;
 342
 343	ret = pblk_create_global_caches();
 344	if (!ret)
 345		kref_init(&pblk_caches.kref);
 346
 347out:
 348	mutex_unlock(&pblk_caches.mutex);
 349	return ret;
 350}
 351
 352static void pblk_destroy_global_caches(struct kref *ref)
 353{
 354	struct pblk_global_caches *c;
 355
 356	c = container_of(ref, struct pblk_global_caches, kref);
 357
 358	kmem_cache_destroy(c->ws);
 359	kmem_cache_destroy(c->rec);
 360	kmem_cache_destroy(c->g_rq);
 361	kmem_cache_destroy(c->w_rq);
 362}
 363
 364static void pblk_put_global_caches(void)
 365{
 366	mutex_lock(&pblk_caches.mutex);
 367	kref_put(&pblk_caches.kref, pblk_destroy_global_caches);
 368	mutex_unlock(&pblk_caches.mutex);
 369}
 370
 371static int pblk_core_init(struct pblk *pblk)
 372{
 373	struct nvm_tgt_dev *dev = pblk->dev;
 374	struct nvm_geo *geo = &dev->geo;
 375	int ret, max_write_ppas;
 376
 377	atomic64_set(&pblk->user_wa, 0);
 378	atomic64_set(&pblk->pad_wa, 0);
 379	atomic64_set(&pblk->gc_wa, 0);
 380	pblk->user_rst_wa = 0;
 381	pblk->pad_rst_wa = 0;
 382	pblk->gc_rst_wa = 0;
 383
 384	atomic64_set(&pblk->nr_flush, 0);
 385	pblk->nr_flush_rst = 0;
 386
 387	pblk->min_write_pgs = geo->ws_opt;
 388	pblk->min_write_pgs_data = pblk->min_write_pgs;
 389	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
 390	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
 391	pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
 392		queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
 393	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
 394
 395	pblk->oob_meta_size = geo->sos;
 396	if (!pblk_is_oob_meta_supported(pblk)) {
 397		/* For drives which does not have OOB metadata feature
 398		 * in order to support recovery feature we need to use
 399		 * so called packed metadata. Packed metada will store
 400		 * the same information as OOB metadata (l2p table mapping,
 401		 * but in the form of the single page at the end of
 402		 * every write request.
 403		 */
 404		if (pblk->min_write_pgs
 405			* sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
 406			/* We want to keep all the packed metadata on single
 407			 * page per write requests. So we need to ensure that
 408			 * it will fit.
 409			 *
 410			 * This is more like sanity check, since there is
 411			 * no device with such a big minimal write size
 412			 * (above 1 metabytes).
 413			 */
 414			pblk_err(pblk, "Not supported min write size\n");
 415			return -EINVAL;
 416		}
 417		/* For packed meta approach we do some simplification.
 418		 * On read path we always issue requests which size
 419		 * equal to max_write_pgs, with all pages filled with
 420		 * user payload except of last one page which will be
 421		 * filled with packed metadata.
 422		 */
 423		pblk->max_write_pgs = pblk->min_write_pgs;
 424		pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
 425	}
 426
 427	pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
 428								GFP_KERNEL);
 429	if (!pblk->pad_dist)
 430		return -ENOMEM;
 431
 432	if (pblk_get_global_caches())
 433		goto fail_free_pad_dist;
 434
 435	/* Internal bios can be at most the sectors signaled by the device. */
 436	ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
 437	if (ret)
 438		goto free_global_caches;
 439
 440	ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
 441				     pblk_caches.ws);
 442	if (ret)
 443		goto free_page_bio_pool;
 444
 445	ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
 446				     pblk_caches.rec);
 447	if (ret)
 448		goto free_gen_ws_pool;
 449
 450	ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
 451				     pblk_caches.g_rq);
 452	if (ret)
 453		goto free_rec_pool;
 454
 455	ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
 456				     pblk_caches.g_rq);
 457	if (ret)
 458		goto free_r_rq_pool;
 459
 460	ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
 461				     pblk_caches.w_rq);
 462	if (ret)
 463		goto free_e_rq_pool;
 464
 465	pblk->close_wq = alloc_workqueue("pblk-close-wq",
 466			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
 467	if (!pblk->close_wq)
 468		goto free_w_rq_pool;
 469
 470	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
 471			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 472	if (!pblk->bb_wq)
 473		goto free_close_wq;
 474
 475	pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
 476			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 477	if (!pblk->r_end_wq)
 478		goto free_bb_wq;
 479
 480	if (pblk_set_addrf(pblk))
 481		goto free_r_end_wq;
 482
 483	INIT_LIST_HEAD(&pblk->compl_list);
 484	INIT_LIST_HEAD(&pblk->resubmit_list);
 485
 486	return 0;
 487
 488free_r_end_wq:
 489	destroy_workqueue(pblk->r_end_wq);
 490free_bb_wq:
 491	destroy_workqueue(pblk->bb_wq);
 492free_close_wq:
 493	destroy_workqueue(pblk->close_wq);
 494free_w_rq_pool:
 495	mempool_exit(&pblk->w_rq_pool);
 496free_e_rq_pool:
 497	mempool_exit(&pblk->e_rq_pool);
 498free_r_rq_pool:
 499	mempool_exit(&pblk->r_rq_pool);
 500free_rec_pool:
 501	mempool_exit(&pblk->rec_pool);
 502free_gen_ws_pool:
 503	mempool_exit(&pblk->gen_ws_pool);
 504free_page_bio_pool:
 505	mempool_exit(&pblk->page_bio_pool);
 506free_global_caches:
 507	pblk_put_global_caches();
 508fail_free_pad_dist:
 509	kfree(pblk->pad_dist);
 510	return -ENOMEM;
 511}
 512
 513static void pblk_core_free(struct pblk *pblk)
 514{
 515	if (pblk->close_wq)
 516		destroy_workqueue(pblk->close_wq);
 517
 518	if (pblk->r_end_wq)
 519		destroy_workqueue(pblk->r_end_wq);
 520
 521	if (pblk->bb_wq)
 522		destroy_workqueue(pblk->bb_wq);
 523
 524	mempool_exit(&pblk->page_bio_pool);
 525	mempool_exit(&pblk->gen_ws_pool);
 526	mempool_exit(&pblk->rec_pool);
 527	mempool_exit(&pblk->r_rq_pool);
 528	mempool_exit(&pblk->e_rq_pool);
 529	mempool_exit(&pblk->w_rq_pool);
 530
 531	pblk_put_global_caches();
 532	kfree(pblk->pad_dist);
 533}
 534
 535static void pblk_line_mg_free(struct pblk *pblk)
 536{
 537	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 538	int i;
 539
 540	kfree(l_mg->bb_template);
 541	kfree(l_mg->bb_aux);
 542	kfree(l_mg->vsc_list);
 543
 544	for (i = 0; i < PBLK_DATA_LINES; i++) {
 545		kfree(l_mg->sline_meta[i]);
 546		kvfree(l_mg->eline_meta[i]->buf);
 547		kfree(l_mg->eline_meta[i]);
 548	}
 549
 550	mempool_destroy(l_mg->bitmap_pool);
 551	kmem_cache_destroy(l_mg->bitmap_cache);
 552}
 553
 554static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
 555				struct pblk_line *line)
 556{
 557	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
 558
 559	kfree(line->blk_bitmap);
 560	kfree(line->erase_bitmap);
 561	kfree(line->chks);
 562
 563	kvfree(w_err_gc->lba_list);
 564	kfree(w_err_gc);
 565}
 566
 567static void pblk_lines_free(struct pblk *pblk)
 568{
 569	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 570	struct pblk_line *line;
 571	int i;
 572
 573	for (i = 0; i < l_mg->nr_lines; i++) {
 574		line = &pblk->lines[i];
 575
 576		pblk_line_free(line);
 577		pblk_line_meta_free(l_mg, line);
 578	}
 579
 580	pblk_line_mg_free(pblk);
 581
 582	kfree(pblk->luns);
 583	kfree(pblk->lines);
 584}
 585
 586static int pblk_luns_init(struct pblk *pblk)
 587{
 588	struct nvm_tgt_dev *dev = pblk->dev;
 589	struct nvm_geo *geo = &dev->geo;
 590	struct pblk_lun *rlun;
 591	int i;
 592
 593	/* TODO: Implement unbalanced LUN support */
 594	if (geo->num_lun < 0) {
 595		pblk_err(pblk, "unbalanced LUN config.\n");
 596		return -EINVAL;
 597	}
 598
 599	pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
 600								GFP_KERNEL);
 601	if (!pblk->luns)
 602		return -ENOMEM;
 603
 604	for (i = 0; i < geo->all_luns; i++) {
 605		/* Stripe across channels */
 606		int ch = i % geo->num_ch;
 607		int lun_raw = i / geo->num_ch;
 608		int lunid = lun_raw + ch * geo->num_lun;
 609
 610		rlun = &pblk->luns[i];
 611		rlun->bppa = dev->luns[lunid];
 612
 613		sema_init(&rlun->wr_sem, 1);
 614	}
 615
 616	return 0;
 617}
 618
/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len +
			sizeof(struct wa_counters), geo->csecs);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->csecs;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->csecs);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->csecs);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}

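/* Derive the over-provisioning (OP) ratio and the exposed capacity. OP comes
 * from the target geometry or PBLK_DEFAULT_OP, and is raised automatically
 * if it would leave fewer spare chunks than pblk_get_min_chks() requires.
 */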
static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;
	int sec_meta, blk_meta, clba;
	int minimum;

	if (geo->op == NVM_TARGET_DEFAULT_OP)
		pblk->op = PBLK_DEFAULT_OP;
	else
		pblk->op = geo->op;

	minimum = pblk_get_min_chks(pblk);
	provisioned = nr_free_chks;
	provisioned *= (100 - pblk->op);
	sector_div(provisioned, 100);

	if ((nr_free_chks - provisioned) < minimum) {
		if (geo->op != NVM_TARGET_DEFAULT_OP) {
			pblk_err(pblk, "OP too small to create a sane instance\n");
			return -EINTR;
		}

		/* If the user did not specify an OP value, and PBLK_DEFAULT_OP
		 * is not enough, calculate and set a sane value
		 */

		provisioned = nr_free_chks - minimum;
		pblk->op = (100 * minimum) / nr_free_chks;
		pblk_info(pblk, "Default OP insufficient, adjusting OP to %d\n",
				pblk->op);
	}

	pblk->op_blks = nr_free_chks - provisioned;

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_chks;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
	pblk->capacity = (provisioned - blk_meta) * clba;

	atomic_set(&pblk->rl.free_blocks, nr_free_chks);
	atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);

	return 0;
}

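/* Copy per-chunk state from the device-reported chunk metadata into the
 * line's chunk array, and mark offline chunks in the line's bad-block
 * bitmap. Returns the number of bad chunks found in the line.
 */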
static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
				   struct nvm_chk_meta *meta)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int i, nr_bad_chks = 0;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		struct nvm_chk_meta *chunk;
		struct nvm_chk_meta *chunk_meta;
		struct ppa_addr ppa;
		int pos;

		ppa = rlun->bppa;
		pos = pblk_ppa_to_pos(geo, ppa);
		chunk = &line->chks[pos];

		ppa.m.chk = line->id;
		chunk_meta = pblk_chunk_get_off(pblk, meta, ppa);

		chunk->state = chunk_meta->state;
		chunk->type = chunk_meta->type;
		chunk->wi = chunk_meta->wi;
		chunk->slba = chunk_meta->slba;
		chunk->cnlb = chunk_meta->cnlb;
		chunk->wp = chunk_meta->wp;

		trace_pblk_chunk_state(pblk_disk_name(pblk), &ppa,
					chunk->state);

		if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
			WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
			continue;
		}

		if (!(chunk->state & NVM_CHK_ST_OFFLINE))
			continue;

		set_bit(pos, line->blk_bitmap);
		nr_bad_chks++;
	}

	return nr_bad_chks;
}

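/* Initialize a line's bookkeeping and classify it: lines with fewer good
 * chunks than min_blk_line are put on the bad list; the rest join the free
 * list. Returns the number of usable chunks (0 for bad lines).
 */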
static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
				 void *chunk_meta, int line_id)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	long nr_bad_chks, chk_in_line;

	line->pblk = pblk;
	line->id = line_id;
	line->type = PBLK_LINETYPE_FREE;
	line->state = PBLK_LINESTATE_NEW;
	line->gc_group = PBLK_LINEGC_NONE;
	line->vsc = &l_mg->vsc_list[line_id];
	spin_lock_init(&line->lock);

	nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);

	chk_in_line = lm->blk_per_line - nr_bad_chks;
	if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
					chk_in_line < lm->min_blk_line) {
		line->state = PBLK_LINESTATE_BAD;
		list_add_tail(&line->list, &l_mg->bad_list);
		return 0;
	}

	atomic_set(&line->blk_in_line, chk_in_line);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;

	return chk_in_line;
}

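/* Allocate the per-line bad-block and erase bitmaps, the chunk metadata
 * array, and the write-error GC state; freed by pblk_line_meta_free().
 */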
static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap)
		goto free_blk_bitmap;

	line->chks = kmalloc_array(lm->blk_per_line,
				   sizeof(struct nvm_chk_meta), GFP_KERNEL);
	if (!line->chks)
		goto free_erase_bitmap;

	line->w_err_gc = kzalloc(sizeof(struct pblk_w_err_gc), GFP_KERNEL);
	if (!line->w_err_gc)
		goto free_chks;

	return 0;

free_chks:
	kfree(line->chks);
free_erase_bitmap:
	kfree(line->erase_bitmap);
free_blk_bitmap:
	kfree(line->blk_bitmap);
	return -ENOMEM;
}

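/* Set up the line-management state: GC priority lists, line state lists,
 * the valid sector count list, the bad-block template, and the smeta/emeta
 * buffers staged for the PBLK_DATA_LINES concurrently open lines.
 */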
static int pblk_line_mg_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i, bb_distance;

	l_mg->nr_lines = geo->num_chk;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);
	INIT_LIST_HEAD(&l_mg->gc_werr_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_werr_list;
	l_mg->gc_lists[1] = &l_mg->gc_high_list;
	l_mg->gc_lists[2] = &l_mg->gc_mid_list;
	l_mg->gc_lists[3] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template)
		goto fail_free_vsc_list;

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux)
		goto fail_free_bb_template;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
			lm->sec_bitmap_len, 0, 0, NULL);
	if (!l_mg->bitmap_cache)
		goto fail_free_smeta;

	/* the bitmap pool is used for both valid and map bitmaps */
	l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
				l_mg->bitmap_cache);
	if (!l_mg->bitmap_pool)
		goto fail_destroy_bitmap_cache;

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		emeta->buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
		if (!emeta->buf) {
			kfree(emeta);
			goto fail_free_emeta;
		}

		emeta->nr_entries = lm->emeta_sec[0];
		l_mg->eline_meta[i] = emeta;
	}

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	bb_distance = (geo->all_luns) * geo->ws_opt;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->ws_opt);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		kvfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

	mempool_destroy(l_mg->bitmap_pool);
fail_destroy_bitmap_cache:
	kmem_cache_destroy(l_mg->bitmap_cache);
fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_vsc_list:
	kfree(l_mg->vsc_list);
fail:
	return -ENOMEM;
}

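/* Compute the static line geometry: sectors and chunks per line, bitmap
 * lengths, GC thresholds, and the smeta/emeta footprint, grown in units of
 * the optimal write size (ws_opt) until each metadata region fits.
 */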
static int pblk_line_meta_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int smeta_len, emeta_len;
	int i;

	lm->sec_per_line = geo->clba * geo->all_luns;
	lm->blk_per_line = geo->all_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
	lm->mid_thrs = lm->sec_per_line / 2;
	lm->high_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->ws_opt;
	lm->smeta_len = lm->smeta_sec * geo->csecs;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->ws_opt;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

	lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;

	lm->min_blk_line = 1;
	if (geo->all_luns > 1)
		lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
					lm->emeta_sec[0], geo->clba);

	if (lm->min_blk_line > lm->blk_per_line) {
		pblk_err(pblk, "config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		return -EINVAL;
	}

	return 0;
}

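/* Top-level line setup: compute the line geometry, initialize the line
 * manager and LUNs, fetch the device chunk metadata, and populate every
 * line from it before deriving the over-provisioning figures.
 */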
static int pblk_lines_init(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	void *chunk_meta;
	int nr_free_chks = 0;
	int i, ret;

	ret = pblk_line_meta_init(pblk);
	if (ret)
		return ret;

	ret = pblk_line_mg_init(pblk);
	if (ret)
		return ret;

	ret = pblk_luns_init(pblk);
	if (ret)
		goto fail_free_meta;

	chunk_meta = pblk_get_chunk_meta(pblk);
	if (IS_ERR(chunk_meta)) {
		ret = PTR_ERR(chunk_meta);
		goto fail_free_luns;
	}

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_chunk_meta;
	}

	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		ret = pblk_alloc_line_meta(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);

		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
								line->state);
	}

	if (!nr_free_chks) {
		pblk_err(pblk, "too many bad blocks to create a sane instance\n");
		ret = -EINTR;
		goto fail_free_lines;
	}

	ret = pblk_set_provision(pblk, nr_free_chks);
	if (ret)
		goto fail_free_lines;

	vfree(chunk_meta);
	return 0;

fail_free_lines:
	while (--i >= 0)
		pblk_line_meta_free(l_mg, &pblk->lines[i]);
	kfree(pblk->lines);
fail_free_chunk_meta:
	vfree(chunk_meta);
fail_free_luns:
	kfree(pblk->luns);
fail_free_meta:
	pblk_line_mg_free(pblk);

	return ret;
}

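/* Create the writer kthread and arm the write timer that periodically kicks
 * it. The thread is created in a stopped state and only woken at the end of
 * pblk_init(), once the rest of the instance is ready.
 */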
static int pblk_writer_init(struct pblk *pblk)
{
	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		int err = PTR_ERR(pblk->writer_ts);

		if (err != -EINTR)
			pblk_err(pblk, "could not allocate writer kthread (%d)\n",
					err);
		return err;
	}

	timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	/* The pipeline must be stopped and the write buffer emptied before the
	 * write thread is stopped
	 */
	WARN(pblk_rb_read_count(&pblk->rwb),
			"Stopping not fully persisted write buffer\n");

	WARN(pblk_rb_sync_count(&pblk->rwb),
			"Stopping not fully synced write buffer\n");

	del_timer_sync(&pblk->wtimer);
	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
}

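/* Release all per-instance state; called from pblk_exit() once the pipeline
 * has been torn down and the write buffer drained.
 */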
static void pblk_free(struct pblk *pblk)
{
	pblk_lines_free(pblk);
	pblk_l2p_free(pblk);
	pblk_rwb_free(pblk);
	pblk_core_free(pblk);

	kfree(pblk);
}

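/* Quiesce the instance: optionally flush the pipeline, then stop it, stop
 * the writer thread, sync the L2P table against the write buffer, and free
 * the rate limiter.
 */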
static void pblk_tear_down(struct pblk *pblk, bool graceful)
{
	if (graceful)
		__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rl_free(&pblk->rl);

	pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful);
}

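/* Target removal path: stop GC first so it generates no further I/O, then
 * tear down the data path. 'graceful' selects whether buffered data is
 * flushed to media before stopping.
 */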
static void pblk_exit(void *private, bool graceful)
{
	struct pblk *pblk = private;

	pblk_gc_exit(pblk, graceful);
	pblk_tear_down(pblk, graceful);

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	pblk_free(pblk);
}

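/* Report the exposed capacity in 512-byte sectors; pblk->capacity is kept
 * in device sectors, and NR_PHY_IN_LOG converts between the two units.
 */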
static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}

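/* Target creation entry point: validate the device geometry, then bring up
 * the subsystems in dependency order (core, lines, write buffer, L2P table,
 * writer thread, GC) and inherit queue limits from the underlying device.
 */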
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;
	pblk->state = PBLK_STATE_RUNNING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	pblk->gc.gc_enabled = 0;

	if (!(geo->version == NVM_OCSSD_SPEC_12 ||
					geo->version == NVM_OCSSD_SPEC_20)) {
		pblk_err(pblk, "OCSSD version not supported (%u)\n",
							geo->version);
		kfree(pblk);
		return ERR_PTR(-EINVAL);
	}

	if (geo->ext) {
		pblk_err(pblk, "extended metadata not supported\n");
		kfree(pblk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_init(&pblk->resubmit_lock);
	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
	atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_core_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize core\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize lines\n");
		goto fail_free_core;
	}

	ret = pblk_rwb_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize write buffer\n");
		goto fail_free_lines;
	}

	ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY);
	if (ret) {
		pblk_err(pblk, "could not initialize maps\n");
		goto fail_free_rwb;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		if (ret != -EINTR)
			pblk_err(pblk, "could not initialize write thread\n");
		goto fail_free_l2p;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pblk_err(pblk, "could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->clba * geo->csecs;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);

	pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->all_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->capacity,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);

	/* Check if we need to start GC */
	pblk_gc_should_kick(pblk);

	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_rwb:
	pblk_rwb_free(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.bops		= &pblk_bops,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
	.owner		= THIS_MODULE,
};

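/* Initialize the shared bio set before registering the target type so it is
 * available as soon as the first pblk target can be created; unwind the bio
 * set if registration fails.
 */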
static int __init pblk_module_init(void)
{
	int ret;

	ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret)
		return ret;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_exit(&pblk_bio_set);
	return ret;
}

static void pblk_module_exit(void)
{
	bioset_exit(&pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");