v5.9 (drivers/lightnvm/core.c)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
   4 * Initial release: Matias Bjorling <m@bjorling.me>
   5 */
   6
   7#define pr_fmt(fmt) "nvm: " fmt
   8
   9#include <linux/list.h>
  10#include <linux/types.h>
  11#include <linux/sem.h>
  12#include <linux/bitmap.h>
  13#include <linux/module.h>
  14#include <linux/moduleparam.h>
  15#include <linux/miscdevice.h>
  16#include <linux/lightnvm.h>
  17#include <linux/sched/sysctl.h>
  18
  19static LIST_HEAD(nvm_tgt_types);
  20static DECLARE_RWSEM(nvm_tgtt_lock);
  21static LIST_HEAD(nvm_devices);
  22static DECLARE_RWSEM(nvm_lock);
  23
  24/* Map between virtual and physical channel and lun */
  25struct nvm_ch_map {
  26	int ch_off;
  27	int num_lun;
  28	int *lun_offs;
  29};
  30
  31struct nvm_dev_map {
  32	struct nvm_ch_map *chnls;
  33	int num_ch;
  34};
  35
  36static void nvm_free(struct kref *ref);
  37
  38static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
  39{
  40	struct nvm_target *tgt;
  41
  42	list_for_each_entry(tgt, &dev->targets, list)
  43		if (!strcmp(name, tgt->disk->disk_name))
  44			return tgt;
  45
  46	return NULL;
  47}
  48
  49static bool nvm_target_exists(const char *name)
  50{
  51	struct nvm_dev *dev;
  52	struct nvm_target *tgt;
  53	bool ret = false;
  54
  55	down_write(&nvm_lock);
  56	list_for_each_entry(dev, &nvm_devices, devices) {
  57		mutex_lock(&dev->mlock);
  58		list_for_each_entry(tgt, &dev->targets, list) {
  59			if (!strcmp(name, tgt->disk->disk_name)) {
  60				ret = true;
  61				mutex_unlock(&dev->mlock);
  62				goto out;
  63			}
  64		}
  65		mutex_unlock(&dev->mlock);
  66	}
  67
  68out:
  69	up_write(&nvm_lock);
  70	return ret;
  71}
  72
  73static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
  74{
  75	int i;
  76
  77	for (i = lun_begin; i <= lun_end; i++) {
  78		if (test_and_set_bit(i, dev->lun_map)) {
  79			pr_err("lun %d already allocated\n", i);
  80			goto err;
  81		}
  82	}
  83
  84	return 0;
  85err:
  86	while (--i >= lun_begin)
  87		clear_bit(i, dev->lun_map);
  88
  89	return -EBUSY;
  90}
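/*
 * Example (illustrative): reservation is all-or-nothing. Requesting
 * luns 3..5 while bit 4 is already set in dev->lun_map rolls back
 * bit 3 via clear_bit() and returns -EBUSY, leaving the bitmap as it
 * was found.
 */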
  91
  92static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
  93				 int lun_end)
  94{
  95	int i;
  96
  97	for (i = lun_begin; i <= lun_end; i++)
  98		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
  99}
 100
 101static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
 102{
 103	struct nvm_dev *dev = tgt_dev->parent;
 104	struct nvm_dev_map *dev_map = tgt_dev->map;
 105	int i, j;
 106
 107	for (i = 0; i < dev_map->num_ch; i++) {
 108		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
 109		int *lun_offs = ch_map->lun_offs;
 110		int ch = i + ch_map->ch_off;
 111
 112		if (clear) {
 113			for (j = 0; j < ch_map->num_lun; j++) {
 114				int lun = j + lun_offs[j];
 115				int lunid = (ch * dev->geo.num_lun) + lun;
 116
 117				WARN_ON(!test_and_clear_bit(lunid,
 118							dev->lun_map));
 119			}
 120		}
 121
 122		kfree(ch_map->lun_offs);
 123	}
 124
 125	kfree(dev_map->chnls);
 126	kfree(dev_map);
 127
 128	kfree(tgt_dev->luns);
 129	kfree(tgt_dev);
 130}
 131
 132static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
 133					      u16 lun_begin, u16 lun_end,
 134					      u16 op)
 135{
 136	struct nvm_tgt_dev *tgt_dev = NULL;
 137	struct nvm_dev_map *dev_rmap = dev->rmap;
 138	struct nvm_dev_map *dev_map;
 139	struct ppa_addr *luns;
 140	int num_lun = lun_end - lun_begin + 1;
 141	int luns_left = num_lun;
 142	int num_ch = num_lun / dev->geo.num_lun;
 143	int num_ch_mod = num_lun % dev->geo.num_lun;
 144	int bch = lun_begin / dev->geo.num_lun;
 145	int blun = lun_begin % dev->geo.num_lun;
 146	int lunid = 0;
 147	int lun_balanced = 1;
 148	int sec_per_lun, prev_num_lun;
 149	int i, j;
 150
 151	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;
 152
 153	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
 154	if (!dev_map)
 155		goto err_dev;
 156
 157	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
 158	if (!dev_map->chnls)
 159		goto err_chnls;
 160
 161	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
 162	if (!luns)
 163		goto err_luns;
 164
 165	prev_num_lun = (luns_left > dev->geo.num_lun) ?
 166					dev->geo.num_lun : luns_left;
 167	for (i = 0; i < num_ch; i++) {
 168		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
 169		int *lun_roffs = ch_rmap->lun_offs;
 170		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
 171		int *lun_offs;
 172		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
 173					dev->geo.num_lun : luns_left;
 174
 175		if (lun_balanced && prev_num_lun != luns_in_chnl)
 176			lun_balanced = 0;
 177
 178		ch_map->ch_off = ch_rmap->ch_off = bch;
 179		ch_map->num_lun = luns_in_chnl;
 180
 181		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
 182		if (!lun_offs)
 183			goto err_ch;
 184
 185		for (j = 0; j < luns_in_chnl; j++) {
 186			luns[lunid].ppa = 0;
 187			luns[lunid].a.ch = i;
 188			luns[lunid++].a.lun = j;
 189
 190			lun_offs[j] = blun;
 191			lun_roffs[j + blun] = blun;
 192		}
 193
 194		ch_map->lun_offs = lun_offs;
 195
 196		/* when starting a new channel, lun offset is reset */
 197		blun = 0;
 198		luns_left -= luns_in_chnl;
 199	}
 200
 201	dev_map->num_ch = num_ch;
 202
 203	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
 204	if (!tgt_dev)
 205		goto err_ch;
 206
 207	/* Inherit device geometry from parent */
 208	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
 209
 210	/* Target device only owns a portion of the physical device */
 211	tgt_dev->geo.num_ch = num_ch;
 212	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
 213	tgt_dev->geo.all_luns = num_lun;
 214	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;
 215
 216	tgt_dev->geo.op = op;
 217
 218	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
 219	tgt_dev->geo.total_secs = num_lun * sec_per_lun;
 220
 221	tgt_dev->q = dev->q;
 222	tgt_dev->map = dev_map;
 223	tgt_dev->luns = luns;
 224	tgt_dev->parent = dev;
 225
 226	return tgt_dev;
 227err_ch:
 228	while (--i >= 0)
 229		kfree(dev_map->chnls[i].lun_offs);
 230	kfree(luns);
 231err_luns:
 232	kfree(dev_map->chnls);
 233err_chnls:
 234	kfree(dev_map);
 235err_dev:
 236	return tgt_dev;
 237}
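/*
 * Worked example (illustrative, assuming geo.num_lun == 4): a target
 * over luns 0..5 gives num_lun = 6 and num_ch = 2; the first virtual
 * channel maps 4 luns and the second maps 2. Because the channels are
 * unevenly populated, lun_balanced drops to 0 and the target's
 * geo.num_lun is reported as -1.
 */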
 238
 239static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
 240{
 241	struct nvm_tgt_type *tt;
 242
 243	list_for_each_entry(tt, &nvm_tgt_types, list)
 244		if (!strcmp(name, tt->name))
 245			return tt;
 246
 247	return NULL;
 248}
 249
 250static struct nvm_tgt_type *nvm_find_target_type(const char *name)
 251{
 252	struct nvm_tgt_type *tt;
 253
 254	down_write(&nvm_tgtt_lock);
 255	tt = __nvm_find_target_type(name);
 256	up_write(&nvm_tgtt_lock);
 257
 258	return tt;
 259}
 260
 261static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
 262				 int lun_end)
 263{
 264	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
 265		pr_err("lun out of bound (%u:%u > %u)\n",
 266			lun_begin, lun_end, geo->all_luns - 1);
 267		return -EINVAL;
 268	}
 269
 270	return 0;
 271}
 272
 273static int __nvm_config_simple(struct nvm_dev *dev,
 274			       struct nvm_ioctl_create_simple *s)
 275{
 276	struct nvm_geo *geo = &dev->geo;
 277
 278	if (s->lun_begin == -1 && s->lun_end == -1) {
 279		s->lun_begin = 0;
 280		s->lun_end = geo->all_luns - 1;
 281	}
 282
 283	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
 284}
 285
 286static int __nvm_config_extended(struct nvm_dev *dev,
 287				 struct nvm_ioctl_create_extended *e)
 288{
 289	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
 290		e->lun_begin = 0;
 291		e->lun_end = dev->geo.all_luns - 1;
 292	}
 293
 294	/* if op is not set, fall back to the target's default */
 295	if (e->op == 0xFFFF) {
 296		e->op = NVM_TARGET_DEFAULT_OP;
 297	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
 298		pr_err("invalid over provisioning value\n");
 299		return -EINVAL;
 300	}
 301
 302	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
 303}
 304
 305static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 306{
 307	struct nvm_ioctl_create_extended e;
 308	struct request_queue *tqueue;
 309	struct gendisk *tdisk;
 310	struct nvm_tgt_type *tt;
 311	struct nvm_target *t;
 312	struct nvm_tgt_dev *tgt_dev;
 313	void *targetdata;
 314	unsigned int mdts;
 315	int ret;
 316
 317	switch (create->conf.type) {
 318	case NVM_CONFIG_TYPE_SIMPLE:
 319		ret = __nvm_config_simple(dev, &create->conf.s);
 320		if (ret)
 321			return ret;
 322
 323		e.lun_begin = create->conf.s.lun_begin;
 324		e.lun_end = create->conf.s.lun_end;
 325		e.op = NVM_TARGET_DEFAULT_OP;
 326		break;
 327	case NVM_CONFIG_TYPE_EXTENDED:
 328		ret = __nvm_config_extended(dev, &create->conf.e);
 329		if (ret)
 330			return ret;
 331
 332		e = create->conf.e;
 333		break;
 334	default:
 335		pr_err("config type not valid\n");
 336		return -EINVAL;
 337	}
 338
 339	tt = nvm_find_target_type(create->tgttype);
 340	if (!tt) {
 341		pr_err("target type %s not found\n", create->tgttype);
 342		return -EINVAL;
 343	}
 344
 345	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
 346		pr_err("device is incompatible with target L2P type.\n");
 347		return -EINVAL;
 348	}
 349
 350	if (nvm_target_exists(create->tgtname)) {
 351		pr_err("target name already exists (%s)\n",
 352							create->tgtname);
 353		return -EINVAL;
 354	}
 355
 356	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
 357	if (ret)
 358		return ret;
 359
 360	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
 361	if (!t) {
 362		ret = -ENOMEM;
 363		goto err_reserve;
 364	}
 365
 366	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
 367	if (!tgt_dev) {
 368		pr_err("could not create target device\n");
 369		ret = -ENOMEM;
 370		goto err_t;
 371	}
 372
 373	tdisk = alloc_disk(0);
 374	if (!tdisk) {
 375		ret = -ENOMEM;
 376		goto err_dev;
 377	}
 378
 379	tqueue = blk_alloc_queue(dev->q->node);
 380	if (!tqueue) {
 381		ret = -ENOMEM;
 382		goto err_disk;
 383	}
 384
 385	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
 386	tdisk->flags = GENHD_FL_EXT_DEVT;
 387	tdisk->major = 0;
 388	tdisk->first_minor = 0;
 389	tdisk->fops = tt->bops;
 390	tdisk->queue = tqueue;
 391
 392	targetdata = tt->init(tgt_dev, tdisk, create->flags);
 393	if (IS_ERR(targetdata)) {
 394		ret = PTR_ERR(targetdata);
 395		goto err_init;
 396	}
 397
 398	tdisk->private_data = targetdata;
 399	tqueue->queuedata = targetdata;
 400
 401	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
 402	if (dev->geo.mdts) {
 403		mdts = min_t(u32, dev->geo.mdts,
 404				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
 405	}
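	/*
	 * Example of the arithmetic above (assuming csecs == 4096 and
	 * NVM_MAX_VLBA == 64): 4096 >> 9 = 8 512-byte sectors per LBA,
	 * so mdts is capped at 8 * 64 = 512 sectors unless the device
	 * advertises a smaller geo.mdts.
	 */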
 406	blk_queue_max_hw_sectors(tqueue, mdts);
 407
 408	set_capacity(tdisk, tt->capacity(targetdata));
 409	add_disk(tdisk);
 410
 411	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
 412		ret = -ENOMEM;
 413		goto err_sysfs;
 414	}
 415
 416	t->type = tt;
 417	t->disk = tdisk;
 418	t->dev = tgt_dev;
 419
 420	mutex_lock(&dev->mlock);
 421	list_add_tail(&t->list, &dev->targets);
 422	mutex_unlock(&dev->mlock);
 423
 424	__module_get(tt->owner);
 425
 426	return 0;
 427err_sysfs:
 428	if (tt->exit)
 429		tt->exit(targetdata, true);
 430err_init:
 431	blk_cleanup_queue(tqueue);
 432	tdisk->queue = NULL;
 433err_disk:
 434	put_disk(tdisk);
 435err_dev:
 436	nvm_remove_tgt_dev(tgt_dev, 0);
 437err_t:
 438	kfree(t);
 439err_reserve:
 440	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
 441	return ret;
 442}
 443
 444static void __nvm_remove_target(struct nvm_target *t, bool graceful)
 445{
 446	struct nvm_tgt_type *tt = t->type;
 447	struct gendisk *tdisk = t->disk;
 448	struct request_queue *q = tdisk->queue;
 449
 450	del_gendisk(tdisk);
 451	blk_cleanup_queue(q);
 452
 453	if (tt->sysfs_exit)
 454		tt->sysfs_exit(tdisk);
 455
 456	if (tt->exit)
 457		tt->exit(tdisk->private_data, graceful);
 458
 459	nvm_remove_tgt_dev(t->dev, 1);
 460	put_disk(tdisk);
 461	module_put(t->type->owner);
 462
 463	list_del(&t->list);
 464	kfree(t);
 465}
 466
 467/**
 468 * nvm_remove_tgt - Removes a target from the media manager
 469 * @remove:	ioctl structure with target name to remove.
 470 *
 471 * Returns:
 472 * 0: on success
 473 * 1: on not found
 474 * <0: on error
 475 */
 476static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
 477{
 478	struct nvm_target *t = NULL;
 479	struct nvm_dev *dev;
 480
 481	down_read(&nvm_lock);
 482	list_for_each_entry(dev, &nvm_devices, devices) {
 483		mutex_lock(&dev->mlock);
 484		t = nvm_find_target(dev, remove->tgtname);
 485		if (t) {
 486			mutex_unlock(&dev->mlock);
 487			break;
 488		}
 489		mutex_unlock(&dev->mlock);
 490	}
 491	up_read(&nvm_lock);
 492
 493	if (!t) {
 494		pr_err("failed to remove target %s\n",
 495				remove->tgtname);
 496		return 1;
 497	}
 498
 499	__nvm_remove_target(t, true);
 500	kref_put(&dev->ref, nvm_free);
 501
 502	return 0;
 503}
 504
 505static int nvm_register_map(struct nvm_dev *dev)
 506{
 507	struct nvm_dev_map *rmap;
 508	int i, j;
 509
 510	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
 511	if (!rmap)
 512		goto err_rmap;
 513
 514	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
 515								GFP_KERNEL);
 516	if (!rmap->chnls)
 517		goto err_chnls;
 518
 519	for (i = 0; i < dev->geo.num_ch; i++) {
 520		struct nvm_ch_map *ch_rmap;
 521		int *lun_roffs;
 522		int luns_in_chnl = dev->geo.num_lun;
 523
 524		ch_rmap = &rmap->chnls[i];
 525
 526		ch_rmap->ch_off = -1;
 527		ch_rmap->num_lun = luns_in_chnl;
 528
 529		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
 530		if (!lun_roffs)
 531			goto err_ch;
 532
 533		for (j = 0; j < luns_in_chnl; j++)
 534			lun_roffs[j] = -1;
 535
 536		ch_rmap->lun_offs = lun_roffs;
 537	}
 538
 539	dev->rmap = rmap;
 540
 541	return 0;
 542err_ch:
 543	while (--i >= 0)
 544		kfree(rmap->chnls[i].lun_offs);
 545err_chnls:
 546	kfree(rmap);
 547err_rmap:
 548	return -ENOMEM;
 549}
 550
 551static void nvm_unregister_map(struct nvm_dev *dev)
 552{
 553	struct nvm_dev_map *rmap = dev->rmap;
 554	int i;
 555
 556	for (i = 0; i < dev->geo.num_ch; i++)
 557		kfree(rmap->chnls[i].lun_offs);
 558
 559	kfree(rmap->chnls);
 560	kfree(rmap);
 561}
 562
 563static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
 564{
 565	struct nvm_dev_map *dev_map = tgt_dev->map;
 566	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
 567	int lun_off = ch_map->lun_offs[p->a.lun];
 568
 569	p->a.ch += ch_map->ch_off;
 570	p->a.lun += lun_off;
 571}
 572
 573static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
 574{
 575	struct nvm_dev *dev = tgt_dev->parent;
 576	struct nvm_dev_map *dev_rmap = dev->rmap;
 577	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
 578	int lun_roff = ch_rmap->lun_offs[p->a.lun];
 579
 580	p->a.ch -= ch_rmap->ch_off;
 581	p->a.lun -= lun_roff;
 582}
 583
 584static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
 585				struct ppa_addr *ppa_list, int nr_ppas)
 586{
 587	int i;
 588
 589	for (i = 0; i < nr_ppas; i++) {
 590		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
 591		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
 592	}
 593}
 594
 595static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
 596				struct ppa_addr *ppa_list, int nr_ppas)
 597{
 598	int i;
 599
 600	for (i = 0; i < nr_ppas; i++) {
 601		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
 602		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
 603	}
 604}
 605
 606static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 607{
 608	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 609
 610	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
 611}
 612
 613static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 614{
 615	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 616
 617	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
 618}
 619
 620int nvm_register_tgt_type(struct nvm_tgt_type *tt)
 621{
 622	int ret = 0;
 623
 624	down_write(&nvm_tgtt_lock);
 625	if (__nvm_find_target_type(tt->name))
 626		ret = -EEXIST;
 627	else
 628		list_add(&tt->list, &nvm_tgt_types);
 629	up_write(&nvm_tgtt_lock);
 630
 631	return ret;
 632}
 633EXPORT_SYMBOL(nvm_register_tgt_type);
 634
 635void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
 636{
 637	if (!tt)
 638		return;
 639
 640	down_write(&nvm_tgtt_lock);
 641	list_del(&tt->list);
 642	up_write(&nvm_tgtt_lock);
 643}
 644EXPORT_SYMBOL(nvm_unregister_tgt_type);
 645
 646void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
 647							dma_addr_t *dma_handler)
 648{
 649	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
 650								dma_handler);
 651}
 652EXPORT_SYMBOL(nvm_dev_dma_alloc);
 653
 654void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
 655{
 656	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
 657}
 658EXPORT_SYMBOL(nvm_dev_dma_free);
 659
 660static struct nvm_dev *nvm_find_nvm_dev(const char *name)
 661{
 662	struct nvm_dev *dev;
 663
 664	list_for_each_entry(dev, &nvm_devices, devices)
 665		if (!strcmp(name, dev->name))
 666			return dev;
 667
 668	return NULL;
 669}
 670
 671static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
 672			const struct ppa_addr *ppas, int nr_ppas)
 673{
 674	struct nvm_dev *dev = tgt_dev->parent;
 675	struct nvm_geo *geo = &tgt_dev->geo;
 676	int i, plane_cnt, pl_idx;
 677	struct ppa_addr ppa;
 678
 679	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
 680		rqd->nr_ppas = nr_ppas;
 681		rqd->ppa_addr = ppas[0];
 682
 683		return 0;
 684	}
 685
 686	rqd->nr_ppas = nr_ppas;
 687	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
 688	if (!rqd->ppa_list) {
 689		pr_err("failed to allocate dma memory\n");
 690		return -ENOMEM;
 691	}
 692
 693	plane_cnt = geo->pln_mode;
 694	rqd->nr_ppas *= plane_cnt;
 695
 696	for (i = 0; i < nr_ppas; i++) {
 697		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
 698			ppa = ppas[i];
 699			ppa.g.pl = pl_idx;
 700			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
 701		}
 702	}
 703
 704	return 0;
 705}
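/*
 * Example (illustrative): for nr_ppas == 2 on a dual-plane device the
 * unfolded list is laid out plane-major,
 * ppa_list = { ppa0/pl0, ppa1/pl0, ppa0/pl1, ppa1/pl1 },
 * and rqd->nr_ppas becomes 4.
 */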
 706
 707static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
 708			struct nvm_rq *rqd)
 709{
 710	if (!rqd->ppa_list)
 711		return;
 712
 713	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
 714}
 715
 716static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
 717{
 718	int flags = 0;
 719
 720	if (geo->version == NVM_OCSSD_SPEC_20)
 721		return 0;
 722
 723	if (rqd->is_seq)
 724		flags |= geo->pln_mode >> 1;
 725
 726	if (rqd->opcode == NVM_OP_PREAD)
 727		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
 728	else if (rqd->opcode == NVM_OP_PWRITE)
 729		flags |= NVM_IO_SCRAMBLE_ENABLE;
 730
 731	return flags;
 732}
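/*
 * Example (illustrative): on a 1.2 device in dual-plane mode, a
 * sequential read ends up with
 * flags = (NVM_PLANE_DOUBLE >> 1) | NVM_IO_SCRAMBLE_ENABLE |
 * NVM_IO_SUSPEND; on a 2.0 device the flags field stays 0.
 */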
 733
 734int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
 735{
 736	struct nvm_dev *dev = tgt_dev->parent;
 737	int ret;
 738
 739	if (!dev->ops->submit_io)
 740		return -ENODEV;
 741
 742	nvm_rq_tgt_to_dev(tgt_dev, rqd);
 743
 744	rqd->dev = tgt_dev;
 745	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
 746
 747	/* In case of error, fail with the right address format */
 748	ret = dev->ops->submit_io(dev, rqd, buf);
 749	if (ret)
 750		nvm_rq_dev_to_tgt(tgt_dev, rqd);
 751	return ret;
 752}
 753EXPORT_SYMBOL(nvm_submit_io);
 754
 755static void nvm_sync_end_io(struct nvm_rq *rqd)
 756{
 757	struct completion *waiting = rqd->private;
 758
 759	complete(waiting);
 760}
 761
 762static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
 763			      void *buf)
 764{
 765	DECLARE_COMPLETION_ONSTACK(wait);
 766	int ret = 0;
 767
 768	rqd->end_io = nvm_sync_end_io;
 769	rqd->private = &wait;
 770
 771	ret = dev->ops->submit_io(dev, rqd, buf);
 772	if (ret)
 773		return ret;
 774
 775	wait_for_completion_io(&wait);
 776
 777	return 0;
 778}
 779
 780int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
 781		       void *buf)
 782{
 783	struct nvm_dev *dev = tgt_dev->parent;
 784	int ret;
 785
 786	if (!dev->ops->submit_io)
 787		return -ENODEV;
 788
 789	nvm_rq_tgt_to_dev(tgt_dev, rqd);
 790
 791	rqd->dev = tgt_dev;
 792	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
 793
 794	ret = nvm_submit_io_wait(dev, rqd, buf);
 795
 796	return ret;
 797}
 798EXPORT_SYMBOL(nvm_submit_io_sync);
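/*
 * Minimal usage sketch for a target (illustrative; buffer setup and
 * error handling elided):
 *
 *	struct nvm_rq rqd = { NULL };
 *
 *	rqd.opcode = NVM_OP_PREAD;
 *	rqd.nr_ppas = 1;
 *	rqd.ppa_addr = ppa;	(a target-local address)
 *	ret = nvm_submit_io_sync(tgt_dev, &rqd, buf);
 *
 * A zero return with rqd.error set means the device reported a
 * per-request error.
 */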
 799
 800void nvm_end_io(struct nvm_rq *rqd)
 801{
 802	struct nvm_tgt_dev *tgt_dev = rqd->dev;
 803
 804	/* Convert address space */
 805	if (tgt_dev)
 806		nvm_rq_dev_to_tgt(tgt_dev, rqd);
 807
 808	if (rqd->end_io)
 809		rqd->end_io(rqd);
 810}
 811EXPORT_SYMBOL(nvm_end_io);
 812
 813static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
 814{
 815	if (!dev->ops->submit_io)
 816		return -ENODEV;
 817
 818	rqd->dev = NULL;
 819	rqd->flags = nvm_set_flags(&dev->geo, rqd);
 820
 821	return nvm_submit_io_wait(dev, rqd, NULL);
 822}
 823
 824static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
 825{
 826	struct nvm_rq rqd = { NULL };
 827	struct bio bio;
 828	struct bio_vec bio_vec;
 829	struct page *page;
 830	int ret;
 831
 832	page = alloc_page(GFP_KERNEL);
 833	if (!page)
 834		return -ENOMEM;
 835
 836	bio_init(&bio, &bio_vec, 1);
 837	bio_add_page(&bio, page, PAGE_SIZE, 0);
 838	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
 839
 840	rqd.bio = &bio;
 841	rqd.opcode = NVM_OP_PREAD;
 842	rqd.is_seq = 1;
 843	rqd.nr_ppas = 1;
 844	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
 845
 846	ret = nvm_submit_io_sync_raw(dev, &rqd);
 847	if (ret)
 848		return ret;
 849
 850	__free_page(page);
 851
 852	return rqd.error;
 853}
 854
 855/*
 856 * Scans a 1.2 chunk's first and last page to determine its state.
 857 * If the chunk is found to be open, also scan it to update the write
 858 * pointer.
 859 */
 860static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
 861			     struct nvm_chk_meta *meta)
 862{
 863	struct nvm_geo *geo = &dev->geo;
 864	int ret, pg, pl;
 865
 866	/* sense first page */
 867	ret = nvm_bb_chunk_sense(dev, ppa);
 868	if (ret < 0) /* io error */
 869		return ret;
 870	else if (ret == 0) /* valid data */
 871		meta->state = NVM_CHK_ST_OPEN;
 872	else if (ret > 0) {
 873		/*
 874		 * If the page is empty the chunk is free; CRC/ECC failures
 875		 * leave it open for scanning; anything else is an io error.
 876		 */
 877		switch (ret) {
 878		case NVM_RSP_ERR_EMPTYPAGE:
 879			meta->state = NVM_CHK_ST_FREE;
 880			return 0;
 881		case NVM_RSP_ERR_FAILCRC:
 882		case NVM_RSP_ERR_FAILECC:
 883		case NVM_RSP_WARN_HIGHECC:
 884			meta->state = NVM_CHK_ST_OPEN;
 885			goto scan;
 886		default:
 887			return -ret; /* other io error */
 888		}
 889	}
 890
 891	/* sense last page */
 892	ppa.g.pg = geo->num_pg - 1;
 893	ppa.g.pl = geo->num_pln - 1;
 894
 895	ret = nvm_bb_chunk_sense(dev, ppa);
 896	if (ret < 0) /* io error */
 897		return ret;
 898	else if (ret == 0) { /* Chunk fully written */
 899		meta->state = NVM_CHK_ST_CLOSED;
 900		meta->wp = geo->clba;
 901		return 0;
 902	} else if (ret > 0) {
 903		switch (ret) {
 904		case NVM_RSP_ERR_EMPTYPAGE:
 905		case NVM_RSP_ERR_FAILCRC:
 906		case NVM_RSP_ERR_FAILECC:
 907		case NVM_RSP_WARN_HIGHECC:
 908			meta->state = NVM_CHK_ST_OPEN;
 909			break;
 910		default:
 911			return -ret; /* other io error */
 912		}
 913	}
 914
 915scan:
 916	/*
 917	 * The chunk is open; scan it sequentially to update the write
 918	 * pointer. We assume that targets write data across all planes
 919	 * before moving to the next page.
 920	 */
 921	for (pg = 0; pg < geo->num_pg; pg++) {
 922		for (pl = 0; pl < geo->num_pln; pl++) {
 923			ppa.g.pg = pg;
 924			ppa.g.pl = pl;
 925
 926			ret = nvm_bb_chunk_sense(dev, ppa);
 927			if (ret < 0) /* io error */
 928				return ret;
 929			else if (ret == 0) {
 930				meta->wp += geo->ws_min;
 931			} else if (ret > 0) {
 932				switch (ret) {
 933				case NVM_RSP_ERR_EMPTYPAGE:
 934					return 0;
 935				case NVM_RSP_ERR_FAILCRC:
 936				case NVM_RSP_ERR_FAILECC:
 937				case NVM_RSP_WARN_HIGHECC:
 938					meta->wp += geo->ws_min;
 939					break;
 940				default:
 941					return -ret; /* other io error */
 942				}
 943			}
 944		}
 945	}
 946
 947	return 0;
 948}
 949
 950/*
 951 * Folds a bad block list from its plane representation to its
 952 * chunk representation.
 953 *
 954 * If any of the plane states is bad or grown bad, the chunk is marked
 955 * offline. Otherwise, the first plane's state acts as the chunk state.
 956 */
 957static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
 958			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
 959{
 960	struct nvm_geo *geo = &dev->geo;
 961	int ret, blk, pl, offset, blktype;
 962
 963	for (blk = 0; blk < geo->num_chk; blk++) {
 964		offset = blk * geo->pln_mode;
 965		blktype = blks[offset];
 966
 967		for (pl = 0; pl < geo->pln_mode; pl++) {
 968			if (blks[offset + pl] &
 969					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
 970				blktype = blks[offset + pl];
 971				break;
 972			}
 973		}
 974
 975		ppa.g.blk = blk;
 976
 977		meta->wp = 0;
 978		meta->type = NVM_CHK_TP_W_SEQ;
 979		meta->wi = 0;
 980		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
 981		meta->cnlb = dev->geo.clba;
 982
 983		if (blktype == NVM_BLK_T_FREE) {
 984			ret = nvm_bb_chunk_scan(dev, ppa, meta);
 985			if (ret)
 986				return ret;
 987		} else {
 988			meta->state = NVM_CHK_ST_OFFLINE;
 989		}
 990
 991		meta++;
 992	}
 993
 994	return 0;
 995}
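/*
 * Example (illustrative, pln_mode == 2): a block whose per-plane
 * states are { NVM_BLK_T_FREE, NVM_BLK_T_GRWN_BAD } folds to
 * NVM_CHK_ST_OFFLINE, while { NVM_BLK_T_FREE, NVM_BLK_T_FREE } is
 * handed to nvm_bb_chunk_scan() to recover the chunk state and
 * write pointer.
 */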
 996
 997static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
 998			   int nchks, struct nvm_chk_meta *meta)
 999{
1000	struct nvm_geo *geo = &dev->geo;
1001	struct ppa_addr ppa;
1002	u8 *blks;
1003	int ch, lun, nr_blks;
1004	int ret = 0;
1005
1006	ppa.ppa = slba;
1007	ppa = dev_to_generic_addr(dev, ppa);
1008
1009	if (ppa.g.blk != 0)
1010		return -EINVAL;
1011
1012	if ((nchks % geo->num_chk) != 0)
1013		return -EINVAL;
1014
1015	nr_blks = geo->num_chk * geo->pln_mode;
1016
1017	blks = kmalloc(nr_blks, GFP_KERNEL);
1018	if (!blks)
1019		return -ENOMEM;
1020
1021	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
1022		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
1023			struct ppa_addr ppa_gen, ppa_dev;
1024
1025			if (!nchks)
1026				goto done;
1027
1028			ppa_gen.ppa = 0;
1029			ppa_gen.g.ch = ch;
1030			ppa_gen.g.lun = lun;
1031			ppa_dev = generic_to_dev_addr(dev, ppa_gen);
1032
1033			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
1034			if (ret)
1035				goto done;
1036
1037			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
1038									meta);
1039			if (ret)
1040				goto done;
1041
1042			meta += geo->num_chk;
1043			nchks -= geo->num_chk;
1044		}
1045	}
1046done:
1047	kfree(blks);
1048	return ret;
1049}
1050
1051int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
1052		       int nchks, struct nvm_chk_meta *meta)
1053{
1054	struct nvm_dev *dev = tgt_dev->parent;
1055
1056	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
1057
1058	if (dev->geo.version == NVM_OCSSD_SPEC_12)
1059		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);
1060
1061	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
1062}
1063EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
1064
1065int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
1066		       int nr_ppas, int type)
1067{
1068	struct nvm_dev *dev = tgt_dev->parent;
1069	struct nvm_rq rqd;
1070	int ret;
1071
1072	if (dev->geo.version == NVM_OCSSD_SPEC_20)
1073		return 0;
1074
1075	if (nr_ppas > NVM_MAX_VLBA) {
1076		pr_err("unable to update all blocks atomically\n");
1077		return -EINVAL;
1078	}
1079
1080	memset(&rqd, 0, sizeof(struct nvm_rq));
1081
1082	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
1083	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
1084
1085	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
1086	nvm_free_rqd_ppalist(tgt_dev, &rqd);
1087	if (ret)
1088		return -EINVAL;
1089
1090	return 0;
1091}
1092EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
1093
1094static int nvm_core_init(struct nvm_dev *dev)
1095{
1096	struct nvm_geo *geo = &dev->geo;
1097	int ret;
1098
1099	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
1100					sizeof(unsigned long), GFP_KERNEL);
1101	if (!dev->lun_map)
1102		return -ENOMEM;
1103
1104	INIT_LIST_HEAD(&dev->area_list);
1105	INIT_LIST_HEAD(&dev->targets);
1106	mutex_init(&dev->mlock);
1107	spin_lock_init(&dev->lock);
1108
1109	ret = nvm_register_map(dev);
1110	if (ret)
1111		goto err_fmtype;
1112
1113	return 0;
1114err_fmtype:
1115	kfree(dev->lun_map);
1116	return ret;
1117}
1118
1119static void nvm_free(struct kref *ref)
1120{
1121	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);
1122
1123	if (dev->dma_pool)
1124		dev->ops->destroy_dma_pool(dev->dma_pool);
1125
1126	if (dev->rmap)
1127		nvm_unregister_map(dev);
1128
1129	kfree(dev->lun_map);
1130	kfree(dev);
1131}
1132
1133static int nvm_init(struct nvm_dev *dev)
1134{
1135	struct nvm_geo *geo = &dev->geo;
1136	int ret = -EINVAL;
1137
1138	if (dev->ops->identity(dev)) {
1139		pr_err("device could not be identified\n");
1140		goto err;
1141	}
1142
1143	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
1144			geo->minor_ver_id, geo->vmnt);
1145
1146	ret = nvm_core_init(dev);
1147	if (ret) {
1148		pr_err("could not initialize core structures.\n");
1149		goto err;
1150	}
1151
1152	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
1153			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
1154			dev->geo.num_chk, dev->geo.all_luns,
1155			dev->geo.num_ch);
1156	return 0;
1157err:
1158	pr_err("failed to initialize nvm\n");
1159	return ret;
1160}
1161
1162struct nvm_dev *nvm_alloc_dev(int node)
1163{
1164	struct nvm_dev *dev;
1165
1166	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
1167	if (dev)
1168		kref_init(&dev->ref);
1169
1170	return dev;
1171}
1172EXPORT_SYMBOL(nvm_alloc_dev);
1173
1174int nvm_register(struct nvm_dev *dev)
1175{
1176	int ret, exp_pool_size;
1177
1178	if (!dev->q || !dev->ops) {
1179		kref_put(&dev->ref, nvm_free);
1180		return -EINVAL;
1181	}
1182
1183	ret = nvm_init(dev);
1184	if (ret) {
1185		kref_put(&dev->ref, nvm_free);
1186		return ret;
1187	}
1188
1189	exp_pool_size = max_t(int, PAGE_SIZE,
1190			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
1191	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);
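	/*
	 * Sizing example (assuming NVM_MAX_VLBA == 64 and geo.sos == 16):
	 * 64 * (8 + 16) = 1536 bytes, so the max() above picks PAGE_SIZE
	 * and each ppalist pool entry is one page.
	 */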
1192
1193	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
1194						  exp_pool_size);
1195	if (!dev->dma_pool) {
1196		pr_err("could not create dma pool\n");
1197		kref_put(&dev->ref, nvm_free);
1198		return -ENOMEM;
1199	}
1200
1201	/* register device with the global nvm device list */
1202	down_write(&nvm_lock);
1203	list_add(&dev->devices, &nvm_devices);
1204	up_write(&nvm_lock);
1205
1206	return 0;
1207}
1208EXPORT_SYMBOL(nvm_register);
1209
1210void nvm_unregister(struct nvm_dev *dev)
1211{
1212	struct nvm_target *t, *tmp;
1213
1214	mutex_lock(&dev->mlock);
1215	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
1216		if (t->dev->parent != dev)
1217			continue;
1218		__nvm_remove_target(t, false);
1219		kref_put(&dev->ref, nvm_free);
1220	}
1221	mutex_unlock(&dev->mlock);
1222
1223	down_write(&nvm_lock);
1224	list_del(&dev->devices);
1225	up_write(&nvm_lock);
1226
1227	kref_put(&dev->ref, nvm_free);
1228}
1229EXPORT_SYMBOL(nvm_unregister);
1230
1231static int __nvm_configure_create(struct nvm_ioctl_create *create)
1232{
1233	struct nvm_dev *dev;
1234	int ret;
1235
1236	down_write(&nvm_lock);
1237	dev = nvm_find_nvm_dev(create->dev);
1238	up_write(&nvm_lock);
1239
1240	if (!dev) {
1241		pr_err("device not found\n");
1242		return -EINVAL;
1243	}
1244
1245	kref_get(&dev->ref);
1246	ret = nvm_create_tgt(dev, create);
1247	if (ret)
1248		kref_put(&dev->ref, nvm_free);
1249
1250	return ret;
1251}
1252
1253static long nvm_ioctl_info(struct file *file, void __user *arg)
1254{
1255	struct nvm_ioctl_info *info;
1256	struct nvm_tgt_type *tt;
1257	int tgt_iter = 0;
1258
1259	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
1260	if (IS_ERR(info))
1261		return -EFAULT;
1262
1263	info->version[0] = NVM_VERSION_MAJOR;
1264	info->version[1] = NVM_VERSION_MINOR;
1265	info->version[2] = NVM_VERSION_PATCH;
1266
1267	down_write(&nvm_tgtt_lock);
1268	list_for_each_entry(tt, &nvm_tgt_types, list) {
1269		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
1270
1271		tgt->version[0] = tt->version[0];
1272		tgt->version[1] = tt->version[1];
1273		tgt->version[2] = tt->version[2];
1274		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
1275
1276		tgt_iter++;
1277	}
1278
1279	info->tgtsize = tgt_iter;
1280	up_write(&nvm_tgtt_lock);
1281
1282	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
1283		kfree(info);
1284		return -EFAULT;
1285	}
1286
1287	kfree(info);
1288	return 0;
1289}
1290
1291static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
1292{
1293	struct nvm_ioctl_get_devices *devices;
1294	struct nvm_dev *dev;
1295	int i = 0;
1296
1297	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
1298	if (!devices)
1299		return -ENOMEM;
1300
1301	down_write(&nvm_lock);
1302	list_for_each_entry(dev, &nvm_devices, devices) {
1303		struct nvm_ioctl_device_info *info = &devices->info[i];
1304
1305		strlcpy(info->devname, dev->name, sizeof(info->devname));
1306
1307		/* kept for compatibility */
1308		info->bmversion[0] = 1;
1309		info->bmversion[1] = 0;
1310		info->bmversion[2] = 0;
1311		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
1312		i++;
1313
1314		if (i > 31) {
1315			pr_err("max 31 devices can be reported.\n");
1316			break;
1317		}
1318	}
1319	up_write(&nvm_lock);
1320
1321	devices->nr_devices = i;
1322
1323	if (copy_to_user(arg, devices,
1324			 sizeof(struct nvm_ioctl_get_devices))) {
1325		kfree(devices);
1326		return -EFAULT;
1327	}
1328
1329	kfree(devices);
1330	return 0;
1331}
1332
1333static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
1334{
1335	struct nvm_ioctl_create create;
1336
1337	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
1338		return -EFAULT;
1339
1340	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
1341	    create.conf.e.rsv != 0) {
1342		pr_err("reserved config field in use\n");
1343		return -EINVAL;
1344	}
1345
1346	create.dev[DISK_NAME_LEN - 1] = '\0';
1347	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
1348	create.tgtname[DISK_NAME_LEN - 1] = '\0';
1349
1350	if (create.flags != 0) {
1351		__u32 flags = create.flags;
1352
1353		/* Check for valid flags */
1354		if (flags & NVM_TARGET_FACTORY)
1355			flags &= ~NVM_TARGET_FACTORY;
1356
1357		if (flags) {
1358			pr_err("flag not supported\n");
1359			return -EINVAL;
1360		}
1361	}
1362
1363	return __nvm_configure_create(&create);
1364}
1365
1366static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
1367{
1368	struct nvm_ioctl_remove remove;
1369
1370	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
1371		return -EFAULT;
1372
1373	remove.tgtname[DISK_NAME_LEN - 1] = '\0';
1374
1375	if (remove.flags != 0) {
1376		pr_err("no flags supported\n");
1377		return -EINVAL;
1378	}
1379
1380	return nvm_remove_tgt(&remove);
1381}
1382
1383/* kept for compatibility reasons */
1384static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1385{
1386	struct nvm_ioctl_dev_init init;
1387
1388	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
1389		return -EFAULT;
1390
1391	if (init.flags != 0) {
1392		pr_err("no flags supported\n");
1393		return -EINVAL;
1394	}
1395
1396	return 0;
1397}
1398
1399/* Kept for compatibility reasons */
1400static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1401{
1402	struct nvm_ioctl_dev_factory fact;
1403
1404	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1405		return -EFAULT;
1406
1407	fact.dev[DISK_NAME_LEN - 1] = '\0';
1408
1409	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1410		return -EINVAL;
1411
1412	return 0;
1413}
1414
1415static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
1416{
1417	void __user *argp = (void __user *)arg;
1418
1419	if (!capable(CAP_SYS_ADMIN))
1420		return -EPERM;
1421
1422	switch (cmd) {
1423	case NVM_INFO:
1424		return nvm_ioctl_info(file, argp);
1425	case NVM_GET_DEVICES:
1426		return nvm_ioctl_get_devices(file, argp);
1427	case NVM_DEV_CREATE:
1428		return nvm_ioctl_dev_create(file, argp);
1429	case NVM_DEV_REMOVE:
1430		return nvm_ioctl_dev_remove(file, argp);
1431	case NVM_DEV_INIT:
1432		return nvm_ioctl_dev_init(file, argp);
1433	case NVM_DEV_FACTORY:
1434		return nvm_ioctl_dev_factory(file, argp);
1435	}
1436	return 0;
1437}
1438
1439static const struct file_operations _ctl_fops = {
1440	.open = nonseekable_open,
1441	.unlocked_ioctl = nvm_ctl_ioctl,
1442	.owner = THIS_MODULE,
1443	.llseek  = noop_llseek,
1444};
1445
1446static struct miscdevice _nvm_misc = {
1447	.minor		= MISC_DYNAMIC_MINOR,
1448	.name		= "lightnvm",
1449	.nodename	= "lightnvm/control",
1450	.fops		= &_ctl_fops,
1451};
1452builtin_misc_device(_nvm_misc);
v4.10.11
 
   1/*
   2 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
   3 * Initial release: Matias Bjorling <m@bjorling.me>
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License version
   7 * 2 as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful, but
  10 * WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  12 * General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; see the file COPYING.  If not, write to
  16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
  17 * USA.
  18 *
  19 */
  20
 
 
  21#include <linux/list.h>
  22#include <linux/types.h>
  23#include <linux/sem.h>
  24#include <linux/bitmap.h>
 
  25#include <linux/moduleparam.h>
  26#include <linux/miscdevice.h>
  27#include <linux/lightnvm.h>
  28#include <linux/sched/sysctl.h>
  29
  30static LIST_HEAD(nvm_tgt_types);
  31static DECLARE_RWSEM(nvm_tgtt_lock);
  32static LIST_HEAD(nvm_mgrs);
  33static LIST_HEAD(nvm_devices);
  34static DECLARE_RWSEM(nvm_lock);
  35
  36struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  37{
  38	struct nvm_tgt_type *tmp, *tt = NULL;
 
 
  39
  40	if (lock)
  41		down_write(&nvm_tgtt_lock);
  42
  43	list_for_each_entry(tmp, &nvm_tgt_types, list)
  44		if (!strcmp(name, tmp->name)) {
  45			tt = tmp;
  46			break;
 
 
  47		}
 
 
  48
  49	if (lock)
  50		up_write(&nvm_tgtt_lock);
  51	return tt;
  52}
  53EXPORT_SYMBOL(nvm_find_target_type);
  54
  55int nvm_register_tgt_type(struct nvm_tgt_type *tt)
  56{
  57	int ret = 0;
 
 
 
 
 
 
 
  58
  59	down_write(&nvm_tgtt_lock);
  60	if (nvm_find_target_type(tt->name, 0))
  61		ret = -EEXIST;
  62	else
  63		list_add(&tt->list, &nvm_tgt_types);
  64	up_write(&nvm_tgtt_lock);
  65
  66	return ret;
  67}
  68EXPORT_SYMBOL(nvm_register_tgt_type);
  69
  70void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
 
  71{
  72	if (!tt)
  73		return;
  74
  75	down_write(&nvm_lock);
  76	list_del(&tt->list);
  77	up_write(&nvm_lock);
  78}
  79EXPORT_SYMBOL(nvm_unregister_tgt_type);
  80
  81void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
  82							dma_addr_t *dma_handler)
  83{
  84	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
  85								dma_handler);
  86}
  87EXPORT_SYMBOL(nvm_dev_dma_alloc);
 
 
 
 
 
 
 
 
 
  88
  89void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
  90{
  91	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
  92}
  93EXPORT_SYMBOL(nvm_dev_dma_free);
  94
  95static struct nvmm_type *nvm_find_mgr_type(const char *name)
  96{
  97	struct nvmm_type *mt;
  98
  99	list_for_each_entry(mt, &nvm_mgrs, list)
 100		if (!strcmp(name, mt->name))
 101			return mt;
 102
 103	return NULL;
 
 104}
 105
 106static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
 107{
 108	struct nvmm_type *mt;
 109	int ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 110
 111	lockdep_assert_held(&nvm_lock);
 
 
 112
 113	list_for_each_entry(mt, &nvm_mgrs, list) {
 114		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
 115			continue;
 116
 117		ret = mt->register_mgr(dev);
 118		if (ret < 0) {
 119			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
 120								ret, dev->name);
 121			return NULL; /* initialization failed */
 122		} else if (ret > 0)
 123			return mt;
 124	}
 125
 126	return NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 127}
 128
 129int nvm_register_mgr(struct nvmm_type *mt)
 130{
 131	struct nvm_dev *dev;
 132	int ret = 0;
 
 
 
 133
 134	down_write(&nvm_lock);
 135	if (nvm_find_mgr_type(mt->name)) {
 136		ret = -EEXIST;
 137		goto finish;
 138	} else {
 139		list_add(&mt->list, &nvm_mgrs);
 140	}
 141
 142	/* try to register media mgr if any device have none configured */
 143	list_for_each_entry(dev, &nvm_devices, devices) {
 144		if (dev->mt)
 145			continue;
 146
 147		dev->mt = nvm_init_mgr(dev);
 148	}
 149finish:
 150	up_write(&nvm_lock);
 151
 152	return ret;
 153}
 154EXPORT_SYMBOL(nvm_register_mgr);
 155
 156void nvm_unregister_mgr(struct nvmm_type *mt)
 
 157{
 158	if (!mt)
 159		return;
 
 
 
 160
 161	down_write(&nvm_lock);
 162	list_del(&mt->list);
 163	up_write(&nvm_lock);
 164}
 165EXPORT_SYMBOL(nvm_unregister_mgr);
 166
 167static struct nvm_dev *nvm_find_nvm_dev(const char *name)
 
 168{
 169	struct nvm_dev *dev;
 170
 171	list_for_each_entry(dev, &nvm_devices, devices)
 172		if (!strcmp(name, dev->name))
 173			return dev;
 
 174
 175	return NULL;
 176}
 177
 178static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
 179					 struct nvm_rq *rqd)
 180{
 181	struct nvm_dev *dev = tgt_dev->parent;
 182	int i;
 
 
 183
 184	if (rqd->nr_ppas > 1) {
 185		for (i = 0; i < rqd->nr_ppas; i++) {
 186			rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
 187					rqd->ppa_list[i], TRANS_TGT_TO_DEV);
 188			rqd->ppa_list[i] = generic_to_dev_addr(dev,
 189							rqd->ppa_list[i]);
 190		}
 191	} else {
 192		rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
 193						TRANS_TGT_TO_DEV);
 194		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
 195	}
 
 
 196}
 197
 198int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
 199								int type)
 200{
 201	struct nvm_rq rqd;
 
 
 
 
 
 
 
 202	int ret;
 203
 204	if (nr_ppas > dev->ops->max_phys_sect) {
 205		pr_err("nvm: unable to update all sysblocks atomically\n");
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 206		return -EINVAL;
 207	}
 208
 209	memset(&rqd, 0, sizeof(struct nvm_rq));
 
 
 
 
 210
 211	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
 212	nvm_generic_to_addr_mode(dev, &rqd);
 
 
 213
 214	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
 215	nvm_free_rqd_ppalist(dev, &rqd);
 216	if (ret) {
 217		pr_err("nvm: sysblk failed bb mark\n");
 218		return -EINVAL;
 219	}
 220
 221	return 0;
 222}
 223EXPORT_SYMBOL(nvm_set_bb_tbl);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 224
 225int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
 226		       int nr_ppas, int type)
 227{
 228	struct nvm_dev *dev = tgt_dev->parent;
 229	struct nvm_rq rqd;
 230	int ret;
 231
 232	if (nr_ppas > dev->ops->max_phys_sect) {
 233		pr_err("nvm: unable to update all blocks atomically\n");
 234		return -EINVAL;
 
 235	}
 236
 237	memset(&rqd, 0, sizeof(struct nvm_rq));
 
 
 
 
 
 
 
 
 238
 239	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
 240	nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
 241
 242	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
 243	nvm_free_rqd_ppalist(dev, &rqd);
 244	if (ret) {
 245		pr_err("nvm: sysblk failed bb mark\n");
 246		return -EINVAL;
 247	}
 248
 
 
 
 
 
 
 
 
 
 
 249	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 250}
 251EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
 252
 253int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
 254{
 255	struct nvm_dev *dev = tgt_dev->parent;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 256
 257	return dev->ops->max_phys_sect;
 
 258}
 259EXPORT_SYMBOL(nvm_max_phys_sects);
 260
 261int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 
 
 
 
 
 
 
 
 
 262{
 263	struct nvm_dev *dev = tgt_dev->parent;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 264
 265	return dev->mt->submit_io(tgt_dev, rqd);
 266}
 267EXPORT_SYMBOL(nvm_submit_io);
 268
 269int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
 270{
 271	struct nvm_dev *dev = tgt_dev->parent;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 272
 273	return dev->mt->erase_blk(tgt_dev, p, flags);
 
 
 
 
 
 
 
 274}
 275EXPORT_SYMBOL(nvm_erase_blk);
 276
 277int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
 278		    nvm_l2p_update_fn *update_l2p, void *priv)
 279{
 280	struct nvm_dev *dev = tgt_dev->parent;
 
 281
 282	if (!dev->ops->get_l2p_tbl)
 283		return 0;
 284
 285	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
 
 286}
 287EXPORT_SYMBOL(nvm_get_l2p_tbl);
 288
 289int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
 290{
 291	struct nvm_dev *dev = tgt_dev->parent;
 
 
 292
 293	return dev->mt->get_area(dev, lba, len);
 
 294}
 295EXPORT_SYMBOL(nvm_get_area);
 296
 297void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
 298{
 299	struct nvm_dev *dev = tgt_dev->parent;
 
 
 
 300
 301	dev->mt->put_area(dev, lba);
 
 302}
 303EXPORT_SYMBOL(nvm_put_area);
 304
 305void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 306{
 307	int i;
 308
 309	if (rqd->nr_ppas > 1) {
 310		for (i = 0; i < rqd->nr_ppas; i++)
 311			rqd->ppa_list[i] = dev_to_generic_addr(dev,
 312							rqd->ppa_list[i]);
 313	} else {
 314		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
 315	}
 316}
 317EXPORT_SYMBOL(nvm_addr_to_generic_mode);
 318
 319void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 320{
 321	int i;
 322
 323	if (rqd->nr_ppas > 1) {
 324		for (i = 0; i < rqd->nr_ppas; i++)
 325			rqd->ppa_list[i] = generic_to_dev_addr(dev,
 326							rqd->ppa_list[i]);
 327	} else {
 328		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
 329	}
 330}
 331EXPORT_SYMBOL(nvm_generic_to_addr_mode);
 332
 333int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
 334			const struct ppa_addr *ppas, int nr_ppas, int vblk)
 335{
 336	struct nvm_geo *geo = &dev->geo;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 337	int i, plane_cnt, pl_idx;
 338	struct ppa_addr ppa;
 339
 340	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
 341		rqd->nr_ppas = nr_ppas;
 342		rqd->ppa_addr = ppas[0];
 343
 344		return 0;
 345	}
 346
 347	rqd->nr_ppas = nr_ppas;
 348	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
 349	if (!rqd->ppa_list) {
 350		pr_err("nvm: failed to allocate dma memory\n");
 351		return -ENOMEM;
 352	}
 353
 354	if (!vblk) {
 355		for (i = 0; i < nr_ppas; i++)
 356			rqd->ppa_list[i] = ppas[i];
 357	} else {
 358		plane_cnt = geo->plane_mode;
 359		rqd->nr_ppas *= plane_cnt;
 360
 361		for (i = 0; i < nr_ppas; i++) {
 362			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
 363				ppa = ppas[i];
 364				ppa.g.pl = pl_idx;
 365				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
 366			}
 367		}
 368	}
 369
 370	return 0;
 371}
 372EXPORT_SYMBOL(nvm_set_rqd_ppalist);
 373
 374void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 375{
 376	if (!rqd->ppa_list)
 377		return;
 378
 379	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
 380}
 381EXPORT_SYMBOL(nvm_free_rqd_ppalist);
 382
 383int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
 384								int flags)
 385{
 386	struct nvm_rq rqd;
 387	int ret;
 388
 389	if (!dev->ops->erase_block)
 390		return 0;
 391
 392	memset(&rqd, 0, sizeof(struct nvm_rq));
 
 
 
 
 
 
 393
 394	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
 395	if (ret)
 396		return ret;
 397
 398	nvm_generic_to_addr_mode(dev, &rqd);
 
 
 
 399
 400	rqd.flags = flags;
 
 401
 402	ret = dev->ops->erase_block(dev, &rqd);
 403
 404	nvm_free_rqd_ppalist(dev, &rqd);
 
 405
 
 
 
 
 406	return ret;
 407}
 408EXPORT_SYMBOL(nvm_erase_ppa);
 409
 410void nvm_end_io(struct nvm_rq *rqd, int error)
 411{
 412	rqd->error = error;
 413	rqd->end_io(rqd);
 
 414}
 415EXPORT_SYMBOL(nvm_end_io);
 416
 417static void nvm_end_io_sync(struct nvm_rq *rqd)
 
 418{
 419	struct completion *waiting = rqd->wait;
 
 420
 421	rqd->wait = NULL;
 
 422
 423	complete(waiting);
 
 
 
 
 
 
 424}
 425
 426static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
 427						int flags, void *buf, int len)
 428{
 429	DECLARE_COMPLETION_ONSTACK(wait);
 430	struct bio *bio;
 431	int ret;
 432	unsigned long hang_check;
 433
 434	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
 435	if (IS_ERR_OR_NULL(bio))
 436		return -ENOMEM;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 437
 438	nvm_generic_to_addr_mode(dev, rqd);
 
 
 439
 440	rqd->dev = NULL;
 441	rqd->opcode = opcode;
 442	rqd->flags = flags;
 443	rqd->bio = bio;
 444	rqd->wait = &wait;
 445	rqd->end_io = nvm_end_io_sync;
 446
 447	ret = dev->ops->submit_io(dev, rqd);
 448	if (ret) {
 449		bio_put(bio);
 450		return ret;
 451	}
 452
 453	/* Prevent hang_check timer from firing at us during very long I/O */
 454	hang_check = sysctl_hung_task_timeout_secs;
 455	if (hang_check)
 456		while (!wait_for_completion_io_timeout(&wait,
 457							hang_check * (HZ/2)))
 458			;
 459	else
 460		wait_for_completion_io(&wait);
 461
 462	return rqd->error;
 463}
 464
 465/**
 466 * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
 467 *			 take to free ppa list if necessary.
 468 * @dev:	device
 469 * @ppa_list:	user created ppa_list
 470 * @nr_ppas:	length of ppa_list
 471 * @opcode:	device opcode
 472 * @flags:	device flags
 473 * @buf:	data buffer
 474 * @len:	data buffer length
 475 */
 476int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
 477			int nr_ppas, int opcode, int flags, void *buf, int len)
 478{
 479	struct nvm_rq rqd;
 
 
 
 
 
 
 
 
 480
 481	if (dev->ops->max_phys_sect < nr_ppas)
 482		return -EINVAL;
 
 
 
 
 
 
 
 483
 484	memset(&rqd, 0, sizeof(struct nvm_rq));
 
 
 485
 486	rqd.nr_ppas = nr_ppas;
 487	if (nr_ppas > 1)
 488		rqd.ppa_list = ppa_list;
 489	else
 490		rqd.ppa_addr = ppa_list[0];
 491
 492	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
 493}
 494EXPORT_SYMBOL(nvm_submit_ppa_list);
 495
 496/**
 497 * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
 498 *		    as single, dual, quad plane PPAs depending on device type.
 499 * @dev:	device
 500 * @ppa:	user created ppa_list
 501 * @nr_ppas:	length of ppa_list
 502 * @opcode:	device opcode
 503 * @flags:	device flags
 504 * @buf:	data buffer
 505 * @len:	data buffer length
 506 */
 507int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
 508				int opcode, int flags, void *buf, int len)
 509{
 510	struct nvm_rq rqd;
 511	int ret;
 512
 513	memset(&rqd, 0, sizeof(struct nvm_rq));
 514	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
 515	if (ret)
 516		return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 517
 518	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 519
 520	nvm_free_rqd_ppalist(dev, &rqd);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 521
 522	return ret;
 523}
 524EXPORT_SYMBOL(nvm_submit_ppa);
 525
 526/*
 527 * folds a bad block list from its plane representation to its virtual
 528 * block representation. The fold is done in place and reduced size is
 529 * returned.
 530 *
 531 * If any of the planes status are bad or grown bad block, the virtual block
 532 * is marked bad. If not bad, the first plane state acts as the block state.
 533 */
 534int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
 
 535{
 536	struct nvm_geo *geo = &dev->geo;
 537	int blk, offset, pl, blktype;
 538
 539	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
 540		return -EINVAL;
 541
 542	for (blk = 0; blk < geo->blks_per_lun; blk++) {
 543		offset = blk * geo->plane_mode;
 544		blktype = blks[offset];
 545
 546		/* Bad blocks on any planes take precedence over other types */
 547		for (pl = 0; pl < geo->plane_mode; pl++) {
 548			if (blks[offset + pl] &
 549					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
 550				blktype = blks[offset + pl];
 551				break;
 552			}
 553		}
 554
 555		blks[blk] = blktype;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 556	}
 557
 558	return geo->blks_per_lun;
 559}
 560EXPORT_SYMBOL(nvm_bb_tbl_fold);
 561
 562int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
 
 563{
 564	ppa = generic_to_dev_addr(dev, ppa);
 
 
 
 
 
 
 
 
 
 
 
 
 
 565
 566	return dev->ops->get_bb_tbl(dev, ppa, blks);
 567}
 568EXPORT_SYMBOL(nvm_get_bb_tbl);
 
 
 569
 570int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
 571		       u8 *blks)
 572{
 573	struct nvm_dev *dev = tgt_dev->parent;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 574
 575	ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
 576	return nvm_get_bb_tbl(dev, ppa, blks);
 
 
 
 
 
 577}
 578EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
 579
 580static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
 
 581{
 582	struct nvm_geo *geo = &dev->geo;
 583	int i;
 584
 585	dev->lps_per_blk = geo->pgs_per_blk;
 586	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
 587	if (!dev->lptbl)
 588		return -ENOMEM;
 589
 590	/* Just a linear array */
 591	for (i = 0; i < dev->lps_per_blk; i++)
 592		dev->lptbl[i] = i;
 593
 594	return 0;
 595}
 
 596
 597static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
 
 598{
 599	int i, p;
 600	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
 
 601
 602	if (!mlc->num_pairs)
 603		return 0;
 604
 605	dev->lps_per_blk = mlc->num_pairs;
 606	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
 607	if (!dev->lptbl)
 608		return -ENOMEM;
 
 
 
 
 
 609
 610	/* The lower page table encoding consists of a list of bytes, where each
 611	 * has a lower and an upper half. The first half byte maintains the
 612	 * increment value and every value after is an offset added to the
 613	 * previous incrementation value
 614	 */
 615	dev->lptbl[0] = mlc->pairs[0] & 0xF;
 616	for (i = 1; i < dev->lps_per_blk; i++) {
 617		p = mlc->pairs[i >> 1];
 618		if (i & 0x1) /* upper */
 619			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
 620		else /* lower */
 621			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
 622	}
 623
 624	return 0;
 625}
 
 626
 627static int nvm_core_init(struct nvm_dev *dev)
 628{
 629	struct nvm_id *id = &dev->identity;
 630	struct nvm_id_group *grp = &id->groups[0];
 631	struct nvm_geo *geo = &dev->geo;
 632	int ret;
 633
 634	/* Whole device values */
 635	geo->nr_chnls = grp->num_ch;
 636	geo->luns_per_chnl = grp->num_lun;
 637
 638	/* Generic device values */
 639	geo->pgs_per_blk = grp->num_pg;
 640	geo->blks_per_lun = grp->num_blk;
 641	geo->nr_planes = grp->num_pln;
 642	geo->fpg_size = grp->fpg_sz;
 643	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
 644	geo->sec_size = grp->csecs;
 645	geo->oob_size = grp->sos;
 646	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
 647	geo->mccap = grp->mccap;
 648	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
 649
 650	geo->plane_mode = NVM_PLANE_SINGLE;
 651	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
 652
 653	if (grp->mpos & 0x020202)
 654		geo->plane_mode = NVM_PLANE_DOUBLE;
 655	if (grp->mpos & 0x040404)
 656		geo->plane_mode = NVM_PLANE_QUAD;
 657
 658	if (grp->mtype != 0) {
 659		pr_err("memory type not supported\n");
 660		return -EINVAL;
 661	}
 662
 663	/* calculated values */
 664	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
 665	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
 666	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
 667	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
 668
 669	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
 670	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
 671					sizeof(unsigned long), GFP_KERNEL);
 672	if (!dev->lun_map)
 673		return -ENOMEM;
 674
 675	switch (grp->fmtype) {
 676	case NVM_ID_FMTYPE_SLC:
 677		if (nvm_init_slc_tbl(dev, grp)) {
 678			ret = -ENOMEM;
 679			goto err_fmtype;
 680		}
 681		break;
 682	case NVM_ID_FMTYPE_MLC:
 683		if (nvm_init_mlc_tbl(dev, grp)) {
 684			ret = -ENOMEM;
 685			goto err_fmtype;
 686		}
 687		break;
 688	default:
 689		pr_err("flash type not supported\n");
 690		ret = -EINVAL;
 691		goto err_fmtype;
 692	}
 693
 694	mutex_init(&dev->mlock);
 695	spin_lock_init(&dev->lock);
 696
 697	blk_queue_logical_block_size(dev->q, geo->sec_size);
 698
 699	return 0;
 700err_fmtype:
 701	kfree(dev->lun_map);
 702	return ret;
 703}
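/*
 * Worked example (illustrative sketch) of the derived geometry above:
 * 4 sectors per page, 2 planes, 256 pages per block, 1024 blocks per
 * LUN and 8 LUNs in total give
 *
 *   sec_per_pl  = 4 * 2        = 8
 *   sec_per_blk = 8 * 256      = 2048
 *   sec_per_lun = 2048 * 1024  = 2097152
 *   total_secs  = 8 * 2097152  = 16777216
 */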
 704
 705static void nvm_free_mgr(struct nvm_dev *dev)
 706{
 707	if (!dev->mt)
 708		return;
 709
 710	dev->mt->unregister_mgr(dev);
 711	dev->mt = NULL;
 712}
 713
 714void nvm_free(struct nvm_dev *dev)
 715{
 716	if (!dev)
 717		return;
 718
 719	nvm_free_mgr(dev);
 720
 721	if (dev->dma_pool)
 722		dev->ops->destroy_dma_pool(dev->dma_pool);
 723
 724	kfree(dev->lptbl);
 725	kfree(dev->lun_map);
 726	kfree(dev);
 727}
 728
 729static int nvm_init(struct nvm_dev *dev)
 730{
 731	struct nvm_geo *geo = &dev->geo;
 732	int ret = -EINVAL;
 733
 734	if (!dev->q || !dev->ops)
 735		return ret;
 736
 737	if (dev->ops->identity(dev, &dev->identity)) {
 738		pr_err("device could not be identified\n");
 739		goto err;
 740	}
 741
 742	pr_debug("ver:%x nvm_vendor:%x groups:%u\n",
 743			dev->identity.ver_id, dev->identity.vmnt,
 744							dev->identity.cgrps);
 745
 746	if (dev->identity.ver_id != 1) {
 747		pr_err("device not supported by kernel.\n");
 748		goto err;
 749	}
 750
 751	if (dev->identity.cgrps != 1) {
 752		pr_err("only one group configuration supported.\n");
 753		goto err;
 754	}
 755
 756	ret = nvm_core_init(dev);
 757	if (ret) {
 758		pr_err("could not initialize core structures.\n");
 759		goto err;
 760	}
 761
 762	pr_info("registered %s [%u/%u/%u/%u/%u/%u]\n",
 763			dev->name, geo->sec_per_pg, geo->nr_planes,
 764			geo->pgs_per_blk, geo->blks_per_lun,
 765			geo->nr_luns, geo->nr_chnls);
 766	return 0;
 767err:
 768	pr_err("failed to initialize nvm\n");
 769	return ret;
 770}
 771
 772struct nvm_dev *nvm_alloc_dev(int node)
 773{
 774	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
 775}
 776EXPORT_SYMBOL(nvm_alloc_dev);
 777
 778int nvm_register(struct nvm_dev *dev)
 779{
 780	int ret;
 781
 782	ret = nvm_init(dev);
 783	if (ret)
 784		goto err_init;
 785
 786	if (dev->ops->max_phys_sect > 256) {
 787		pr_info("max sectors supported is 256.\n");
 788		ret = -EINVAL;
 789		goto err_init;
 790	}
 791
 792	if (dev->ops->max_phys_sect > 1) {
 793		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
 794		if (!dev->dma_pool) {
 795			pr_err("could not create dma pool\n");
 796			ret = -ENOMEM;
 797			goto err_init;
 798		}
 799	}
 800
 801	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
 802		ret = nvm_get_sysblock(dev, &dev->sb);
 803		if (!ret)
 804			pr_err("device not initialized.\n");
 805		else if (ret < 0)
 806			pr_err("err (%d) on device initialization\n", ret);
 807	}
 808
 809	/* register device with a supported media manager */
 810	down_write(&nvm_lock);
 811	if (ret > 0)
 812		dev->mt = nvm_init_mgr(dev);
 813	list_add(&dev->devices, &nvm_devices);
 814	up_write(&nvm_lock);
 815
 816	return 0;
 817err_init:
 818	kfree(dev->lun_map);
 819	return ret;
 820}
 821EXPORT_SYMBOL(nvm_register);
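/*
 * Illustrative sketch (hypothetical driver code, not in this file): how
 * a host driver such as an NVMe shim hands a device to the core.
 * example_ops and example_probe() are made-up names; the callbacks in
 * nvm_dev_ops (identity, get_bb_tbl, create/destroy_dma_pool, ...)
 * come from the driver.
 */
static struct nvm_dev_ops example_ops;	/* hypothetical, filled by the driver */

static int example_probe(struct request_queue *q, const char *name)
{
	struct nvm_dev *dev;

	dev = nvm_alloc_dev(NUMA_NO_NODE);
	if (!dev)
		return -ENOMEM;

	dev->q = q;		/* used by nvm_core_init() for the block size */
	dev->ops = &example_ops;
	strlcpy(dev->name, name, DISK_NAME_LEN);

	return nvm_register(dev);	/* pairs with nvm_unregister() below */
}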
 822
 823void nvm_unregister(struct nvm_dev *dev)
 824{
 825	down_write(&nvm_lock);
 826	list_del(&dev->devices);
 827	up_write(&nvm_lock);
 828
 829	nvm_free(dev);
 830}
 831EXPORT_SYMBOL(nvm_unregister);
 832
 833static int __nvm_configure_create(struct nvm_ioctl_create *create)
 834{
 835	struct nvm_dev *dev;
 836	struct nvm_ioctl_create_simple *s;
 837
 838	down_write(&nvm_lock);
 839	dev = nvm_find_nvm_dev(create->dev);
 840	up_write(&nvm_lock);
 841
 842	if (!dev) {
 843		pr_err("device not found\n");
 844		return -EINVAL;
 845	}
 846
 847	if (!dev->mt) {
 848		pr_info("device has no media manager registered.\n");
 849		return -ENODEV;
 850	}
 851
 852	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
 853		pr_err("config type not valid\n");
 854		return -EINVAL;
 855	}
 856	s = &create->conf.s;
 857
 858	if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
 859		pr_err("lun out of bound (%u:%u > %u)\n",
 860			s->lun_begin, s->lun_end, dev->geo.nr_luns);
 861		return -EINVAL;
 862	}
 863
 864	return dev->mt->create_tgt(dev, create);
 865}
 866
 867static long nvm_ioctl_info(struct file *file, void __user *arg)
 868{
 869	struct nvm_ioctl_info *info;
 870	struct nvm_tgt_type *tt;
 871	int tgt_iter = 0;
 872
 873	if (!capable(CAP_SYS_ADMIN))
 874		return -EPERM;
 875
 876	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
 877	if (IS_ERR(info))
 878		return PTR_ERR(info);
 879
 880	info->version[0] = NVM_VERSION_MAJOR;
 881	info->version[1] = NVM_VERSION_MINOR;
 882	info->version[2] = NVM_VERSION_PATCH;
 883
 884	down_write(&nvm_lock);
 885	list_for_each_entry(tt, &nvm_tgt_types, list) {
 886		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
 887
 888		tgt->version[0] = tt->version[0];
 889		tgt->version[1] = tt->version[1];
 890		tgt->version[2] = tt->version[2];
 891		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
 892
 893		tgt_iter++;
 894	}
 895
 896	info->tgtsize = tgt_iter;
 897	up_write(&nvm_lock);
 898
 899	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
 900		kfree(info);
 901		return -EFAULT;
 902	}
 903
 904	kfree(info);
 905	return 0;
 906}
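/*
 * Illustrative sketch (hypothetical userspace code): querying the
 * version triple and registered target types that the handler above
 * fills in. Error handling trimmed for brevity.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/lightnvm.h>
 *
 *	struct nvm_ioctl_info info = { 0 };
 *	int fd = open("/dev/lightnvm/control", O_RDWR);
 *
 *	if (fd >= 0 && !ioctl(fd, NVM_INFO, &info))
 *		printf("lightnvm %u.%u.%u, %u target type(s)\n",
 *		       info.version[0], info.version[1],
 *		       info.version[2], info.tgtsize);
 */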
 907
 908static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
 909{
 910	struct nvm_ioctl_get_devices *devices;
 911	struct nvm_dev *dev;
 912	int i = 0;
 913
 914	if (!capable(CAP_SYS_ADMIN))
 915		return -EPERM;
 916
 917	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
 918	if (!devices)
 919		return -ENOMEM;
 920
 921	down_write(&nvm_lock);
 922	list_for_each_entry(dev, &nvm_devices, devices) {
 923		struct nvm_ioctl_device_info *info = &devices->info[i];
 924
 925		sprintf(info->devname, "%s", dev->name);
 926		if (dev->mt) {
 927			info->bmversion[0] = dev->mt->version[0];
 928			info->bmversion[1] = dev->mt->version[1];
 929			info->bmversion[2] = dev->mt->version[2];
 930			sprintf(info->bmname, "%s", dev->mt->name);
 931		} else {
 932			sprintf(info->bmname, "none");
 933		}
 934
 935		i++;
 936		if (i > 31) {
 937		pr_err("max 31 devices can be reported.\n");
 938			break;
 939		}
 940	}
 941	up_write(&nvm_lock);
 942
 943	devices->nr_devices = i;
 944
 945	if (copy_to_user(arg, devices,
 946			 sizeof(struct nvm_ioctl_get_devices))) {
 947		kfree(devices);
 948		return -EFAULT;
 949	}
 950
 951	kfree(devices);
 952	return 0;
 953}
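/*
 * Illustrative sketch (hypothetical userspace code): enumerating the
 * registered devices through the handler above; at most 31 entries are
 * reported per call.
 *
 *	struct nvm_ioctl_get_devices devs = { 0 };
 *	int fd = open("/dev/lightnvm/control", O_RDWR);
 *	int i;
 *
 *	if (fd >= 0 && !ioctl(fd, NVM_GET_DEVICES, &devs))
 *		for (i = 0; i < devs.nr_devices; i++)
 *			printf("%s (media manager: %s)\n",
 *			       devs.info[i].devname, devs.info[i].bmname);
 */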
 954
 955static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
 956{
 957	struct nvm_ioctl_create create;
 958
 959	if (!capable(CAP_SYS_ADMIN))
 960		return -EPERM;
 961
 962	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
 963		return -EFAULT;
 964
 965	create.dev[DISK_NAME_LEN - 1] = '\0';
 966	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
 967	create.tgtname[DISK_NAME_LEN - 1] = '\0';
 968
 969	if (create.flags != 0) {
 970		pr_err("no flags supported\n");
 971		return -EINVAL;
 972	}
 973
 974	return __nvm_configure_create(&create);
 975}
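/*
 * Illustrative sketch (hypothetical userspace code): creating a target
 * over the first four LUNs of a device. "nvme0n1", "rrpc" and "tgt0"
 * are example names; the strings must fit the fixed-size fields that
 * the handler above NUL-terminates defensively.
 *
 *	struct nvm_ioctl_create c = { .flags = 0 };
 *
 *	strncpy(c.dev, "nvme0n1", sizeof(c.dev) - 1);
 *	strncpy(c.tgttype, "rrpc", sizeof(c.tgttype) - 1);
 *	strncpy(c.tgtname, "tgt0", sizeof(c.tgtname) - 1);
 *	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *	c.conf.s.lun_begin = 0;
 *	c.conf.s.lun_end = 3;
 *
 *	ioctl(fd, NVM_DEV_CREATE, &c);
 */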
 976
 977static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
 978{
 979	struct nvm_ioctl_remove remove;
 980	struct nvm_dev *dev;
 981	int ret = 0;
 982
 983	if (!capable(CAP_SYS_ADMIN))
 984		return -EPERM;
 985
 986	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
 987		return -EFAULT;
 988
 989	remove.tgtname[DISK_NAME_LEN - 1] = '\0';
 990
 991	if (remove.flags != 0) {
 992		pr_err("no flags supported\n");
 993		return -EINVAL;
 994	}
 995
 996	list_for_each_entry(dev, &nvm_devices, devices) {
 997		ret = dev->mt ? dev->mt->remove_tgt(dev, &remove) : -ENODEV;
 998		if (!ret)
 999			break;
1000	}
1001
1002	return ret;
1003}
1004
1005static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
1006{
1007	info->seqnr = 1;
1008	info->erase_cnt = 0;
1009	info->version = 1;
1010}
1011
1012static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1013{
1014	struct nvm_dev *dev;
1015	struct nvm_sb_info info;
1016	int ret;
1017
1018	down_write(&nvm_lock);
1019	dev = nvm_find_nvm_dev(init->dev);
1020	up_write(&nvm_lock);
1021	if (!dev) {
1022		pr_err("device not found\n");
1023		return -EINVAL;
1024	}
1025
1026	nvm_setup_nvm_sb_info(&info);
1027
1028	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1029	info.fs_ppa.ppa = -1;
1030
1031	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
1032		ret = nvm_init_sysblock(dev, &info);
1033		if (ret)
1034			return ret;
1035	}
1036
1037	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1038
1039	down_write(&nvm_lock);
1040	dev->mt = nvm_init_mgr(dev);
1041	up_write(&nvm_lock);
1042
1043	return 0;
1044}
1045
1046static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1047{
1048	struct nvm_ioctl_dev_init init;
1049
1050	if (!capable(CAP_SYS_ADMIN))
1051		return -EPERM;
1052
1053	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
1054		return -EFAULT;
1055
1056	if (init.flags != 0) {
1057		pr_err("no flags supported\n");
1058		return -EINVAL;
1059	}
1060
1061	init.dev[DISK_NAME_LEN - 1] = '\0';
1062
1063	return __nvm_ioctl_dev_init(&init);
1064}
1065
1066static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1067{
1068	struct nvm_ioctl_dev_factory fact;
1069	struct nvm_dev *dev;
1070
1071	if (!capable(CAP_SYS_ADMIN))
1072		return -EPERM;
1073
1074	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1075		return -EFAULT;
1076
1077	fact.dev[DISK_NAME_LEN - 1] = '\0';
1078
1079	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1080		return -EINVAL;
1081
1082	down_write(&nvm_lock);
1083	dev = nvm_find_nvm_dev(fact.dev);
1084	up_write(&nvm_lock);
1085	if (!dev) {
1086		pr_err("device not found\n");
1087		return -EINVAL;
1088	}
1089
1090	nvm_free_mgr(dev);
1091
1092	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1093		return nvm_dev_factory(dev, fact.flags);
1094
1095	return 0;
1096}
1097
1098static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
1099{
1100	void __user *argp = (void __user *)arg;
1101
1102	switch (cmd) {
1103	case NVM_INFO:
1104		return nvm_ioctl_info(file, argp);
1105	case NVM_GET_DEVICES:
1106		return nvm_ioctl_get_devices(file, argp);
1107	case NVM_DEV_CREATE:
1108		return nvm_ioctl_dev_create(file, argp);
1109	case NVM_DEV_REMOVE:
1110		return nvm_ioctl_dev_remove(file, argp);
1111	case NVM_DEV_INIT:
1112		return nvm_ioctl_dev_init(file, argp);
1113	case NVM_DEV_FACTORY:
1114		return nvm_ioctl_dev_factory(file, argp);
1115	}
1116	return 0;
1117}
1118
1119static const struct file_operations _ctl_fops = {
1120	.open = nonseekable_open,
1121	.unlocked_ioctl = nvm_ctl_ioctl,
1122	.owner = THIS_MODULE,
1123	.llseek  = noop_llseek,
1124};
1125
1126static struct miscdevice _nvm_misc = {
1127	.minor		= MISC_DYNAMIC_MINOR,
1128	.name		= "lightnvm",
1129	.nodename	= "lightnvm/control",
1130	.fops		= &_ctl_fops,
1131};
1132builtin_misc_device(_nvm_misc);