1/*
2 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17 * USA.
18 *
19 */
20
21#include <linux/list.h>
22#include <linux/types.h>
23#include <linux/sem.h>
24#include <linux/bitmap.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/miscdevice.h>
28#include <linux/lightnvm.h>
29#include <linux/sched/sysctl.h>
30
31static LIST_HEAD(nvm_tgt_types);
32static DECLARE_RWSEM(nvm_tgtt_lock);
33static LIST_HEAD(nvm_devices);
34static DECLARE_RWSEM(nvm_lock);
35
36/* Map between virtual and physical channel and lun */
37struct nvm_ch_map {
38 int ch_off;
39 int num_lun;
40 int *lun_offs;
41};
42
43struct nvm_dev_map {
44 struct nvm_ch_map *chnls;
45 int num_ch;
46};
47
48static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
49{
50 struct nvm_target *tgt;
51
52 list_for_each_entry(tgt, &dev->targets, list)
53 if (!strcmp(name, tgt->disk->disk_name))
54 return tgt;
55
56 return NULL;
57}
58
59static bool nvm_target_exists(const char *name)
60{
61 struct nvm_dev *dev;
62 struct nvm_target *tgt;
63 bool ret = false;
64
65 down_write(&nvm_lock);
66 list_for_each_entry(dev, &nvm_devices, devices) {
67 mutex_lock(&dev->mlock);
68 list_for_each_entry(tgt, &dev->targets, list) {
69 if (!strcmp(name, tgt->disk->disk_name)) {
70 ret = true;
71 mutex_unlock(&dev->mlock);
72 goto out;
73 }
74 }
75 mutex_unlock(&dev->mlock);
76 }
77
78out:
79 up_write(&nvm_lock);
80 return ret;
81}
82
83static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
84{
85 int i;
86
87 for (i = lun_begin; i <= lun_end; i++) {
88 if (test_and_set_bit(i, dev->lun_map)) {
89 pr_err("nvm: lun %d already allocated\n", i);
90 goto err;
91 }
92 }
93
94 return 0;
95err:
96 while (--i >= lun_begin)
97 clear_bit(i, dev->lun_map);
98
99 return -EBUSY;
100}
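/*
 * Note: dev->lun_map is indexed by the device's flat LUN id, i.e.
 * lunid = ch * geo.num_lun + lun (see nvm_remove_tgt_dev() below).
 * As a hypothetical example, on a device with 4 LUNs per channel,
 * reserving LUNs 4..7 claims all of channel 1:
 *
 *	nvm_reserve_luns(dev, 4, 7);     // sets bits 4,5,6,7 in dev->lun_map
 *	nvm_release_luns_err(dev, 4, 7); // clears them again on teardown
 */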
101
102static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
103 int lun_end)
104{
105 int i;
106
107 for (i = lun_begin; i <= lun_end; i++)
108 WARN_ON(!test_and_clear_bit(i, dev->lun_map));
109}
110
111static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
112{
113 struct nvm_dev *dev = tgt_dev->parent;
114 struct nvm_dev_map *dev_map = tgt_dev->map;
115 int i, j;
116
117 for (i = 0; i < dev_map->num_ch; i++) {
118 struct nvm_ch_map *ch_map = &dev_map->chnls[i];
119 int *lun_offs = ch_map->lun_offs;
120 int ch = i + ch_map->ch_off;
121
122 if (clear) {
123 for (j = 0; j < ch_map->num_lun; j++) {
124 int lun = j + lun_offs[j];
125 int lunid = (ch * dev->geo.num_lun) + lun;
126
127 WARN_ON(!test_and_clear_bit(lunid,
128 dev->lun_map));
129 }
130 }
131
132 kfree(ch_map->lun_offs);
133 }
134
135 kfree(dev_map->chnls);
136 kfree(dev_map);
137
138 kfree(tgt_dev->luns);
139 kfree(tgt_dev);
140}
141
142static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
143 u16 lun_begin, u16 lun_end,
144 u16 op)
145{
146 struct nvm_tgt_dev *tgt_dev = NULL;
147 struct nvm_dev_map *dev_rmap = dev->rmap;
148 struct nvm_dev_map *dev_map;
149 struct ppa_addr *luns;
150 int num_lun = lun_end - lun_begin + 1;
151 int luns_left = num_lun;
152 int num_ch = num_lun / dev->geo.num_lun;
153 int num_ch_mod = num_lun % dev->geo.num_lun;
154 int bch = lun_begin / dev->geo.num_lun;
155 int blun = lun_begin % dev->geo.num_lun;
156 int lunid = 0;
157 int lun_balanced = 1;
158 int sec_per_lun, prev_num_lun;
159 int i, j;
160
161 num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;
162
163 dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
164 if (!dev_map)
165 goto err_dev;
166
167 dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
168 if (!dev_map->chnls)
169 goto err_chnls;
170
171 luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
172 if (!luns)
173 goto err_luns;
174
175 prev_num_lun = (luns_left > dev->geo.num_lun) ?
176 dev->geo.num_lun : luns_left;
177 for (i = 0; i < num_ch; i++) {
178 struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
179 int *lun_roffs = ch_rmap->lun_offs;
180 struct nvm_ch_map *ch_map = &dev_map->chnls[i];
181 int *lun_offs;
182 int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
183 dev->geo.num_lun : luns_left;
184
185 if (lun_balanced && prev_num_lun != luns_in_chnl)
186 lun_balanced = 0;
187
188 ch_map->ch_off = ch_rmap->ch_off = bch;
189 ch_map->num_lun = luns_in_chnl;
190
191 lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
192 if (!lun_offs)
193 goto err_ch;
194
195 for (j = 0; j < luns_in_chnl; j++) {
196 luns[lunid].ppa = 0;
197 luns[lunid].a.ch = i;
198 luns[lunid++].a.lun = j;
199
200 lun_offs[j] = blun;
201 lun_roffs[j + blun] = blun;
202 }
203
204 ch_map->lun_offs = lun_offs;
205
206 /* when starting a new channel, lun offset is reset */
207 blun = 0;
208 luns_left -= luns_in_chnl;
209 }
210
211 dev_map->num_ch = num_ch;
212
213 tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
214 if (!tgt_dev)
215 goto err_ch;
216
217 /* Inherit device geometry from parent */
218 memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
219
220 /* Target device only owns a portion of the physical device */
221 tgt_dev->geo.num_ch = num_ch;
222 tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
223 tgt_dev->geo.all_luns = num_lun;
224 tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;
225
226 tgt_dev->geo.op = op;
227
228 sec_per_lun = dev->geo.clba * dev->geo.num_chk;
229 tgt_dev->geo.total_secs = num_lun * sec_per_lun;
230
231 tgt_dev->q = dev->q;
232 tgt_dev->map = dev_map;
233 tgt_dev->luns = luns;
234 tgt_dev->parent = dev;
235
236 return tgt_dev;
237err_ch:
238 while (--i >= 0)
239 kfree(dev_map->chnls[i].lun_offs);
240 kfree(luns);
241err_luns:
242 kfree(dev_map->chnls);
243err_chnls:
244 kfree(dev_map);
245err_dev:
246 return tgt_dev;
247}
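/*
 * Worked example (hypothetical geometry, assuming a channel-aligned LUN
 * range): a device with geo.num_ch = 4 and geo.num_lun = 4 exposes flat
 * LUN ids 0..15. Creating a target over LUNs 4..11 gives:
 *
 *	num_lun = 8, num_ch = 2, bch = 1, blun = 0
 *	dev_map->chnls[0] = { .ch_off = 1, .num_lun = 4, .lun_offs = {0,0,0,0} }
 *	dev_map->chnls[1] = { .ch_off = 1, .num_lun = 4, .lun_offs = {0,0,0,0} }
 *
 * so target channel i translates to device channel i + 1 and target LUN j
 * stays LUN j (see nvm_map_to_dev()). The reverse offsets are recorded in
 * dev->rmap for nvm_map_to_tgt(). lun_balanced stays 1, so
 * tgt_dev->geo.num_lun = 4 and total_secs = 8 * clba * num_chk.
 */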
248
249static const struct block_device_operations nvm_fops = {
250 .owner = THIS_MODULE,
251};
252
253static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
254{
255 struct nvm_tgt_type *tt;
256
257 list_for_each_entry(tt, &nvm_tgt_types, list)
258 if (!strcmp(name, tt->name))
259 return tt;
260
261 return NULL;
262}
263
264static struct nvm_tgt_type *nvm_find_target_type(const char *name)
265{
266 struct nvm_tgt_type *tt;
267
268 down_write(&nvm_tgtt_lock);
269 tt = __nvm_find_target_type(name);
270 up_write(&nvm_tgtt_lock);
271
272 return tt;
273}
274
275static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
276 int lun_end)
277{
278 if (lun_begin > lun_end || lun_end >= geo->all_luns) {
279 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
280 lun_begin, lun_end, geo->all_luns - 1);
281 return -EINVAL;
282 }
283
284 return 0;
285}
286
287static int __nvm_config_simple(struct nvm_dev *dev,
288 struct nvm_ioctl_create_simple *s)
289{
290 struct nvm_geo *geo = &dev->geo;
291
292 if (s->lun_begin == -1 && s->lun_end == -1) {
293 s->lun_begin = 0;
294 s->lun_end = geo->all_luns - 1;
295 }
296
297 return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
298}
299
300static int __nvm_config_extended(struct nvm_dev *dev,
301 struct nvm_ioctl_create_extended *e)
302{
303 if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
304 e->lun_begin = 0;
305 e->lun_end = dev->geo.all_luns - 1;
306 }
307
308 /* op not set falls into target's default */
309 if (e->op == 0xFFFF) {
310 e->op = NVM_TARGET_DEFAULT_OP;
311 } else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
312 pr_err("nvm: invalid over provisioning value\n");
313 return -EINVAL;
314 }
315
316 return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
317}
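/*
 * Example (illustrative values only): an extended create request that
 * takes the whole device and keeps the target's default over-provisioning
 * passes the 0xFFFF sentinels:
 *
 *	struct nvm_ioctl_create_extended e = {
 *		.lun_begin = 0xFFFF,	// expanded to 0
 *		.lun_end   = 0xFFFF,	// expanded to all_luns - 1
 *		.op        = 0xFFFF,	// expanded to NVM_TARGET_DEFAULT_OP
 *	};
 *
 * An explicit op must fall within [NVM_TARGET_MIN_OP, NVM_TARGET_MAX_OP];
 * how the over-provisioning value is applied is up to the target.
 */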
318
319static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
320{
321 struct nvm_ioctl_create_extended e;
322 struct request_queue *tqueue;
323 struct gendisk *tdisk;
324 struct nvm_tgt_type *tt;
325 struct nvm_target *t;
326 struct nvm_tgt_dev *tgt_dev;
327 void *targetdata;
328 int ret;
329
330 switch (create->conf.type) {
331 case NVM_CONFIG_TYPE_SIMPLE:
332 ret = __nvm_config_simple(dev, &create->conf.s);
333 if (ret)
334 return ret;
335
336 e.lun_begin = create->conf.s.lun_begin;
337 e.lun_end = create->conf.s.lun_end;
338 e.op = NVM_TARGET_DEFAULT_OP;
339 break;
340 case NVM_CONFIG_TYPE_EXTENDED:
341 ret = __nvm_config_extended(dev, &create->conf.e);
342 if (ret)
343 return ret;
344
345 e = create->conf.e;
346 break;
347 default:
348 pr_err("nvm: config type not valid\n");
349 return -EINVAL;
350 }
351
352 tt = nvm_find_target_type(create->tgttype);
353 if (!tt) {
354 pr_err("nvm: target type %s not found\n", create->tgttype);
355 return -EINVAL;
356 }
357
358 if (nvm_target_exists(create->tgtname)) {
359 pr_err("nvm: target name already exists (%s)\n",
360 create->tgtname);
361 return -EINVAL;
362 }
363
364 ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
365 if (ret)
366 return ret;
367
368 t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
369 if (!t) {
370 ret = -ENOMEM;
371 goto err_reserve;
372 }
373
374 tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
375 if (!tgt_dev) {
376 pr_err("nvm: could not create target device\n");
377 ret = -ENOMEM;
378 goto err_t;
379 }
380
381 tdisk = alloc_disk(0);
382 if (!tdisk) {
383 ret = -ENOMEM;
384 goto err_dev;
385 }
386
387 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
388 if (!tqueue) {
389 ret = -ENOMEM;
390 goto err_disk;
391 }
392 blk_queue_make_request(tqueue, tt->make_rq);
393
394 strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
395 tdisk->flags = GENHD_FL_EXT_DEVT;
396 tdisk->major = 0;
397 tdisk->first_minor = 0;
398 tdisk->fops = &nvm_fops;
399 tdisk->queue = tqueue;
400
401 targetdata = tt->init(tgt_dev, tdisk, create->flags);
402 if (IS_ERR(targetdata)) {
403 ret = PTR_ERR(targetdata);
404 goto err_init;
405 }
406
407 tdisk->private_data = targetdata;
408 tqueue->queuedata = targetdata;
409
410 blk_queue_max_hw_sectors(tqueue,
411 (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
412
413 set_capacity(tdisk, tt->capacity(targetdata));
414 add_disk(tdisk);
415
416 if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
417 ret = -ENOMEM;
418 goto err_sysfs;
419 }
420
421 t->type = tt;
422 t->disk = tdisk;
423 t->dev = tgt_dev;
424
425 mutex_lock(&dev->mlock);
426 list_add_tail(&t->list, &dev->targets);
427 mutex_unlock(&dev->mlock);
428
429 __module_get(tt->owner);
430
431 return 0;
432err_sysfs:
433 if (tt->exit)
434 tt->exit(targetdata);
435err_init:
436 blk_cleanup_queue(tqueue);
437 tdisk->queue = NULL;
438err_disk:
439 put_disk(tdisk);
440err_dev:
441 nvm_remove_tgt_dev(tgt_dev, 0);
442err_t:
443 kfree(t);
444err_reserve:
445 nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
446 return ret;
447}
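/*
 * Userspace reaches this path through the NVM_DEV_CREATE ioctl on the
 * misc control node (see _nvm_misc below). A minimal sketch, assuming the
 * uapi definitions from <linux/lightnvm.h> and an existing device
 * "nvme0n1" with a registered "pblk" target type:
 *
 *	struct nvm_ioctl_create c = { .flags = 0 };
 *	int fd;
 *
 *	strcpy(c.dev, "nvme0n1");
 *	strcpy(c.tgttype, "pblk");
 *	strcpy(c.tgtname, "mydev");
 *	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *	c.conf.s.lun_begin = -1;	// -1:-1 means "all LUNs"
 *	c.conf.s.lun_end = -1;
 *
 *	fd = open("/dev/lightnvm/control", O_RDWR);
 *	ioctl(fd, NVM_DEV_CREATE, &c);
 *
 * On success a new block device named after c.tgtname appears.
 */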
448
449static void __nvm_remove_target(struct nvm_target *t)
450{
451 struct nvm_tgt_type *tt = t->type;
452 struct gendisk *tdisk = t->disk;
453 struct request_queue *q = tdisk->queue;
454
455 del_gendisk(tdisk);
456 blk_cleanup_queue(q);
457
458 if (tt->sysfs_exit)
459 tt->sysfs_exit(tdisk);
460
461 if (tt->exit)
462 tt->exit(tdisk->private_data);
463
464 nvm_remove_tgt_dev(t->dev, 1);
465 put_disk(tdisk);
466 module_put(t->type->owner);
467
468 list_del(&t->list);
469 kfree(t);
470}
471
472/**
473 * nvm_remove_tgt - Removes a target from the media manager
474 * @dev: device
475 * @remove: ioctl structure with target name to remove.
476 *
477 * Returns:
478 * 0: on success
479 * 1: on not found
480 * <0: on error
481 */
482static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
483{
484 struct nvm_target *t;
485
486 mutex_lock(&dev->mlock);
487 t = nvm_find_target(dev, remove->tgtname);
488 if (!t) {
489 mutex_unlock(&dev->mlock);
490 return 1;
491 }
492 __nvm_remove_target(t);
493 mutex_unlock(&dev->mlock);
494
495 return 0;
496}
497
498static int nvm_register_map(struct nvm_dev *dev)
499{
500 struct nvm_dev_map *rmap;
501 int i, j;
502
503 rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
504 if (!rmap)
505 goto err_rmap;
506
507 rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
508 GFP_KERNEL);
509 if (!rmap->chnls)
510 goto err_chnls;
511
512 for (i = 0; i < dev->geo.num_ch; i++) {
513 struct nvm_ch_map *ch_rmap;
514 int *lun_roffs;
515 int luns_in_chnl = dev->geo.num_lun;
516
517 ch_rmap = &rmap->chnls[i];
518
519 ch_rmap->ch_off = -1;
520 ch_rmap->num_lun = luns_in_chnl;
521
522 lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
523 if (!lun_roffs)
524 goto err_ch;
525
526 for (j = 0; j < luns_in_chnl; j++)
527 lun_roffs[j] = -1;
528
529 ch_rmap->lun_offs = lun_roffs;
530 }
531
532 dev->rmap = rmap;
533
534 return 0;
535err_ch:
536 while (--i >= 0)
537 kfree(rmap->chnls[i].lun_offs);
538err_chnls:
539 kfree(rmap);
540err_rmap:
541 return -ENOMEM;
542}
543
544static void nvm_unregister_map(struct nvm_dev *dev)
545{
546 struct nvm_dev_map *rmap = dev->rmap;
547 int i;
548
549 for (i = 0; i < dev->geo.num_ch; i++)
550 kfree(rmap->chnls[i].lun_offs);
551
552 kfree(rmap->chnls);
553 kfree(rmap);
554}
555
556static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
557{
558 struct nvm_dev_map *dev_map = tgt_dev->map;
559 struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
560 int lun_off = ch_map->lun_offs[p->a.lun];
561
562 p->a.ch += ch_map->ch_off;
563 p->a.lun += lun_off;
564}
565
566static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
567{
568 struct nvm_dev *dev = tgt_dev->parent;
569 struct nvm_dev_map *dev_rmap = dev->rmap;
570 struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
571 int lun_roff = ch_rmap->lun_offs[p->a.lun];
572
573 p->a.ch -= ch_rmap->ch_off;
574 p->a.lun -= lun_roff;
575}
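/*
 * Continuing the example above: with chnls[*].ch_off == 1 and zero LUN
 * offsets, target address (ch 1, lun 3) becomes device address (ch 2,
 * lun 3) in nvm_map_to_dev(), and nvm_map_to_tgt() undoes exactly that
 * using the reverse map (dev->rmap) allocated in nvm_register_map() and
 * filled in by nvm_create_tgt_dev().
 */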
576
577static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
578 struct ppa_addr *ppa_list, int nr_ppas)
579{
580 int i;
581
582 for (i = 0; i < nr_ppas; i++) {
583 nvm_map_to_dev(tgt_dev, &ppa_list[i]);
584 ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
585 }
586}
587
588static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
589 struct ppa_addr *ppa_list, int nr_ppas)
590{
591 int i;
592
593 for (i = 0; i < nr_ppas; i++) {
594 ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
595 nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
596 }
597}
598
599static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
600{
601 if (rqd->nr_ppas == 1) {
602 nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
603 return;
604 }
605
606 nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
607}
608
609static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
610{
611 if (rqd->nr_ppas == 1) {
612 nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
613 return;
614 }
615
616 nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
617}
618
619int nvm_register_tgt_type(struct nvm_tgt_type *tt)
620{
621 int ret = 0;
622
623 down_write(&nvm_tgtt_lock);
624 if (__nvm_find_target_type(tt->name))
625 ret = -EEXIST;
626 else
627 list_add(&tt->list, &nvm_tgt_types);
628 up_write(&nvm_tgtt_lock);
629
630 return ret;
631}
632EXPORT_SYMBOL(nvm_register_tgt_type);
633
634void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
635{
636 if (!tt)
637 return;
638
639 down_write(&nvm_tgtt_lock);
640 list_del(&tt->list);
641 up_write(&nvm_tgtt_lock);
642}
643EXPORT_SYMBOL(nvm_unregister_tgt_type);
644
645void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
646 dma_addr_t *dma_handler)
647{
648 return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
649 dma_handler);
650}
651EXPORT_SYMBOL(nvm_dev_dma_alloc);
652
653void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
654{
655 dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
656}
657EXPORT_SYMBOL(nvm_dev_dma_free);
658
659static struct nvm_dev *nvm_find_nvm_dev(const char *name)
660{
661 struct nvm_dev *dev;
662
663 list_for_each_entry(dev, &nvm_devices, devices)
664 if (!strcmp(name, dev->name))
665 return dev;
666
667 return NULL;
668}
669
670static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
671 const struct ppa_addr *ppas, int nr_ppas)
672{
673 struct nvm_dev *dev = tgt_dev->parent;
674 struct nvm_geo *geo = &tgt_dev->geo;
675 int i, plane_cnt, pl_idx;
676 struct ppa_addr ppa;
677
678 if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
679 rqd->nr_ppas = nr_ppas;
680 rqd->ppa_addr = ppas[0];
681
682 return 0;
683 }
684
685 rqd->nr_ppas = nr_ppas;
686 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
687 if (!rqd->ppa_list) {
688 pr_err("nvm: failed to allocate dma memory\n");
689 return -ENOMEM;
690 }
691
692 plane_cnt = geo->pln_mode;
693 rqd->nr_ppas *= plane_cnt;
694
695 for (i = 0; i < nr_ppas; i++) {
696 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
697 ppa = ppas[i];
698 ppa.g.pl = pl_idx;
699 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
700 }
701 }
702
703 return 0;
704}
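/*
 * The expanded list is grouped by plane, not by address. For example,
 * with pln_mode == NVM_PLANE_DOUBLE (2) and two input addresses p0, p1,
 * the resulting rqd->ppa_list is laid out as:
 *
 *	[ p0/pl0, p1/pl0, p0/pl1, p1/pl1 ]
 *
 * and rqd->nr_ppas becomes nr_ppas * pln_mode = 4.
 */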
705
706static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
707 struct nvm_rq *rqd)
708{
709 if (!rqd->ppa_list)
710 return;
711
712 nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
713}
714
715int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct nvm_chk_meta *meta,
716 struct ppa_addr ppa, int nchks)
717{
718 struct nvm_dev *dev = tgt_dev->parent;
719
720 nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
721
722 return dev->ops->get_chk_meta(tgt_dev->parent, meta,
723 (sector_t)ppa.ppa, nchks);
724}
725EXPORT_SYMBOL(nvm_get_chunk_meta);
726
727int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
728 int nr_ppas, int type)
729{
730 struct nvm_dev *dev = tgt_dev->parent;
731 struct nvm_rq rqd;
732 int ret;
733
734 if (nr_ppas > NVM_MAX_VLBA) {
735 pr_err("nvm: unable to update all blocks atomically\n");
736 return -EINVAL;
737 }
738
739 memset(&rqd, 0, sizeof(struct nvm_rq));
740
741 nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
742 nvm_rq_tgt_to_dev(tgt_dev, &rqd);
743
744 ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
745 nvm_free_rqd_ppalist(tgt_dev, &rqd);
746 if (ret) {
747 pr_err("nvm: failed bb mark\n");
748 return -EINVAL;
749 }
750
751 return 0;
752}
753EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
754
755int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
756{
757 struct nvm_dev *dev = tgt_dev->parent;
758 int ret;
759
760 if (!dev->ops->submit_io)
761 return -ENODEV;
762
763 nvm_rq_tgt_to_dev(tgt_dev, rqd);
764
765 rqd->dev = tgt_dev;
766
767 /* In case of error, fail with right address format */
768 ret = dev->ops->submit_io(dev, rqd);
769 if (ret)
770 nvm_rq_dev_to_tgt(tgt_dev, rqd);
771 return ret;
772}
773EXPORT_SYMBOL(nvm_submit_io);
774
775int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
776{
777 struct nvm_dev *dev = tgt_dev->parent;
778 int ret;
779
780 if (!dev->ops->submit_io_sync)
781 return -ENODEV;
782
783 nvm_rq_tgt_to_dev(tgt_dev, rqd);
784
785 rqd->dev = tgt_dev;
786
787 /* In case of error, fail with right address format */
788 ret = dev->ops->submit_io_sync(dev, rqd);
789 nvm_rq_dev_to_tgt(tgt_dev, rqd);
790
791 return ret;
792}
793EXPORT_SYMBOL(nvm_submit_io_sync);
794
795void nvm_end_io(struct nvm_rq *rqd)
796{
797 struct nvm_tgt_dev *tgt_dev = rqd->dev;
798
799 /* Convert address space */
800 if (tgt_dev)
801 nvm_rq_dev_to_tgt(tgt_dev, rqd);
802
803 if (rqd->end_io)
804 rqd->end_io(rqd);
805}
806EXPORT_SYMBOL(nvm_end_io);
807
808/*
809 * folds a bad block list from its plane representation to its virtual
810 * block representation. The fold is done in place and reduced size is
811 * returned.
812 *
813 * If any of the planes status are bad or grown bad block, the virtual block
814 * is marked bad. If not bad, the first plane state acts as the block state.
815 */
816int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
817{
818 struct nvm_geo *geo = &dev->geo;
819 int blk, offset, pl, blktype;
820
821 if (nr_blks != geo->num_chk * geo->pln_mode)
822 return -EINVAL;
823
824 for (blk = 0; blk < geo->num_chk; blk++) {
825 offset = blk * geo->pln_mode;
826 blktype = blks[offset];
827
828 /* Bad blocks on any planes take precedence over other types */
829 for (pl = 0; pl < geo->pln_mode; pl++) {
830 if (blks[offset + pl] &
831 (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
832 blktype = blks[offset + pl];
833 break;
834 }
835 }
836
837 blks[blk] = blktype;
838 }
839
840 return geo->num_chk;
841}
842EXPORT_SYMBOL(nvm_bb_tbl_fold);
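/*
 * Worked example (hypothetical dual-plane geometry: pln_mode == 2,
 * num_chk == 3, so nr_blks == 6). With a per-plane table
 *
 *	blks = [ FREE, FREE, FREE, BAD, GRWN_BAD, FREE ]
 *
 * the in-place fold leaves blks[0..2] = [ FREE, BAD, GRWN_BAD ] and
 * returns 3: block 1 is bad because one of its planes is NVM_BLK_T_BAD,
 * block 2 because one plane is NVM_BLK_T_GRWN_BAD, and a good block
 * simply keeps the state of its first plane.
 */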
843
844int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
845 u8 *blks)
846{
847 struct nvm_dev *dev = tgt_dev->parent;
848
849 nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
850
851 return dev->ops->get_bb_tbl(dev, ppa, blks);
852}
853EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
854
855static int nvm_core_init(struct nvm_dev *dev)
856{
857 struct nvm_geo *geo = &dev->geo;
858 int ret;
859
860 dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
861 sizeof(unsigned long), GFP_KERNEL);
862 if (!dev->lun_map)
863 return -ENOMEM;
864
865 INIT_LIST_HEAD(&dev->area_list);
866 INIT_LIST_HEAD(&dev->targets);
867 mutex_init(&dev->mlock);
868 spin_lock_init(&dev->lock);
869
870 ret = nvm_register_map(dev);
871 if (ret)
872 goto err_fmtype;
873
874 return 0;
875err_fmtype:
876 kfree(dev->lun_map);
877 return ret;
878}
879
880static void nvm_free(struct nvm_dev *dev)
881{
882 if (!dev)
883 return;
884
885 if (dev->dma_pool)
886 dev->ops->destroy_dma_pool(dev->dma_pool);
887
888 nvm_unregister_map(dev);
889 kfree(dev->lun_map);
890 kfree(dev);
891}
892
893static int nvm_init(struct nvm_dev *dev)
894{
895 struct nvm_geo *geo = &dev->geo;
896 int ret = -EINVAL;
897
898 if (dev->ops->identity(dev)) {
899 pr_err("nvm: device could not be identified\n");
900 goto err;
901 }
902
903 pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
904 geo->major_ver_id, geo->minor_ver_id,
905 geo->vmnt);
906
907 ret = nvm_core_init(dev);
908 if (ret) {
909 pr_err("nvm: could not initialize core structures.\n");
910 goto err;
911 }
912
913 pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
914 dev->name, dev->geo.ws_min, dev->geo.ws_opt,
915 dev->geo.num_chk, dev->geo.all_luns,
916 dev->geo.num_ch);
917 return 0;
918err:
919 pr_err("nvm: failed to initialize nvm\n");
920 return ret;
921}
922
923struct nvm_dev *nvm_alloc_dev(int node)
924{
925 return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
926}
927EXPORT_SYMBOL(nvm_alloc_dev);
928
929int nvm_register(struct nvm_dev *dev)
930{
931 int ret;
932
933 if (!dev->q || !dev->ops)
934 return -EINVAL;
935
936 dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
937 if (!dev->dma_pool) {
938 pr_err("nvm: could not create dma pool\n");
939 return -ENOMEM;
940 }
941
942 ret = nvm_init(dev);
943 if (ret)
944 goto err_init;
945
946 /* register device with a supported media manager */
947 down_write(&nvm_lock);
948 list_add(&dev->devices, &nvm_devices);
949 up_write(&nvm_lock);
950
951 return 0;
952err_init:
953 dev->ops->destroy_dma_pool(dev->dma_pool);
954 return ret;
955}
956EXPORT_SYMBOL(nvm_register);
957
958void nvm_unregister(struct nvm_dev *dev)
959{
960 struct nvm_target *t, *tmp;
961
962 mutex_lock(&dev->mlock);
963 list_for_each_entry_safe(t, tmp, &dev->targets, list) {
964 if (t->dev->parent != dev)
965 continue;
966 __nvm_remove_target(t);
967 }
968 mutex_unlock(&dev->mlock);
969
970 down_write(&nvm_lock);
971 list_del(&dev->devices);
972 up_write(&nvm_lock);
973
974 nvm_free(dev);
975}
976EXPORT_SYMBOL(nvm_unregister);
977
978static int __nvm_configure_create(struct nvm_ioctl_create *create)
979{
980 struct nvm_dev *dev;
981
982 down_write(&nvm_lock);
983 dev = nvm_find_nvm_dev(create->dev);
984 up_write(&nvm_lock);
985
986 if (!dev) {
987 pr_err("nvm: device not found\n");
988 return -EINVAL;
989 }
990
991 return nvm_create_tgt(dev, create);
992}
993
994static long nvm_ioctl_info(struct file *file, void __user *arg)
995{
996 struct nvm_ioctl_info *info;
997 struct nvm_tgt_type *tt;
998 int tgt_iter = 0;
999
1000 info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
1001 if (IS_ERR(info))
1002 return -EFAULT;
1003
1004 info->version[0] = NVM_VERSION_MAJOR;
1005 info->version[1] = NVM_VERSION_MINOR;
1006 info->version[2] = NVM_VERSION_PATCH;
1007
1008 down_write(&nvm_tgtt_lock);
1009 list_for_each_entry(tt, &nvm_tgt_types, list) {
1010 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
1011
1012 tgt->version[0] = tt->version[0];
1013 tgt->version[1] = tt->version[1];
1014 tgt->version[2] = tt->version[2];
1015 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
1016
1017 tgt_iter++;
1018 }
1019
1020 info->tgtsize = tgt_iter;
1021 up_write(&nvm_tgtt_lock);
1022
1023 if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
1024 kfree(info);
1025 return -EFAULT;
1026 }
1027
1028 kfree(info);
1029 return 0;
1030}
1031
1032static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
1033{
1034 struct nvm_ioctl_get_devices *devices;
1035 struct nvm_dev *dev;
1036 int i = 0;
1037
1038 devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
1039 if (!devices)
1040 return -ENOMEM;
1041
1042 down_write(&nvm_lock);
1043 list_for_each_entry(dev, &nvm_devices, devices) {
1044 struct nvm_ioctl_device_info *info = &devices->info[i];
1045
1046 strlcpy(info->devname, dev->name, sizeof(info->devname));
1047
1048 /* kept for compatibility */
1049 info->bmversion[0] = 1;
1050 info->bmversion[1] = 0;
1051 info->bmversion[2] = 0;
1052 strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
1053 i++;
1054
1055 if (i > 31) {
1056 pr_err("nvm: max 31 devices can be reported.\n");
1057 break;
1058 }
1059 }
1060 up_write(&nvm_lock);
1061
1062 devices->nr_devices = i;
1063
1064 if (copy_to_user(arg, devices,
1065 sizeof(struct nvm_ioctl_get_devices))) {
1066 kfree(devices);
1067 return -EFAULT;
1068 }
1069
1070 kfree(devices);
1071 return 0;
1072}
1073
1074static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
1075{
1076 struct nvm_ioctl_create create;
1077
1078 if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
1079 return -EFAULT;
1080
1081 if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
1082 create.conf.e.rsv != 0) {
1083 pr_err("nvm: reserved config field in use\n");
1084 return -EINVAL;
1085 }
1086
1087 create.dev[DISK_NAME_LEN - 1] = '\0';
1088 create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
1089 create.tgtname[DISK_NAME_LEN - 1] = '\0';
1090
1091 if (create.flags != 0) {
1092 __u32 flags = create.flags;
1093
1094 /* Check for valid flags */
1095 if (flags & NVM_TARGET_FACTORY)
1096 flags &= ~NVM_TARGET_FACTORY;
1097
1098 if (flags) {
1099 pr_err("nvm: flag not supported\n");
1100 return -EINVAL;
1101 }
1102 }
1103
1104 return __nvm_configure_create(&create);
1105}
1106
1107static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
1108{
1109 struct nvm_ioctl_remove remove;
1110 struct nvm_dev *dev;
1111 int ret = 0;
1112
1113 if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
1114 return -EFAULT;
1115
1116 remove.tgtname[DISK_NAME_LEN - 1] = '\0';
1117
1118 if (remove.flags != 0) {
1119 pr_err("nvm: no flags supported\n");
1120 return -EINVAL;
1121 }
1122
1123 list_for_each_entry(dev, &nvm_devices, devices) {
1124 ret = nvm_remove_tgt(dev, &remove);
1125 if (!ret)
1126 break;
1127 }
1128
1129 return ret;
1130}
1131
1132/* kept for compatibility reasons */
1133static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1134{
1135 struct nvm_ioctl_dev_init init;
1136
1137 if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
1138 return -EFAULT;
1139
1140 if (init.flags != 0) {
1141 pr_err("nvm: no flags supported\n");
1142 return -EINVAL;
1143 }
1144
1145 return 0;
1146}
1147
1148/* Kept for compatibility reasons */
1149static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1150{
1151 struct nvm_ioctl_dev_factory fact;
1152
1153 if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1154 return -EFAULT;
1155
1156 fact.dev[DISK_NAME_LEN - 1] = '\0';
1157
1158 if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1159 return -EINVAL;
1160
1161 return 0;
1162}
1163
1164static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
1165{
1166 void __user *argp = (void __user *)arg;
1167
1168 if (!capable(CAP_SYS_ADMIN))
1169 return -EPERM;
1170
1171 switch (cmd) {
1172 case NVM_INFO:
1173 return nvm_ioctl_info(file, argp);
1174 case NVM_GET_DEVICES:
1175 return nvm_ioctl_get_devices(file, argp);
1176 case NVM_DEV_CREATE:
1177 return nvm_ioctl_dev_create(file, argp);
1178 case NVM_DEV_REMOVE:
1179 return nvm_ioctl_dev_remove(file, argp);
1180 case NVM_DEV_INIT:
1181 return nvm_ioctl_dev_init(file, argp);
1182 case NVM_DEV_FACTORY:
1183 return nvm_ioctl_dev_factory(file, argp);
1184 }
1185 return 0;
1186}
1187
1188static const struct file_operations _ctl_fops = {
1189 .open = nonseekable_open,
1190 .unlocked_ioctl = nvm_ctl_ioctl,
1191 .owner = THIS_MODULE,
1192 .llseek = noop_llseek,
1193};
1194
1195static struct miscdevice _nvm_misc = {
1196 .minor = MISC_DYNAMIC_MINOR,
1197 .name = "lightnvm",
1198 .nodename = "lightnvm/control",
1199 .fops = &_ctl_fops,
1200};
1201builtin_misc_device(_nvm_misc);
1/*
2 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17 * USA.
18 *
19 */
20
21#include <linux/blkdev.h>
22#include <linux/blk-mq.h>
23#include <linux/list.h>
24#include <linux/types.h>
25#include <linux/sem.h>
26#include <linux/bitmap.h>
27#include <linux/module.h>
28#include <linux/miscdevice.h>
29#include <linux/lightnvm.h>
30#include <linux/sched/sysctl.h>
31#include <uapi/linux/lightnvm.h>
32
33static LIST_HEAD(nvm_targets);
34static LIST_HEAD(nvm_mgrs);
35static LIST_HEAD(nvm_devices);
36static DECLARE_RWSEM(nvm_lock);
37
38static struct nvm_tgt_type *nvm_find_target_type(const char *name)
39{
40 struct nvm_tgt_type *tt;
41
42 list_for_each_entry(tt, &nvm_targets, list)
43 if (!strcmp(name, tt->name))
44 return tt;
45
46 return NULL;
47}
48
49int nvm_register_target(struct nvm_tgt_type *tt)
50{
51 int ret = 0;
52
53 down_write(&nvm_lock);
54 if (nvm_find_target_type(tt->name))
55 ret = -EEXIST;
56 else
57 list_add(&tt->list, &nvm_targets);
58 up_write(&nvm_lock);
59
60 return ret;
61}
62EXPORT_SYMBOL(nvm_register_target);
63
64void nvm_unregister_target(struct nvm_tgt_type *tt)
65{
66 if (!tt)
67 return;
68
69 down_write(&nvm_lock);
70 list_del(&tt->list);
71 up_write(&nvm_lock);
72}
73EXPORT_SYMBOL(nvm_unregister_target);
74
75void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
76 dma_addr_t *dma_handler)
77{
78 return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
79 dma_handler);
80}
81EXPORT_SYMBOL(nvm_dev_dma_alloc);
82
83void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
84 dma_addr_t dma_handler)
85{
86 dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
87}
88EXPORT_SYMBOL(nvm_dev_dma_free);
89
90static struct nvmm_type *nvm_find_mgr_type(const char *name)
91{
92 struct nvmm_type *mt;
93
94 list_for_each_entry(mt, &nvm_mgrs, list)
95 if (!strcmp(name, mt->name))
96 return mt;
97
98 return NULL;
99}
100
101struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
102{
103 struct nvmm_type *mt;
104 int ret;
105
106 lockdep_assert_held(&nvm_lock);
107
108 list_for_each_entry(mt, &nvm_mgrs, list) {
109 if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
110 continue;
111
112 ret = mt->register_mgr(dev);
113 if (ret < 0) {
114 pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
115 ret, dev->name);
116 return NULL; /* initialization failed */
117 } else if (ret > 0)
118 return mt;
119 }
120
121 return NULL;
122}
123
124int nvm_register_mgr(struct nvmm_type *mt)
125{
126 struct nvm_dev *dev;
127 int ret = 0;
128
129 down_write(&nvm_lock);
130 if (nvm_find_mgr_type(mt->name)) {
131 ret = -EEXIST;
132 goto finish;
133 } else {
134 list_add(&mt->list, &nvm_mgrs);
135 }
136
137 /* try to register media mgr if any device have none configured */
138 list_for_each_entry(dev, &nvm_devices, devices) {
139 if (dev->mt)
140 continue;
141
142 dev->mt = nvm_init_mgr(dev);
143 }
144finish:
145 up_write(&nvm_lock);
146
147 return ret;
148}
149EXPORT_SYMBOL(nvm_register_mgr);
150
151void nvm_unregister_mgr(struct nvmm_type *mt)
152{
153 if (!mt)
154 return;
155
156 down_write(&nvm_lock);
157 list_del(&mt->list);
158 up_write(&nvm_lock);
159}
160EXPORT_SYMBOL(nvm_unregister_mgr);
161
162static struct nvm_dev *nvm_find_nvm_dev(const char *name)
163{
164 struct nvm_dev *dev;
165
166 list_for_each_entry(dev, &nvm_devices, devices)
167 if (!strcmp(name, dev->name))
168 return dev;
169
170 return NULL;
171}
172
173struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
174 unsigned long flags)
175{
176 return dev->mt->get_blk_unlocked(dev, lun, flags);
177}
178EXPORT_SYMBOL(nvm_get_blk_unlocked);
179
180/* Assumes that all valid pages have already been moved on release to bm */
181void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
182{
183 return dev->mt->put_blk_unlocked(dev, blk);
184}
185EXPORT_SYMBOL(nvm_put_blk_unlocked);
186
187struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
188 unsigned long flags)
189{
190 return dev->mt->get_blk(dev, lun, flags);
191}
192EXPORT_SYMBOL(nvm_get_blk);
193
194/* Assumes that all valid pages have already been moved on release to bm */
195void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
196{
197 return dev->mt->put_blk(dev, blk);
198}
199EXPORT_SYMBOL(nvm_put_blk);
200
201int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
202{
203 return dev->mt->submit_io(dev, rqd);
204}
205EXPORT_SYMBOL(nvm_submit_io);
206
207int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
208{
209 return dev->mt->erase_blk(dev, blk, 0);
210}
211EXPORT_SYMBOL(nvm_erase_blk);
212
213void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
214{
215 int i;
216
217 if (rqd->nr_pages > 1) {
218 for (i = 0; i < rqd->nr_pages; i++)
219 rqd->ppa_list[i] = dev_to_generic_addr(dev,
220 rqd->ppa_list[i]);
221 } else {
222 rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
223 }
224}
225EXPORT_SYMBOL(nvm_addr_to_generic_mode);
226
227void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
228{
229 int i;
230
231 if (rqd->nr_pages > 1) {
232 for (i = 0; i < rqd->nr_pages; i++)
233 rqd->ppa_list[i] = generic_to_dev_addr(dev,
234 rqd->ppa_list[i]);
235 } else {
236 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
237 }
238}
239EXPORT_SYMBOL(nvm_generic_to_addr_mode);
240
241int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
242 struct ppa_addr *ppas, int nr_ppas)
243{
244 int i, plane_cnt, pl_idx;
245
246 if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
247 rqd->nr_pages = 1;
248 rqd->ppa_addr = ppas[0];
249
250 return 0;
251 }
252
253 plane_cnt = dev->plane_mode;
254 rqd->nr_pages = plane_cnt * nr_ppas;
255
256 if (dev->ops->max_phys_sect < rqd->nr_pages)
257 return -EINVAL;
258
259 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
260 if (!rqd->ppa_list) {
261 pr_err("nvm: failed to allocate dma memory\n");
262 return -ENOMEM;
263 }
264
265 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
266 for (i = 0; i < nr_ppas; i++) {
267 ppas[i].g.pl = pl_idx;
268 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
269 }
270 }
271
272 return 0;
273}
274EXPORT_SYMBOL(nvm_set_rqd_ppalist);
275
276void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
277{
278 if (!rqd->ppa_list)
279 return;
280
281 nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
282}
283EXPORT_SYMBOL(nvm_free_rqd_ppalist);
284
285int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
286{
287 struct nvm_rq rqd;
288 int ret;
289
290 if (!dev->ops->erase_block)
291 return 0;
292
293 memset(&rqd, 0, sizeof(struct nvm_rq));
294
295 ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
296 if (ret)
297 return ret;
298
299 nvm_generic_to_addr_mode(dev, &rqd);
300
301 ret = dev->ops->erase_block(dev, &rqd);
302
303 nvm_free_rqd_ppalist(dev, &rqd);
304
305 return ret;
306}
307EXPORT_SYMBOL(nvm_erase_ppa);
308
309void nvm_end_io(struct nvm_rq *rqd, int error)
310{
311 rqd->error = error;
312 rqd->end_io(rqd);
313}
314EXPORT_SYMBOL(nvm_end_io);
315
316static void nvm_end_io_sync(struct nvm_rq *rqd)
317{
318 struct completion *waiting = rqd->wait;
319
320 rqd->wait = NULL;
321
322 complete(waiting);
323}
324
325int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
326 int opcode, int flags, void *buf, int len)
327{
328 DECLARE_COMPLETION_ONSTACK(wait);
329 struct nvm_rq rqd;
330 struct bio *bio;
331 int ret;
332 unsigned long hang_check;
333
334 bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
335 if (IS_ERR_OR_NULL(bio))
336 return -ENOMEM;
337
338 memset(&rqd, 0, sizeof(struct nvm_rq));
339 ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
340 if (ret) {
341 bio_put(bio);
342 return ret;
343 }
344
345 rqd.opcode = opcode;
346 rqd.bio = bio;
347 rqd.wait = &wait;
348 rqd.dev = dev;
349 rqd.end_io = nvm_end_io_sync;
350 rqd.flags = flags;
351 nvm_generic_to_addr_mode(dev, &rqd);
352
353 ret = dev->ops->submit_io(dev, &rqd);
354
355 /* Prevent hang_check timer from firing at us during very long I/O */
356 hang_check = sysctl_hung_task_timeout_secs;
357 if (hang_check)
358 while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
359 else
360 wait_for_completion_io(&wait);
361
362 nvm_free_rqd_ppalist(dev, &rqd);
363
364 return rqd.error;
365}
366EXPORT_SYMBOL(nvm_submit_ppa);
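/*
 * A minimal caller sketch (illustrative only; assumes NVM_OP_PREAD from
 * <linux/lightnvm.h> and a buffer of at least dev->sec_size bytes):
 *
 *	struct ppa_addr ppa = { .ppa = 0 };
 *
 *	ppa.g.ch = 0;
 *	ppa.g.lun = 0;
 *	ppa.g.blk = 10;
 *	ppa.g.pg = 0;
 *	ret = nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, 0, buf, dev->sec_size);
 *
 * The call blocks until completion (nvm_end_io_sync) and returns the
 * request's error status.
 */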
367
368static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
369{
370 int i;
371
372 dev->lps_per_blk = dev->pgs_per_blk;
373 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
374 if (!dev->lptbl)
375 return -ENOMEM;
376
377 /* Just a linear array */
378 for (i = 0; i < dev->lps_per_blk; i++)
379 dev->lptbl[i] = i;
380
381 return 0;
382}
383
384static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
385{
386 int i, p;
387 struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
388
389 if (!mlc->num_pairs)
390 return 0;
391
392 dev->lps_per_blk = mlc->num_pairs;
393 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
394 if (!dev->lptbl)
395 return -ENOMEM;
396
397 /* The lower page table encoding consists of a list of bytes, where each
398 * byte holds two half-byte entries. The first half-byte is the initial
399 * table value and every half-byte after it is an offset added to the
400 * previous value. */
401 dev->lptbl[0] = mlc->pairs[0] & 0xF;
402 for (i = 1; i < dev->lps_per_blk; i++) {
403 p = mlc->pairs[i >> 1];
404 if (i & 0x1) /* upper */
405 dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
406 else /* lower */
407 dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
408 }
409
410 return 0;
411}
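/*
 * Decoding example (hypothetical pair bytes): with mlc->num_pairs == 4
 * and mlc->pairs[] = { 0x10, 0x21 }, the half-bytes decode to
 *
 *	lptbl[0] = 0x10 & 0xF                 = 0
 *	lptbl[1] = 0 + ((0x10 & 0xF0) >> 4)   = 1
 *	lptbl[2] = 1 + (0x21 & 0xF)           = 2
 *	lptbl[3] = 2 + ((0x21 & 0xF0) >> 4)   = 4
 *
 * i.e. each entry is the previous entry plus the next half-byte offset.
 */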
412
413static int nvm_core_init(struct nvm_dev *dev)
414{
415 struct nvm_id *id = &dev->identity;
416 struct nvm_id_group *grp = &id->groups[0];
417
418 /* device values */
419 dev->nr_chnls = grp->num_ch;
420 dev->luns_per_chnl = grp->num_lun;
421 dev->pgs_per_blk = grp->num_pg;
422 dev->blks_per_lun = grp->num_blk;
423 dev->nr_planes = grp->num_pln;
424 dev->sec_size = grp->csecs;
425 dev->oob_size = grp->sos;
426 dev->sec_per_pg = grp->fpg_sz / grp->csecs;
427 dev->mccap = grp->mccap;
428 memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
429
430 dev->plane_mode = NVM_PLANE_SINGLE;
431 dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
432
433 if (grp->mtype != 0) {
434 pr_err("nvm: memory type not supported\n");
435 return -EINVAL;
436 }
437
438 switch (grp->fmtype) {
439 case NVM_ID_FMTYPE_SLC:
440 if (nvm_init_slc_tbl(dev, grp))
441 return -ENOMEM;
442 break;
443 case NVM_ID_FMTYPE_MLC:
444 if (nvm_init_mlc_tbl(dev, grp))
445 return -ENOMEM;
446 break;
447 default:
448 pr_err("nvm: flash type not supported\n");
449 return -EINVAL;
450 }
451
452 if (!dev->lps_per_blk)
453 pr_info("nvm: lower page programming table missing\n");
454
455 if (grp->mpos & 0x020202)
456 dev->plane_mode = NVM_PLANE_DOUBLE;
457 if (grp->mpos & 0x040404)
458 dev->plane_mode = NVM_PLANE_QUAD;
459
460 /* calculated values */
461 dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
462 dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
463 dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
464 dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
465
466 dev->total_secs = dev->nr_luns * dev->sec_per_lun;
467 dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
468 sizeof(unsigned long), GFP_KERNEL);
469 if (!dev->lun_map)
470 return -ENOMEM;
471 INIT_LIST_HEAD(&dev->online_targets);
472 mutex_init(&dev->mlock);
473 spin_lock_init(&dev->lock);
474
475 return 0;
476}
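/*
 * Derived geometry example (hypothetical identify data): with
 * sec_per_pg = 4, nr_planes = 2, pgs_per_blk = 256, blks_per_lun = 1024,
 * luns_per_chnl = 4 and nr_chnls = 8:
 *
 *	sec_per_pl  = 4 * 2        = 8
 *	sec_per_blk = 8 * 256      = 2048
 *	sec_per_lun = 2048 * 1024  = 2097152
 *	nr_luns     = 4 * 8        = 32
 *	total_secs  = 32 * 2097152 = 67108864  (~256 GiB assuming 4096 B sectors)
 */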
477
478static void nvm_free(struct nvm_dev *dev)
479{
480 if (!dev)
481 return;
482
483 if (dev->mt)
484 dev->mt->unregister_mgr(dev);
485
486 kfree(dev->lptbl);
487}
488
489static int nvm_init(struct nvm_dev *dev)
490{
491 int ret = -EINVAL;
492
493 if (!dev->q || !dev->ops)
494 return ret;
495
496 if (dev->ops->identity(dev, &dev->identity)) {
497 pr_err("nvm: device could not be identified\n");
498 goto err;
499 }
500
501 pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
502 dev->identity.ver_id, dev->identity.vmnt,
503 dev->identity.cgrps);
504
505 if (dev->identity.ver_id != 1) {
506 pr_err("nvm: device not supported by kernel.");
507 goto err;
508 }
509
510 if (dev->identity.cgrps != 1) {
511 pr_err("nvm: only one group configuration supported.");
512 goto err;
513 }
514
515 ret = nvm_core_init(dev);
516 if (ret) {
517 pr_err("nvm: could not initialize core structures.\n");
518 goto err;
519 }
520
521 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
522 dev->name, dev->sec_per_pg, dev->nr_planes,
523 dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
524 dev->nr_chnls);
525 return 0;
526err:
527 pr_err("nvm: failed to initialize nvm\n");
528 return ret;
529}
530
531static void nvm_exit(struct nvm_dev *dev)
532{
533 if (dev->ppalist_pool)
534 dev->ops->destroy_dma_pool(dev->ppalist_pool);
535 nvm_free(dev);
536
537 pr_info("nvm: successfully unloaded\n");
538}
539
540int nvm_register(struct request_queue *q, char *disk_name,
541 struct nvm_dev_ops *ops)
542{
543 struct nvm_dev *dev;
544 int ret;
545
546 if (!ops->identity)
547 return -EINVAL;
548
549 dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
550 if (!dev)
551 return -ENOMEM;
552
553 dev->q = q;
554 dev->ops = ops;
555 strncpy(dev->name, disk_name, DISK_NAME_LEN);
556
557 ret = nvm_init(dev);
558 if (ret)
559 goto err_init;
560
561 if (dev->ops->max_phys_sect > 256) {
562 pr_info("nvm: max sectors supported is 256.\n");
563 ret = -EINVAL;
564 goto err_init;
565 }
566
567 if (dev->ops->max_phys_sect > 1) {
568 dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
569 if (!dev->ppalist_pool) {
570 pr_err("nvm: could not create ppa pool\n");
571 ret = -ENOMEM;
572 goto err_init;
573 }
574 }
575
576 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
577 ret = nvm_get_sysblock(dev, &dev->sb);
578 if (!ret)
579 pr_err("nvm: device not initialized.\n");
580 else if (ret < 0)
581 pr_err("nvm: err (%d) on device initialization\n", ret);
582 }
583
584 /* register device with a supported media manager */
585 down_write(&nvm_lock);
586 if (ret > 0)
587 dev->mt = nvm_init_mgr(dev);
588 list_add(&dev->devices, &nvm_devices);
589 up_write(&nvm_lock);
590
591 return 0;
592err_init:
593 kfree(dev->lun_map);
594 kfree(dev);
595 return ret;
596}
597EXPORT_SYMBOL(nvm_register);
598
599void nvm_unregister(char *disk_name)
600{
601 struct nvm_dev *dev;
602
603 down_write(&nvm_lock);
604 dev = nvm_find_nvm_dev(disk_name);
605 if (!dev) {
606 pr_err("nvm: could not find device %s to unregister\n",
607 disk_name);
608 up_write(&nvm_lock);
609 return;
610 }
611
612 list_del(&dev->devices);
613 up_write(&nvm_lock);
614
615 nvm_exit(dev);
616 kfree(dev->lun_map);
617 kfree(dev);
618}
619EXPORT_SYMBOL(nvm_unregister);
620
621static const struct block_device_operations nvm_fops = {
622 .owner = THIS_MODULE,
623};
624
625static int nvm_create_target(struct nvm_dev *dev,
626 struct nvm_ioctl_create *create)
627{
628 struct nvm_ioctl_create_simple *s = &create->conf.s;
629 struct request_queue *tqueue;
630 struct gendisk *tdisk;
631 struct nvm_tgt_type *tt;
632 struct nvm_target *t;
633 void *targetdata;
634
635 if (!dev->mt) {
636 pr_info("nvm: device has no media manager registered.\n");
637 return -ENODEV;
638 }
639
640 down_write(&nvm_lock);
641 tt = nvm_find_target_type(create->tgttype);
642 if (!tt) {
643 pr_err("nvm: target type %s not found\n", create->tgttype);
644 up_write(&nvm_lock);
645 return -EINVAL;
646 }
647
648 list_for_each_entry(t, &dev->online_targets, list) {
649 if (!strcmp(create->tgtname, t->disk->disk_name)) {
650 pr_err("nvm: target name already exists.\n");
651 up_write(&nvm_lock);
652 return -EINVAL;
653 }
654 }
655 up_write(&nvm_lock);
656
657 t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
658 if (!t)
659 return -ENOMEM;
660
661 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
662 if (!tqueue)
663 goto err_t;
664 blk_queue_make_request(tqueue, tt->make_rq);
665
666 tdisk = alloc_disk(0);
667 if (!tdisk)
668 goto err_queue;
669
670 sprintf(tdisk->disk_name, "%s", create->tgtname);
671 tdisk->flags = GENHD_FL_EXT_DEVT;
672 tdisk->major = 0;
673 tdisk->first_minor = 0;
674 tdisk->fops = &nvm_fops;
675 tdisk->queue = tqueue;
676
677 targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
678 if (IS_ERR(targetdata))
679 goto err_init;
680
681 tdisk->private_data = targetdata;
682 tqueue->queuedata = targetdata;
683
684 blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
685
686 set_capacity(tdisk, tt->capacity(targetdata));
687 add_disk(tdisk);
688
689 t->type = tt;
690 t->disk = tdisk;
691
692 down_write(&nvm_lock);
693 list_add_tail(&t->list, &dev->online_targets);
694 up_write(&nvm_lock);
695
696 return 0;
697err_init:
698 put_disk(tdisk);
699err_queue:
700 blk_cleanup_queue(tqueue);
701err_t:
702 kfree(t);
703 return -ENOMEM;
704}
705
706static void nvm_remove_target(struct nvm_target *t)
707{
708 struct nvm_tgt_type *tt = t->type;
709 struct gendisk *tdisk = t->disk;
710 struct request_queue *q = tdisk->queue;
711
712 lockdep_assert_held(&nvm_lock);
713
714 del_gendisk(tdisk);
715 blk_cleanup_queue(q);
716
717 if (tt->exit)
718 tt->exit(tdisk->private_data);
719
720 put_disk(tdisk);
721
722 list_del(&t->list);
723 kfree(t);
724}
725
726static int __nvm_configure_create(struct nvm_ioctl_create *create)
727{
728 struct nvm_dev *dev;
729 struct nvm_ioctl_create_simple *s;
730
731 down_write(&nvm_lock);
732 dev = nvm_find_nvm_dev(create->dev);
733 up_write(&nvm_lock);
734 if (!dev) {
735 pr_err("nvm: device not found\n");
736 return -EINVAL;
737 }
738
739 if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
740 pr_err("nvm: config type not valid\n");
741 return -EINVAL;
742 }
743 s = &create->conf.s;
744
745 if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
746 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
747 s->lun_begin, s->lun_end, dev->nr_luns);
748 return -EINVAL;
749 }
750
751 return nvm_create_target(dev, create);
752}
753
754static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
755{
756 struct nvm_target *t = NULL;
757 struct nvm_dev *dev;
758 int ret = -1;
759
760 down_write(&nvm_lock);
761 list_for_each_entry(dev, &nvm_devices, devices)
762 list_for_each_entry(t, &dev->online_targets, list) {
763 if (!strcmp(remove->tgtname, t->disk->disk_name)) {
764 nvm_remove_target(t);
765 ret = 0;
766 break;
767 }
768 }
769 up_write(&nvm_lock);
770
771 if (ret) {
772 pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
773 return -EINVAL;
774 }
775
776 return 0;
777}
778
779#ifdef CONFIG_NVM_DEBUG
780static int nvm_configure_show(const char *val)
781{
782 struct nvm_dev *dev;
783 char opcode, devname[DISK_NAME_LEN];
784 int ret;
785
786 ret = sscanf(val, "%c %32s", &opcode, devname);
787 if (ret != 2) {
788 pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
789 return -EINVAL;
790 }
791
792 down_write(&nvm_lock);
793 dev = nvm_find_nvm_dev(devname);
794 up_write(&nvm_lock);
795 if (!dev) {
796 pr_err("nvm: device not found\n");
797 return -EINVAL;
798 }
799
800 if (!dev->mt)
801 return 0;
802
803 dev->mt->lun_info_print(dev);
804
805 return 0;
806}
807
808static int nvm_configure_remove(const char *val)
809{
810 struct nvm_ioctl_remove remove;
811 char opcode;
812 int ret;
813
814 ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
815 if (ret != 2) {
816 pr_err("nvm: invalid command. Use \"d targetname\".\n");
817 return -EINVAL;
818 }
819
820 remove.flags = 0;
821
822 return __nvm_configure_remove(&remove);
823}
824
825static int nvm_configure_create(const char *val)
826{
827 struct nvm_ioctl_create create;
828 char opcode;
829 int lun_begin, lun_end, ret;
830
831 ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
832 create.tgtname, create.tgttype,
833 &lun_begin, &lun_end);
834 if (ret != 6) {
835 pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
836 return -EINVAL;
837 }
838
839 create.flags = 0;
840 create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
841 create.conf.s.lun_begin = lun_begin;
842 create.conf.s.lun_end = lun_end;
843
844 return __nvm_configure_create(&create);
845}
846
847
848/* Exposes administrative interface through /sys/module/lnvm/parameters/configure_debug */
849static int nvm_configure_by_str_event(const char *val,
850 const struct kernel_param *kp)
851{
852 char opcode;
853 int ret;
854
855 ret = sscanf(val, "%c", &opcode);
856 if (ret != 1) {
857 pr_err("nvm: string must have the format of \"cmd ...\"\n");
858 return -EINVAL;
859 }
860
861 switch (opcode) {
862 case 'a':
863 return nvm_configure_create(val);
864 case 'd':
865 return nvm_configure_remove(val);
866 case 's':
867 return nvm_configure_show(val);
868 default:
869 pr_err("nvm: invalid command\n");
870 return -EINVAL;
871 }
872
873 return 0;
874}
875
876static int nvm_configure_get(char *buf, const struct kernel_param *kp)
877{
878 int sz;
879 struct nvm_dev *dev;
880
881 sz = sprintf(buf, "available devices:\n");
882 down_write(&nvm_lock);
883 list_for_each_entry(dev, &nvm_devices, devices) {
884 if (sz > 4095 - DISK_NAME_LEN - 2)
885 break;
886 sz += sprintf(buf + sz, " %32s\n", dev->name);
887 }
888 up_write(&nvm_lock);
889
890 return sz;
891}
892
893static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
894 .set = nvm_configure_by_str_event,
895 .get = nvm_configure_get,
896};
897
898#undef MODULE_PARAM_PREFIX
899#define MODULE_PARAM_PREFIX "lnvm."
900
901module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
902 0644);
903
904#endif /* CONFIG_NVM_DEBUG */
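/*
 * With CONFIG_NVM_DEBUG enabled, the parameter above drives the same
 * create/remove paths from the shell. A hypothetical session, assuming an
 * "nvme0n1" device and a registered "rrpc" target type:
 *
 *	# create target "tgt0" over LUNs 0..3
 *	echo "a nvme0n1 tgt0 rrpc 0:3" > /sys/module/lnvm/parameters/configure_debug
 *	# remove it again
 *	echo "d tgt0" > /sys/module/lnvm/parameters/configure_debug
 *	# print LUN info for the device (if a media manager is attached)
 *	echo "s nvme0n1" > /sys/module/lnvm/parameters/configure_debug
 *	# reading the parameter lists the registered devices
 *	cat /sys/module/lnvm/parameters/configure_debug
 */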
905
906static long nvm_ioctl_info(struct file *file, void __user *arg)
907{
908 struct nvm_ioctl_info *info;
909 struct nvm_tgt_type *tt;
910 int tgt_iter = 0;
911
912 if (!capable(CAP_SYS_ADMIN))
913 return -EPERM;
914
915 info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
916 if (IS_ERR(info))
917 return -EFAULT;
918
919 info->version[0] = NVM_VERSION_MAJOR;
920 info->version[1] = NVM_VERSION_MINOR;
921 info->version[2] = NVM_VERSION_PATCH;
922
923 down_write(&nvm_lock);
924 list_for_each_entry(tt, &nvm_targets, list) {
925 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
926
927 tgt->version[0] = tt->version[0];
928 tgt->version[1] = tt->version[1];
929 tgt->version[2] = tt->version[2];
930 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
931
932 tgt_iter++;
933 }
934
935 info->tgtsize = tgt_iter;
936 up_write(&nvm_lock);
937
938 if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
939 kfree(info);
940 return -EFAULT;
941 }
942
943 kfree(info);
944 return 0;
945}
946
947static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
948{
949 struct nvm_ioctl_get_devices *devices;
950 struct nvm_dev *dev;
951 int i = 0;
952
953 if (!capable(CAP_SYS_ADMIN))
954 return -EPERM;
955
956 devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
957 if (!devices)
958 return -ENOMEM;
959
960 down_write(&nvm_lock);
961 list_for_each_entry(dev, &nvm_devices, devices) {
962 struct nvm_ioctl_device_info *info = &devices->info[i];
963
964 sprintf(info->devname, "%s", dev->name);
965 if (dev->mt) {
966 info->bmversion[0] = dev->mt->version[0];
967 info->bmversion[1] = dev->mt->version[1];
968 info->bmversion[2] = dev->mt->version[2];
969 sprintf(info->bmname, "%s", dev->mt->name);
970 } else {
971 sprintf(info->bmname, "none");
972 }
973
974 i++;
975 if (i > 31) {
976 pr_err("nvm: max 31 devices can be reported.\n");
977 break;
978 }
979 }
980 up_write(&nvm_lock);
981
982 devices->nr_devices = i;
983
984 if (copy_to_user(arg, devices,
985 sizeof(struct nvm_ioctl_get_devices))) {
986 kfree(devices);
987 return -EFAULT;
988 }
989
990 kfree(devices);
991 return 0;
992}
993
994static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
995{
996 struct nvm_ioctl_create create;
997
998 if (!capable(CAP_SYS_ADMIN))
999 return -EPERM;
1000
1001 if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
1002 return -EFAULT;
1003
1004 create.dev[DISK_NAME_LEN - 1] = '\0';
1005 create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
1006 create.tgtname[DISK_NAME_LEN - 1] = '\0';
1007
1008 if (create.flags != 0) {
1009 pr_err("nvm: no flags supported\n");
1010 return -EINVAL;
1011 }
1012
1013 return __nvm_configure_create(&create);
1014}
1015
1016static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
1017{
1018 struct nvm_ioctl_remove remove;
1019
1020 if (!capable(CAP_SYS_ADMIN))
1021 return -EPERM;
1022
1023 if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
1024 return -EFAULT;
1025
1026 remove.tgtname[DISK_NAME_LEN - 1] = '\0';
1027
1028 if (remove.flags != 0) {
1029 pr_err("nvm: no flags supported\n");
1030 return -EINVAL;
1031 }
1032
1033 return __nvm_configure_remove(&remove);
1034}
1035
1036static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
1037{
1038 info->seqnr = 1;
1039 info->erase_cnt = 0;
1040 info->version = 1;
1041}
1042
1043static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1044{
1045 struct nvm_dev *dev;
1046 struct nvm_sb_info info;
1047 int ret;
1048
1049 down_write(&nvm_lock);
1050 dev = nvm_find_nvm_dev(init->dev);
1051 up_write(&nvm_lock);
1052 if (!dev) {
1053 pr_err("nvm: device not found\n");
1054 return -EINVAL;
1055 }
1056
1057 nvm_setup_nvm_sb_info(&info);
1058
1059 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1060 info.fs_ppa.ppa = -1;
1061
1062 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
1063 ret = nvm_init_sysblock(dev, &info);
1064 if (ret)
1065 return ret;
1066 }
1067
1068 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1069
1070 down_write(&nvm_lock);
1071 dev->mt = nvm_init_mgr(dev);
1072 up_write(&nvm_lock);
1073
1074 return 0;
1075}
1076
1077static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1078{
1079 struct nvm_ioctl_dev_init init;
1080
1081 if (!capable(CAP_SYS_ADMIN))
1082 return -EPERM;
1083
1084 if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
1085 return -EFAULT;
1086
1087 if (init.flags != 0) {
1088 pr_err("nvm: no flags supported\n");
1089 return -EINVAL;
1090 }
1091
1092 init.dev[DISK_NAME_LEN - 1] = '\0';
1093
1094 return __nvm_ioctl_dev_init(&init);
1095}
1096
1097static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1098{
1099 struct nvm_ioctl_dev_factory fact;
1100 struct nvm_dev *dev;
1101
1102 if (!capable(CAP_SYS_ADMIN))
1103 return -EPERM;
1104
1105 if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1106 return -EFAULT;
1107
1108 fact.dev[DISK_NAME_LEN - 1] = '\0';
1109
1110 if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1111 return -EINVAL;
1112
1113 down_write(&nvm_lock);
1114 dev = nvm_find_nvm_dev(fact.dev);
1115 up_write(&nvm_lock);
1116 if (!dev) {
1117 pr_err("nvm: device not found\n");
1118 return -EINVAL;
1119 }
1120
1121 if (dev->mt) {
1122 dev->mt->unregister_mgr(dev);
1123 dev->mt = NULL;
1124 }
1125
1126 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1127 return nvm_dev_factory(dev, fact.flags);
1128
1129 return 0;
1130}
1131
1132static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
1133{
1134 void __user *argp = (void __user *)arg;
1135
1136 switch (cmd) {
1137 case NVM_INFO:
1138 return nvm_ioctl_info(file, argp);
1139 case NVM_GET_DEVICES:
1140 return nvm_ioctl_get_devices(file, argp);
1141 case NVM_DEV_CREATE:
1142 return nvm_ioctl_dev_create(file, argp);
1143 case NVM_DEV_REMOVE:
1144 return nvm_ioctl_dev_remove(file, argp);
1145 case NVM_DEV_INIT:
1146 return nvm_ioctl_dev_init(file, argp);
1147 case NVM_DEV_FACTORY:
1148 return nvm_ioctl_dev_factory(file, argp);
1149 }
1150 return 0;
1151}
1152
1153static const struct file_operations _ctl_fops = {
1154 .open = nonseekable_open,
1155 .unlocked_ioctl = nvm_ctl_ioctl,
1156 .owner = THIS_MODULE,
1157 .llseek = noop_llseek,
1158};
1159
1160static struct miscdevice _nvm_misc = {
1161 .minor = MISC_DYNAMIC_MINOR,
1162 .name = "lightnvm",
1163 .nodename = "lightnvm/control",
1164 .fops = &_ctl_fops,
1165};
1166
1167MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
1168
1169static int __init nvm_mod_init(void)
1170{
1171 int ret;
1172
1173 ret = misc_register(&_nvm_misc);
1174 if (ret)
1175 pr_err("nvm: misc_register failed for control device");
1176
1177 return ret;
1178}
1179
1180static void __exit nvm_mod_exit(void)
1181{
1182 misc_deregister(&_nvm_misc);
1183}
1184
1185MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
1186MODULE_LICENSE("GPL v2");
1187MODULE_VERSION("0.1");
1188module_init(nvm_mod_init);
1189module_exit(nvm_mod_exit);