Loading...
1/*
2 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17 * USA.
18 *
19 */
20
21#include <linux/list.h>
22#include <linux/types.h>
23#include <linux/sem.h>
24#include <linux/bitmap.h>
25#include <linux/moduleparam.h>
26#include <linux/miscdevice.h>
27#include <linux/lightnvm.h>
28#include <linux/sched/sysctl.h>
29
30static LIST_HEAD(nvm_tgt_types);
31static DECLARE_RWSEM(nvm_tgtt_lock);
32static LIST_HEAD(nvm_mgrs);
33static LIST_HEAD(nvm_devices);
34static DECLARE_RWSEM(nvm_lock);
35
36struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
37{
38 struct nvm_tgt_type *tmp, *tt = NULL;
39
40 if (lock)
41 down_write(&nvm_tgtt_lock);
42
43 list_for_each_entry(tmp, &nvm_tgt_types, list)
44 if (!strcmp(name, tmp->name)) {
45 tt = tmp;
46 break;
47 }
48
49 if (lock)
50 up_write(&nvm_tgtt_lock);
51 return tt;
52}
53EXPORT_SYMBOL(nvm_find_target_type);
54
55int nvm_register_tgt_type(struct nvm_tgt_type *tt)
56{
57 int ret = 0;
58
59 down_write(&nvm_tgtt_lock);
60 if (nvm_find_target_type(tt->name, 0))
61 ret = -EEXIST;
62 else
63 list_add(&tt->list, &nvm_tgt_types);
64 up_write(&nvm_tgtt_lock);
65
66 return ret;
67}
68EXPORT_SYMBOL(nvm_register_tgt_type);
69
70void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
71{
72 if (!tt)
73 return;
74
75 down_write(&nvm_lock);
76 list_del(&tt->list);
77 up_write(&nvm_lock);
78}
79EXPORT_SYMBOL(nvm_unregister_tgt_type);
80
81void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
82 dma_addr_t *dma_handler)
83{
84 return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
85 dma_handler);
86}
87EXPORT_SYMBOL(nvm_dev_dma_alloc);
88
89void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
90{
91 dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
92}
93EXPORT_SYMBOL(nvm_dev_dma_free);
94
95static struct nvmm_type *nvm_find_mgr_type(const char *name)
96{
97 struct nvmm_type *mt;
98
99 list_for_each_entry(mt, &nvm_mgrs, list)
100 if (!strcmp(name, mt->name))
101 return mt;
102
103 return NULL;
104}
105
106static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
107{
108 struct nvmm_type *mt;
109 int ret;
110
111 lockdep_assert_held(&nvm_lock);
112
113 list_for_each_entry(mt, &nvm_mgrs, list) {
114 if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
115 continue;
116
117 ret = mt->register_mgr(dev);
118 if (ret < 0) {
119 pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
120 ret, dev->name);
121 return NULL; /* initialization failed */
122 } else if (ret > 0)
123 return mt;
124 }
125
126 return NULL;
127}
128
129int nvm_register_mgr(struct nvmm_type *mt)
130{
131 struct nvm_dev *dev;
132 int ret = 0;
133
134 down_write(&nvm_lock);
135 if (nvm_find_mgr_type(mt->name)) {
136 ret = -EEXIST;
137 goto finish;
138 } else {
139 list_add(&mt->list, &nvm_mgrs);
140 }
141
142 /* try to register media mgr if any device have none configured */
143 list_for_each_entry(dev, &nvm_devices, devices) {
144 if (dev->mt)
145 continue;
146
147 dev->mt = nvm_init_mgr(dev);
148 }
149finish:
150 up_write(&nvm_lock);
151
152 return ret;
153}
154EXPORT_SYMBOL(nvm_register_mgr);
155
156void nvm_unregister_mgr(struct nvmm_type *mt)
157{
158 if (!mt)
159 return;
160
161 down_write(&nvm_lock);
162 list_del(&mt->list);
163 up_write(&nvm_lock);
164}
165EXPORT_SYMBOL(nvm_unregister_mgr);
166
167static struct nvm_dev *nvm_find_nvm_dev(const char *name)
168{
169 struct nvm_dev *dev;
170
171 list_for_each_entry(dev, &nvm_devices, devices)
172 if (!strcmp(name, dev->name))
173 return dev;
174
175 return NULL;
176}
177
178static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
179 struct nvm_rq *rqd)
180{
181 struct nvm_dev *dev = tgt_dev->parent;
182 int i;
183
184 if (rqd->nr_ppas > 1) {
185 for (i = 0; i < rqd->nr_ppas; i++) {
186 rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
187 rqd->ppa_list[i], TRANS_TGT_TO_DEV);
188 rqd->ppa_list[i] = generic_to_dev_addr(dev,
189 rqd->ppa_list[i]);
190 }
191 } else {
192 rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
193 TRANS_TGT_TO_DEV);
194 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
195 }
196}
197
198int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
199 int type)
200{
201 struct nvm_rq rqd;
202 int ret;
203
204 if (nr_ppas > dev->ops->max_phys_sect) {
205 pr_err("nvm: unable to update all sysblocks atomically\n");
206 return -EINVAL;
207 }
208
209 memset(&rqd, 0, sizeof(struct nvm_rq));
210
211 nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
212 nvm_generic_to_addr_mode(dev, &rqd);
213
214 ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
215 nvm_free_rqd_ppalist(dev, &rqd);
216 if (ret) {
217 pr_err("nvm: sysblk failed bb mark\n");
218 return -EINVAL;
219 }
220
221 return 0;
222}
223EXPORT_SYMBOL(nvm_set_bb_tbl);
224
225int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
226 int nr_ppas, int type)
227{
228 struct nvm_dev *dev = tgt_dev->parent;
229 struct nvm_rq rqd;
230 int ret;
231
232 if (nr_ppas > dev->ops->max_phys_sect) {
233 pr_err("nvm: unable to update all blocks atomically\n");
234 return -EINVAL;
235 }
236
237 memset(&rqd, 0, sizeof(struct nvm_rq));
238
239 nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
240 nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
241
242 ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
243 nvm_free_rqd_ppalist(dev, &rqd);
244 if (ret) {
245 pr_err("nvm: sysblk failed bb mark\n");
246 return -EINVAL;
247 }
248
249 return 0;
250}
251EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
252
253int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
254{
255 struct nvm_dev *dev = tgt_dev->parent;
256
257 return dev->ops->max_phys_sect;
258}
259EXPORT_SYMBOL(nvm_max_phys_sects);
260
261int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
262{
263 struct nvm_dev *dev = tgt_dev->parent;
264
265 return dev->mt->submit_io(tgt_dev, rqd);
266}
267EXPORT_SYMBOL(nvm_submit_io);
268
269int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
270{
271 struct nvm_dev *dev = tgt_dev->parent;
272
273 return dev->mt->erase_blk(tgt_dev, p, flags);
274}
275EXPORT_SYMBOL(nvm_erase_blk);
276
277int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
278 nvm_l2p_update_fn *update_l2p, void *priv)
279{
280 struct nvm_dev *dev = tgt_dev->parent;
281
282 if (!dev->ops->get_l2p_tbl)
283 return 0;
284
285 return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
286}
287EXPORT_SYMBOL(nvm_get_l2p_tbl);
288
289int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
290{
291 struct nvm_dev *dev = tgt_dev->parent;
292
293 return dev->mt->get_area(dev, lba, len);
294}
295EXPORT_SYMBOL(nvm_get_area);
296
297void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
298{
299 struct nvm_dev *dev = tgt_dev->parent;
300
301 dev->mt->put_area(dev, lba);
302}
303EXPORT_SYMBOL(nvm_put_area);
304
305void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
306{
307 int i;
308
309 if (rqd->nr_ppas > 1) {
310 for (i = 0; i < rqd->nr_ppas; i++)
311 rqd->ppa_list[i] = dev_to_generic_addr(dev,
312 rqd->ppa_list[i]);
313 } else {
314 rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
315 }
316}
317EXPORT_SYMBOL(nvm_addr_to_generic_mode);
318
319void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
320{
321 int i;
322
323 if (rqd->nr_ppas > 1) {
324 for (i = 0; i < rqd->nr_ppas; i++)
325 rqd->ppa_list[i] = generic_to_dev_addr(dev,
326 rqd->ppa_list[i]);
327 } else {
328 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
329 }
330}
331EXPORT_SYMBOL(nvm_generic_to_addr_mode);
332
333int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
334 const struct ppa_addr *ppas, int nr_ppas, int vblk)
335{
336 struct nvm_geo *geo = &dev->geo;
337 int i, plane_cnt, pl_idx;
338 struct ppa_addr ppa;
339
340 if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
341 rqd->nr_ppas = nr_ppas;
342 rqd->ppa_addr = ppas[0];
343
344 return 0;
345 }
346
347 rqd->nr_ppas = nr_ppas;
348 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
349 if (!rqd->ppa_list) {
350 pr_err("nvm: failed to allocate dma memory\n");
351 return -ENOMEM;
352 }
353
354 if (!vblk) {
355 for (i = 0; i < nr_ppas; i++)
356 rqd->ppa_list[i] = ppas[i];
357 } else {
358 plane_cnt = geo->plane_mode;
359 rqd->nr_ppas *= plane_cnt;
360
361 for (i = 0; i < nr_ppas; i++) {
362 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
363 ppa = ppas[i];
364 ppa.g.pl = pl_idx;
365 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
366 }
367 }
368 }
369
370 return 0;
371}
372EXPORT_SYMBOL(nvm_set_rqd_ppalist);
373
374void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
375{
376 if (!rqd->ppa_list)
377 return;
378
379 nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
380}
381EXPORT_SYMBOL(nvm_free_rqd_ppalist);
382
383int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
384 int flags)
385{
386 struct nvm_rq rqd;
387 int ret;
388
389 if (!dev->ops->erase_block)
390 return 0;
391
392 memset(&rqd, 0, sizeof(struct nvm_rq));
393
394 ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
395 if (ret)
396 return ret;
397
398 nvm_generic_to_addr_mode(dev, &rqd);
399
400 rqd.flags = flags;
401
402 ret = dev->ops->erase_block(dev, &rqd);
403
404 nvm_free_rqd_ppalist(dev, &rqd);
405
406 return ret;
407}
408EXPORT_SYMBOL(nvm_erase_ppa);
409
410void nvm_end_io(struct nvm_rq *rqd, int error)
411{
412 rqd->error = error;
413 rqd->end_io(rqd);
414}
415EXPORT_SYMBOL(nvm_end_io);
416
417static void nvm_end_io_sync(struct nvm_rq *rqd)
418{
419 struct completion *waiting = rqd->wait;
420
421 rqd->wait = NULL;
422
423 complete(waiting);
424}
425
426static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
427 int flags, void *buf, int len)
428{
429 DECLARE_COMPLETION_ONSTACK(wait);
430 struct bio *bio;
431 int ret;
432 unsigned long hang_check;
433
434 bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
435 if (IS_ERR_OR_NULL(bio))
436 return -ENOMEM;
437
438 nvm_generic_to_addr_mode(dev, rqd);
439
440 rqd->dev = NULL;
441 rqd->opcode = opcode;
442 rqd->flags = flags;
443 rqd->bio = bio;
444 rqd->wait = &wait;
445 rqd->end_io = nvm_end_io_sync;
446
447 ret = dev->ops->submit_io(dev, rqd);
448 if (ret) {
449 bio_put(bio);
450 return ret;
451 }
452
453 /* Prevent hang_check timer from firing at us during very long I/O */
454 hang_check = sysctl_hung_task_timeout_secs;
455 if (hang_check)
456 while (!wait_for_completion_io_timeout(&wait,
457 hang_check * (HZ/2)))
458 ;
459 else
460 wait_for_completion_io(&wait);
461
462 return rqd->error;
463}
464
465/**
466 * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
467 * take to free ppa list if necessary.
468 * @dev: device
469 * @ppa_list: user created ppa_list
470 * @nr_ppas: length of ppa_list
471 * @opcode: device opcode
472 * @flags: device flags
473 * @buf: data buffer
474 * @len: data buffer length
475 */
476int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
477 int nr_ppas, int opcode, int flags, void *buf, int len)
478{
479 struct nvm_rq rqd;
480
481 if (dev->ops->max_phys_sect < nr_ppas)
482 return -EINVAL;
483
484 memset(&rqd, 0, sizeof(struct nvm_rq));
485
486 rqd.nr_ppas = nr_ppas;
487 if (nr_ppas > 1)
488 rqd.ppa_list = ppa_list;
489 else
490 rqd.ppa_addr = ppa_list[0];
491
492 return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
493}
494EXPORT_SYMBOL(nvm_submit_ppa_list);
495
496/**
497 * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
498 * as single, dual, quad plane PPAs depending on device type.
499 * @dev: device
500 * @ppa: user created ppa_list
501 * @nr_ppas: length of ppa_list
502 * @opcode: device opcode
503 * @flags: device flags
504 * @buf: data buffer
505 * @len: data buffer length
506 */
507int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
508 int opcode, int flags, void *buf, int len)
509{
510 struct nvm_rq rqd;
511 int ret;
512
513 memset(&rqd, 0, sizeof(struct nvm_rq));
514 ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
515 if (ret)
516 return ret;
517
518 ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
519
520 nvm_free_rqd_ppalist(dev, &rqd);
521
522 return ret;
523}
524EXPORT_SYMBOL(nvm_submit_ppa);
525
526/*
527 * folds a bad block list from its plane representation to its virtual
528 * block representation. The fold is done in place and reduced size is
529 * returned.
530 *
531 * If any of the planes status are bad or grown bad block, the virtual block
532 * is marked bad. If not bad, the first plane state acts as the block state.
533 */
534int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
535{
536 struct nvm_geo *geo = &dev->geo;
537 int blk, offset, pl, blktype;
538
539 if (nr_blks != geo->blks_per_lun * geo->plane_mode)
540 return -EINVAL;
541
542 for (blk = 0; blk < geo->blks_per_lun; blk++) {
543 offset = blk * geo->plane_mode;
544 blktype = blks[offset];
545
546 /* Bad blocks on any planes take precedence over other types */
547 for (pl = 0; pl < geo->plane_mode; pl++) {
548 if (blks[offset + pl] &
549 (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
550 blktype = blks[offset + pl];
551 break;
552 }
553 }
554
555 blks[blk] = blktype;
556 }
557
558 return geo->blks_per_lun;
559}
560EXPORT_SYMBOL(nvm_bb_tbl_fold);
561
562int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
563{
564 ppa = generic_to_dev_addr(dev, ppa);
565
566 return dev->ops->get_bb_tbl(dev, ppa, blks);
567}
568EXPORT_SYMBOL(nvm_get_bb_tbl);
569
570int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
571 u8 *blks)
572{
573 struct nvm_dev *dev = tgt_dev->parent;
574
575 ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
576 return nvm_get_bb_tbl(dev, ppa, blks);
577}
578EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
579
580static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
581{
582 struct nvm_geo *geo = &dev->geo;
583 int i;
584
585 dev->lps_per_blk = geo->pgs_per_blk;
586 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
587 if (!dev->lptbl)
588 return -ENOMEM;
589
590 /* Just a linear array */
591 for (i = 0; i < dev->lps_per_blk; i++)
592 dev->lptbl[i] = i;
593
594 return 0;
595}
596
597static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
598{
599 int i, p;
600 struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
601
602 if (!mlc->num_pairs)
603 return 0;
604
605 dev->lps_per_blk = mlc->num_pairs;
606 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
607 if (!dev->lptbl)
608 return -ENOMEM;
609
610 /* The lower page table encoding consists of a list of bytes, where each
611 * has a lower and an upper half. The first half byte maintains the
612 * increment value and every value after is an offset added to the
613 * previous incrementation value
614 */
615 dev->lptbl[0] = mlc->pairs[0] & 0xF;
616 for (i = 1; i < dev->lps_per_blk; i++) {
617 p = mlc->pairs[i >> 1];
618 if (i & 0x1) /* upper */
619 dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
620 else /* lower */
621 dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
622 }
623
624 return 0;
625}
626
627static int nvm_core_init(struct nvm_dev *dev)
628{
629 struct nvm_id *id = &dev->identity;
630 struct nvm_id_group *grp = &id->groups[0];
631 struct nvm_geo *geo = &dev->geo;
632 int ret;
633
634 /* Whole device values */
635 geo->nr_chnls = grp->num_ch;
636 geo->luns_per_chnl = grp->num_lun;
637
638 /* Generic device values */
639 geo->pgs_per_blk = grp->num_pg;
640 geo->blks_per_lun = grp->num_blk;
641 geo->nr_planes = grp->num_pln;
642 geo->fpg_size = grp->fpg_sz;
643 geo->pfpg_size = grp->fpg_sz * grp->num_pln;
644 geo->sec_size = grp->csecs;
645 geo->oob_size = grp->sos;
646 geo->sec_per_pg = grp->fpg_sz / grp->csecs;
647 geo->mccap = grp->mccap;
648 memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
649
650 geo->plane_mode = NVM_PLANE_SINGLE;
651 geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
652
653 if (grp->mpos & 0x020202)
654 geo->plane_mode = NVM_PLANE_DOUBLE;
655 if (grp->mpos & 0x040404)
656 geo->plane_mode = NVM_PLANE_QUAD;
657
658 if (grp->mtype != 0) {
659 pr_err("nvm: memory type not supported\n");
660 return -EINVAL;
661 }
662
663 /* calculated values */
664 geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
665 geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
666 geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
667 geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
668
669 dev->total_secs = geo->nr_luns * geo->sec_per_lun;
670 dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
671 sizeof(unsigned long), GFP_KERNEL);
672 if (!dev->lun_map)
673 return -ENOMEM;
674
675 switch (grp->fmtype) {
676 case NVM_ID_FMTYPE_SLC:
677 if (nvm_init_slc_tbl(dev, grp)) {
678 ret = -ENOMEM;
679 goto err_fmtype;
680 }
681 break;
682 case NVM_ID_FMTYPE_MLC:
683 if (nvm_init_mlc_tbl(dev, grp)) {
684 ret = -ENOMEM;
685 goto err_fmtype;
686 }
687 break;
688 default:
689 pr_err("nvm: flash type not supported\n");
690 ret = -EINVAL;
691 goto err_fmtype;
692 }
693
694 mutex_init(&dev->mlock);
695 spin_lock_init(&dev->lock);
696
697 blk_queue_logical_block_size(dev->q, geo->sec_size);
698
699 return 0;
700err_fmtype:
701 kfree(dev->lun_map);
702 return ret;
703}
704
705static void nvm_free_mgr(struct nvm_dev *dev)
706{
707 if (!dev->mt)
708 return;
709
710 dev->mt->unregister_mgr(dev);
711 dev->mt = NULL;
712}
713
714void nvm_free(struct nvm_dev *dev)
715{
716 if (!dev)
717 return;
718
719 nvm_free_mgr(dev);
720
721 if (dev->dma_pool)
722 dev->ops->destroy_dma_pool(dev->dma_pool);
723
724 kfree(dev->lptbl);
725 kfree(dev->lun_map);
726 kfree(dev);
727}
728
729static int nvm_init(struct nvm_dev *dev)
730{
731 struct nvm_geo *geo = &dev->geo;
732 int ret = -EINVAL;
733
734 if (!dev->q || !dev->ops)
735 return ret;
736
737 if (dev->ops->identity(dev, &dev->identity)) {
738 pr_err("nvm: device could not be identified\n");
739 goto err;
740 }
741
742 pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
743 dev->identity.ver_id, dev->identity.vmnt,
744 dev->identity.cgrps);
745
746 if (dev->identity.ver_id != 1) {
747 pr_err("nvm: device not supported by kernel.");
748 goto err;
749 }
750
751 if (dev->identity.cgrps != 1) {
752 pr_err("nvm: only one group configuration supported.");
753 goto err;
754 }
755
756 ret = nvm_core_init(dev);
757 if (ret) {
758 pr_err("nvm: could not initialize core structures.\n");
759 goto err;
760 }
761
762 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
763 dev->name, geo->sec_per_pg, geo->nr_planes,
764 geo->pgs_per_blk, geo->blks_per_lun,
765 geo->nr_luns, geo->nr_chnls);
766 return 0;
767err:
768 pr_err("nvm: failed to initialize nvm\n");
769 return ret;
770}
771
772struct nvm_dev *nvm_alloc_dev(int node)
773{
774 return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
775}
776EXPORT_SYMBOL(nvm_alloc_dev);
777
778int nvm_register(struct nvm_dev *dev)
779{
780 int ret;
781
782 ret = nvm_init(dev);
783 if (ret)
784 goto err_init;
785
786 if (dev->ops->max_phys_sect > 256) {
787 pr_info("nvm: max sectors supported is 256.\n");
788 ret = -EINVAL;
789 goto err_init;
790 }
791
792 if (dev->ops->max_phys_sect > 1) {
793 dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
794 if (!dev->dma_pool) {
795 pr_err("nvm: could not create dma pool\n");
796 ret = -ENOMEM;
797 goto err_init;
798 }
799 }
800
801 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
802 ret = nvm_get_sysblock(dev, &dev->sb);
803 if (!ret)
804 pr_err("nvm: device not initialized.\n");
805 else if (ret < 0)
806 pr_err("nvm: err (%d) on device initialization\n", ret);
807 }
808
809 /* register device with a supported media manager */
810 down_write(&nvm_lock);
811 if (ret > 0)
812 dev->mt = nvm_init_mgr(dev);
813 list_add(&dev->devices, &nvm_devices);
814 up_write(&nvm_lock);
815
816 return 0;
817err_init:
818 kfree(dev->lun_map);
819 return ret;
820}
821EXPORT_SYMBOL(nvm_register);
822
823void nvm_unregister(struct nvm_dev *dev)
824{
825 down_write(&nvm_lock);
826 list_del(&dev->devices);
827 up_write(&nvm_lock);
828
829 nvm_free(dev);
830}
831EXPORT_SYMBOL(nvm_unregister);
832
833static int __nvm_configure_create(struct nvm_ioctl_create *create)
834{
835 struct nvm_dev *dev;
836 struct nvm_ioctl_create_simple *s;
837
838 down_write(&nvm_lock);
839 dev = nvm_find_nvm_dev(create->dev);
840 up_write(&nvm_lock);
841
842 if (!dev) {
843 pr_err("nvm: device not found\n");
844 return -EINVAL;
845 }
846
847 if (!dev->mt) {
848 pr_info("nvm: device has no media manager registered.\n");
849 return -ENODEV;
850 }
851
852 if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
853 pr_err("nvm: config type not valid\n");
854 return -EINVAL;
855 }
856 s = &create->conf.s;
857
858 if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
859 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
860 s->lun_begin, s->lun_end, dev->geo.nr_luns);
861 return -EINVAL;
862 }
863
864 return dev->mt->create_tgt(dev, create);
865}
866
867static long nvm_ioctl_info(struct file *file, void __user *arg)
868{
869 struct nvm_ioctl_info *info;
870 struct nvm_tgt_type *tt;
871 int tgt_iter = 0;
872
873 if (!capable(CAP_SYS_ADMIN))
874 return -EPERM;
875
876 info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
877 if (IS_ERR(info))
878 return -EFAULT;
879
880 info->version[0] = NVM_VERSION_MAJOR;
881 info->version[1] = NVM_VERSION_MINOR;
882 info->version[2] = NVM_VERSION_PATCH;
883
884 down_write(&nvm_lock);
885 list_for_each_entry(tt, &nvm_tgt_types, list) {
886 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
887
888 tgt->version[0] = tt->version[0];
889 tgt->version[1] = tt->version[1];
890 tgt->version[2] = tt->version[2];
891 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
892
893 tgt_iter++;
894 }
895
896 info->tgtsize = tgt_iter;
897 up_write(&nvm_lock);
898
899 if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
900 kfree(info);
901 return -EFAULT;
902 }
903
904 kfree(info);
905 return 0;
906}
907
908static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
909{
910 struct nvm_ioctl_get_devices *devices;
911 struct nvm_dev *dev;
912 int i = 0;
913
914 if (!capable(CAP_SYS_ADMIN))
915 return -EPERM;
916
917 devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
918 if (!devices)
919 return -ENOMEM;
920
921 down_write(&nvm_lock);
922 list_for_each_entry(dev, &nvm_devices, devices) {
923 struct nvm_ioctl_device_info *info = &devices->info[i];
924
925 sprintf(info->devname, "%s", dev->name);
926 if (dev->mt) {
927 info->bmversion[0] = dev->mt->version[0];
928 info->bmversion[1] = dev->mt->version[1];
929 info->bmversion[2] = dev->mt->version[2];
930 sprintf(info->bmname, "%s", dev->mt->name);
931 } else {
932 sprintf(info->bmname, "none");
933 }
934
935 i++;
936 if (i > 31) {
937 pr_err("nvm: max 31 devices can be reported.\n");
938 break;
939 }
940 }
941 up_write(&nvm_lock);
942
943 devices->nr_devices = i;
944
945 if (copy_to_user(arg, devices,
946 sizeof(struct nvm_ioctl_get_devices))) {
947 kfree(devices);
948 return -EFAULT;
949 }
950
951 kfree(devices);
952 return 0;
953}
954
955static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
956{
957 struct nvm_ioctl_create create;
958
959 if (!capable(CAP_SYS_ADMIN))
960 return -EPERM;
961
962 if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
963 return -EFAULT;
964
965 create.dev[DISK_NAME_LEN - 1] = '\0';
966 create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
967 create.tgtname[DISK_NAME_LEN - 1] = '\0';
968
969 if (create.flags != 0) {
970 pr_err("nvm: no flags supported\n");
971 return -EINVAL;
972 }
973
974 return __nvm_configure_create(&create);
975}
976
977static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
978{
979 struct nvm_ioctl_remove remove;
980 struct nvm_dev *dev;
981 int ret = 0;
982
983 if (!capable(CAP_SYS_ADMIN))
984 return -EPERM;
985
986 if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
987 return -EFAULT;
988
989 remove.tgtname[DISK_NAME_LEN - 1] = '\0';
990
991 if (remove.flags != 0) {
992 pr_err("nvm: no flags supported\n");
993 return -EINVAL;
994 }
995
996 list_for_each_entry(dev, &nvm_devices, devices) {
997 ret = dev->mt->remove_tgt(dev, &remove);
998 if (!ret)
999 break;
1000 }
1001
1002 return ret;
1003}
1004
1005static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
1006{
1007 info->seqnr = 1;
1008 info->erase_cnt = 0;
1009 info->version = 1;
1010}
1011
1012static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1013{
1014 struct nvm_dev *dev;
1015 struct nvm_sb_info info;
1016 int ret;
1017
1018 down_write(&nvm_lock);
1019 dev = nvm_find_nvm_dev(init->dev);
1020 up_write(&nvm_lock);
1021 if (!dev) {
1022 pr_err("nvm: device not found\n");
1023 return -EINVAL;
1024 }
1025
1026 nvm_setup_nvm_sb_info(&info);
1027
1028 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1029 info.fs_ppa.ppa = -1;
1030
1031 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
1032 ret = nvm_init_sysblock(dev, &info);
1033 if (ret)
1034 return ret;
1035 }
1036
1037 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1038
1039 down_write(&nvm_lock);
1040 dev->mt = nvm_init_mgr(dev);
1041 up_write(&nvm_lock);
1042
1043 return 0;
1044}
1045
1046static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1047{
1048 struct nvm_ioctl_dev_init init;
1049
1050 if (!capable(CAP_SYS_ADMIN))
1051 return -EPERM;
1052
1053 if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
1054 return -EFAULT;
1055
1056 if (init.flags != 0) {
1057 pr_err("nvm: no flags supported\n");
1058 return -EINVAL;
1059 }
1060
1061 init.dev[DISK_NAME_LEN - 1] = '\0';
1062
1063 return __nvm_ioctl_dev_init(&init);
1064}
1065
1066static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1067{
1068 struct nvm_ioctl_dev_factory fact;
1069 struct nvm_dev *dev;
1070
1071 if (!capable(CAP_SYS_ADMIN))
1072 return -EPERM;
1073
1074 if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1075 return -EFAULT;
1076
1077 fact.dev[DISK_NAME_LEN - 1] = '\0';
1078
1079 if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1080 return -EINVAL;
1081
1082 down_write(&nvm_lock);
1083 dev = nvm_find_nvm_dev(fact.dev);
1084 up_write(&nvm_lock);
1085 if (!dev) {
1086 pr_err("nvm: device not found\n");
1087 return -EINVAL;
1088 }
1089
1090 nvm_free_mgr(dev);
1091
1092 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1093 return nvm_dev_factory(dev, fact.flags);
1094
1095 return 0;
1096}
1097
1098static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
1099{
1100 void __user *argp = (void __user *)arg;
1101
1102 switch (cmd) {
1103 case NVM_INFO:
1104 return nvm_ioctl_info(file, argp);
1105 case NVM_GET_DEVICES:
1106 return nvm_ioctl_get_devices(file, argp);
1107 case NVM_DEV_CREATE:
1108 return nvm_ioctl_dev_create(file, argp);
1109 case NVM_DEV_REMOVE:
1110 return nvm_ioctl_dev_remove(file, argp);
1111 case NVM_DEV_INIT:
1112 return nvm_ioctl_dev_init(file, argp);
1113 case NVM_DEV_FACTORY:
1114 return nvm_ioctl_dev_factory(file, argp);
1115 }
1116 return 0;
1117}
1118
1119static const struct file_operations _ctl_fops = {
1120 .open = nonseekable_open,
1121 .unlocked_ioctl = nvm_ctl_ioctl,
1122 .owner = THIS_MODULE,
1123 .llseek = noop_llseek,
1124};
1125
1126static struct miscdevice _nvm_misc = {
1127 .minor = MISC_DYNAMIC_MINOR,
1128 .name = "lightnvm",
1129 .nodename = "lightnvm/control",
1130 .fops = &_ctl_fops,
1131};
1132builtin_misc_device(_nvm_misc);
1/*
2 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
3 * Initial release: Matias Bjorling <m@bjorling.me>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17 * USA.
18 *
19 */
20
21#include <linux/blkdev.h>
22#include <linux/blk-mq.h>
23#include <linux/list.h>
24#include <linux/types.h>
25#include <linux/sem.h>
26#include <linux/bitmap.h>
27#include <linux/module.h>
28#include <linux/miscdevice.h>
29#include <linux/lightnvm.h>
30#include <linux/sched/sysctl.h>
31#include <uapi/linux/lightnvm.h>
32
33static LIST_HEAD(nvm_targets);
34static LIST_HEAD(nvm_mgrs);
35static LIST_HEAD(nvm_devices);
36static DECLARE_RWSEM(nvm_lock);
37
38static struct nvm_tgt_type *nvm_find_target_type(const char *name)
39{
40 struct nvm_tgt_type *tt;
41
42 list_for_each_entry(tt, &nvm_targets, list)
43 if (!strcmp(name, tt->name))
44 return tt;
45
46 return NULL;
47}
48
49int nvm_register_target(struct nvm_tgt_type *tt)
50{
51 int ret = 0;
52
53 down_write(&nvm_lock);
54 if (nvm_find_target_type(tt->name))
55 ret = -EEXIST;
56 else
57 list_add(&tt->list, &nvm_targets);
58 up_write(&nvm_lock);
59
60 return ret;
61}
62EXPORT_SYMBOL(nvm_register_target);
63
64void nvm_unregister_target(struct nvm_tgt_type *tt)
65{
66 if (!tt)
67 return;
68
69 down_write(&nvm_lock);
70 list_del(&tt->list);
71 up_write(&nvm_lock);
72}
73EXPORT_SYMBOL(nvm_unregister_target);
74
75void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
76 dma_addr_t *dma_handler)
77{
78 return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
79 dma_handler);
80}
81EXPORT_SYMBOL(nvm_dev_dma_alloc);
82
83void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
84 dma_addr_t dma_handler)
85{
86 dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
87}
88EXPORT_SYMBOL(nvm_dev_dma_free);
89
90static struct nvmm_type *nvm_find_mgr_type(const char *name)
91{
92 struct nvmm_type *mt;
93
94 list_for_each_entry(mt, &nvm_mgrs, list)
95 if (!strcmp(name, mt->name))
96 return mt;
97
98 return NULL;
99}
100
101struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
102{
103 struct nvmm_type *mt;
104 int ret;
105
106 lockdep_assert_held(&nvm_lock);
107
108 list_for_each_entry(mt, &nvm_mgrs, list) {
109 if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
110 continue;
111
112 ret = mt->register_mgr(dev);
113 if (ret < 0) {
114 pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
115 ret, dev->name);
116 return NULL; /* initialization failed */
117 } else if (ret > 0)
118 return mt;
119 }
120
121 return NULL;
122}
123
124int nvm_register_mgr(struct nvmm_type *mt)
125{
126 struct nvm_dev *dev;
127 int ret = 0;
128
129 down_write(&nvm_lock);
130 if (nvm_find_mgr_type(mt->name)) {
131 ret = -EEXIST;
132 goto finish;
133 } else {
134 list_add(&mt->list, &nvm_mgrs);
135 }
136
137 /* try to register media mgr if any device have none configured */
138 list_for_each_entry(dev, &nvm_devices, devices) {
139 if (dev->mt)
140 continue;
141
142 dev->mt = nvm_init_mgr(dev);
143 }
144finish:
145 up_write(&nvm_lock);
146
147 return ret;
148}
149EXPORT_SYMBOL(nvm_register_mgr);
150
151void nvm_unregister_mgr(struct nvmm_type *mt)
152{
153 if (!mt)
154 return;
155
156 down_write(&nvm_lock);
157 list_del(&mt->list);
158 up_write(&nvm_lock);
159}
160EXPORT_SYMBOL(nvm_unregister_mgr);
161
162static struct nvm_dev *nvm_find_nvm_dev(const char *name)
163{
164 struct nvm_dev *dev;
165
166 list_for_each_entry(dev, &nvm_devices, devices)
167 if (!strcmp(name, dev->name))
168 return dev;
169
170 return NULL;
171}
172
173struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
174 unsigned long flags)
175{
176 return dev->mt->get_blk_unlocked(dev, lun, flags);
177}
178EXPORT_SYMBOL(nvm_get_blk_unlocked);
179
180/* Assumes that all valid pages have already been moved on release to bm */
181void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
182{
183 return dev->mt->put_blk_unlocked(dev, blk);
184}
185EXPORT_SYMBOL(nvm_put_blk_unlocked);
186
187struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
188 unsigned long flags)
189{
190 return dev->mt->get_blk(dev, lun, flags);
191}
192EXPORT_SYMBOL(nvm_get_blk);
193
194/* Assumes that all valid pages have already been moved on release to bm */
195void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
196{
197 return dev->mt->put_blk(dev, blk);
198}
199EXPORT_SYMBOL(nvm_put_blk);
200
201int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
202{
203 return dev->mt->submit_io(dev, rqd);
204}
205EXPORT_SYMBOL(nvm_submit_io);
206
207int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
208{
209 return dev->mt->erase_blk(dev, blk, 0);
210}
211EXPORT_SYMBOL(nvm_erase_blk);
212
213void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
214{
215 int i;
216
217 if (rqd->nr_pages > 1) {
218 for (i = 0; i < rqd->nr_pages; i++)
219 rqd->ppa_list[i] = dev_to_generic_addr(dev,
220 rqd->ppa_list[i]);
221 } else {
222 rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
223 }
224}
225EXPORT_SYMBOL(nvm_addr_to_generic_mode);
226
227void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
228{
229 int i;
230
231 if (rqd->nr_pages > 1) {
232 for (i = 0; i < rqd->nr_pages; i++)
233 rqd->ppa_list[i] = generic_to_dev_addr(dev,
234 rqd->ppa_list[i]);
235 } else {
236 rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
237 }
238}
239EXPORT_SYMBOL(nvm_generic_to_addr_mode);
240
241int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
242 struct ppa_addr *ppas, int nr_ppas)
243{
244 int i, plane_cnt, pl_idx;
245
246 if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
247 rqd->nr_pages = 1;
248 rqd->ppa_addr = ppas[0];
249
250 return 0;
251 }
252
253 plane_cnt = dev->plane_mode;
254 rqd->nr_pages = plane_cnt * nr_ppas;
255
256 if (dev->ops->max_phys_sect < rqd->nr_pages)
257 return -EINVAL;
258
259 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
260 if (!rqd->ppa_list) {
261 pr_err("nvm: failed to allocate dma memory\n");
262 return -ENOMEM;
263 }
264
265 for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
266 for (i = 0; i < nr_ppas; i++) {
267 ppas[i].g.pl = pl_idx;
268 rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
269 }
270 }
271
272 return 0;
273}
274EXPORT_SYMBOL(nvm_set_rqd_ppalist);
275
276void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
277{
278 if (!rqd->ppa_list)
279 return;
280
281 nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
282}
283EXPORT_SYMBOL(nvm_free_rqd_ppalist);
284
285int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
286{
287 struct nvm_rq rqd;
288 int ret;
289
290 if (!dev->ops->erase_block)
291 return 0;
292
293 memset(&rqd, 0, sizeof(struct nvm_rq));
294
295 ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
296 if (ret)
297 return ret;
298
299 nvm_generic_to_addr_mode(dev, &rqd);
300
301 ret = dev->ops->erase_block(dev, &rqd);
302
303 nvm_free_rqd_ppalist(dev, &rqd);
304
305 return ret;
306}
307EXPORT_SYMBOL(nvm_erase_ppa);
308
309void nvm_end_io(struct nvm_rq *rqd, int error)
310{
311 rqd->error = error;
312 rqd->end_io(rqd);
313}
314EXPORT_SYMBOL(nvm_end_io);
315
316static void nvm_end_io_sync(struct nvm_rq *rqd)
317{
318 struct completion *waiting = rqd->wait;
319
320 rqd->wait = NULL;
321
322 complete(waiting);
323}
324
325int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
326 int opcode, int flags, void *buf, int len)
327{
328 DECLARE_COMPLETION_ONSTACK(wait);
329 struct nvm_rq rqd;
330 struct bio *bio;
331 int ret;
332 unsigned long hang_check;
333
334 bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
335 if (IS_ERR_OR_NULL(bio))
336 return -ENOMEM;
337
338 memset(&rqd, 0, sizeof(struct nvm_rq));
339 ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
340 if (ret) {
341 bio_put(bio);
342 return ret;
343 }
344
345 rqd.opcode = opcode;
346 rqd.bio = bio;
347 rqd.wait = &wait;
348 rqd.dev = dev;
349 rqd.end_io = nvm_end_io_sync;
350 rqd.flags = flags;
351 nvm_generic_to_addr_mode(dev, &rqd);
352
353 ret = dev->ops->submit_io(dev, &rqd);
354
355 /* Prevent hang_check timer from firing at us during very long I/O */
356 hang_check = sysctl_hung_task_timeout_secs;
357 if (hang_check)
358 while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
359 else
360 wait_for_completion_io(&wait);
361
362 nvm_free_rqd_ppalist(dev, &rqd);
363
364 return rqd.error;
365}
366EXPORT_SYMBOL(nvm_submit_ppa);
367
368static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
369{
370 int i;
371
372 dev->lps_per_blk = dev->pgs_per_blk;
373 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
374 if (!dev->lptbl)
375 return -ENOMEM;
376
377 /* Just a linear array */
378 for (i = 0; i < dev->lps_per_blk; i++)
379 dev->lptbl[i] = i;
380
381 return 0;
382}
383
384static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
385{
386 int i, p;
387 struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
388
389 if (!mlc->num_pairs)
390 return 0;
391
392 dev->lps_per_blk = mlc->num_pairs;
393 dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
394 if (!dev->lptbl)
395 return -ENOMEM;
396
397 /* The lower page table encoding consists of a list of bytes, where each
398 * has a lower and an upper half. The first half byte maintains the
399 * increment value and every value after is an offset added to the
400 * previous incrementation value */
401 dev->lptbl[0] = mlc->pairs[0] & 0xF;
402 for (i = 1; i < dev->lps_per_blk; i++) {
403 p = mlc->pairs[i >> 1];
404 if (i & 0x1) /* upper */
405 dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
406 else /* lower */
407 dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
408 }
409
410 return 0;
411}
412
413static int nvm_core_init(struct nvm_dev *dev)
414{
415 struct nvm_id *id = &dev->identity;
416 struct nvm_id_group *grp = &id->groups[0];
417
418 /* device values */
419 dev->nr_chnls = grp->num_ch;
420 dev->luns_per_chnl = grp->num_lun;
421 dev->pgs_per_blk = grp->num_pg;
422 dev->blks_per_lun = grp->num_blk;
423 dev->nr_planes = grp->num_pln;
424 dev->sec_size = grp->csecs;
425 dev->oob_size = grp->sos;
426 dev->sec_per_pg = grp->fpg_sz / grp->csecs;
427 dev->mccap = grp->mccap;
428 memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
429
430 dev->plane_mode = NVM_PLANE_SINGLE;
431 dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
432
433 if (grp->mtype != 0) {
434 pr_err("nvm: memory type not supported\n");
435 return -EINVAL;
436 }
437
438 switch (grp->fmtype) {
439 case NVM_ID_FMTYPE_SLC:
440 if (nvm_init_slc_tbl(dev, grp))
441 return -ENOMEM;
442 break;
443 case NVM_ID_FMTYPE_MLC:
444 if (nvm_init_mlc_tbl(dev, grp))
445 return -ENOMEM;
446 break;
447 default:
448 pr_err("nvm: flash type not supported\n");
449 return -EINVAL;
450 }
451
452 if (!dev->lps_per_blk)
453 pr_info("nvm: lower page programming table missing\n");
454
455 if (grp->mpos & 0x020202)
456 dev->plane_mode = NVM_PLANE_DOUBLE;
457 if (grp->mpos & 0x040404)
458 dev->plane_mode = NVM_PLANE_QUAD;
459
460 /* calculated values */
461 dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
462 dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
463 dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
464 dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
465
466 dev->total_secs = dev->nr_luns * dev->sec_per_lun;
467 dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
468 sizeof(unsigned long), GFP_KERNEL);
469 if (!dev->lun_map)
470 return -ENOMEM;
471 INIT_LIST_HEAD(&dev->online_targets);
472 mutex_init(&dev->mlock);
473 spin_lock_init(&dev->lock);
474
475 return 0;
476}
477
478static void nvm_free(struct nvm_dev *dev)
479{
480 if (!dev)
481 return;
482
483 if (dev->mt)
484 dev->mt->unregister_mgr(dev);
485
486 kfree(dev->lptbl);
487}
488
489static int nvm_init(struct nvm_dev *dev)
490{
491 int ret = -EINVAL;
492
493 if (!dev->q || !dev->ops)
494 return ret;
495
496 if (dev->ops->identity(dev, &dev->identity)) {
497 pr_err("nvm: device could not be identified\n");
498 goto err;
499 }
500
501 pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
502 dev->identity.ver_id, dev->identity.vmnt,
503 dev->identity.cgrps);
504
505 if (dev->identity.ver_id != 1) {
506 pr_err("nvm: device not supported by kernel.");
507 goto err;
508 }
509
510 if (dev->identity.cgrps != 1) {
511 pr_err("nvm: only one group configuration supported.");
512 goto err;
513 }
514
515 ret = nvm_core_init(dev);
516 if (ret) {
517 pr_err("nvm: could not initialize core structures.\n");
518 goto err;
519 }
520
521 pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
522 dev->name, dev->sec_per_pg, dev->nr_planes,
523 dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
524 dev->nr_chnls);
525 return 0;
526err:
527 pr_err("nvm: failed to initialize nvm\n");
528 return ret;
529}
530
531static void nvm_exit(struct nvm_dev *dev)
532{
533 if (dev->ppalist_pool)
534 dev->ops->destroy_dma_pool(dev->ppalist_pool);
535 nvm_free(dev);
536
537 pr_info("nvm: successfully unloaded\n");
538}
539
540int nvm_register(struct request_queue *q, char *disk_name,
541 struct nvm_dev_ops *ops)
542{
543 struct nvm_dev *dev;
544 int ret;
545
546 if (!ops->identity)
547 return -EINVAL;
548
549 dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
550 if (!dev)
551 return -ENOMEM;
552
553 dev->q = q;
554 dev->ops = ops;
555 strncpy(dev->name, disk_name, DISK_NAME_LEN);
556
557 ret = nvm_init(dev);
558 if (ret)
559 goto err_init;
560
561 if (dev->ops->max_phys_sect > 256) {
562 pr_info("nvm: max sectors supported is 256.\n");
563 ret = -EINVAL;
564 goto err_init;
565 }
566
567 if (dev->ops->max_phys_sect > 1) {
568 dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
569 if (!dev->ppalist_pool) {
570 pr_err("nvm: could not create ppa pool\n");
571 ret = -ENOMEM;
572 goto err_init;
573 }
574 }
575
576 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
577 ret = nvm_get_sysblock(dev, &dev->sb);
578 if (!ret)
579 pr_err("nvm: device not initialized.\n");
580 else if (ret < 0)
581 pr_err("nvm: err (%d) on device initialization\n", ret);
582 }
583
584 /* register device with a supported media manager */
585 down_write(&nvm_lock);
586 if (ret > 0)
587 dev->mt = nvm_init_mgr(dev);
588 list_add(&dev->devices, &nvm_devices);
589 up_write(&nvm_lock);
590
591 return 0;
592err_init:
593 kfree(dev->lun_map);
594 kfree(dev);
595 return ret;
596}
597EXPORT_SYMBOL(nvm_register);
598
599void nvm_unregister(char *disk_name)
600{
601 struct nvm_dev *dev;
602
603 down_write(&nvm_lock);
604 dev = nvm_find_nvm_dev(disk_name);
605 if (!dev) {
606 pr_err("nvm: could not find device %s to unregister\n",
607 disk_name);
608 up_write(&nvm_lock);
609 return;
610 }
611
612 list_del(&dev->devices);
613 up_write(&nvm_lock);
614
615 nvm_exit(dev);
616 kfree(dev->lun_map);
617 kfree(dev);
618}
619EXPORT_SYMBOL(nvm_unregister);
620
621static const struct block_device_operations nvm_fops = {
622 .owner = THIS_MODULE,
623};
624
625static int nvm_create_target(struct nvm_dev *dev,
626 struct nvm_ioctl_create *create)
627{
628 struct nvm_ioctl_create_simple *s = &create->conf.s;
629 struct request_queue *tqueue;
630 struct gendisk *tdisk;
631 struct nvm_tgt_type *tt;
632 struct nvm_target *t;
633 void *targetdata;
634
635 if (!dev->mt) {
636 pr_info("nvm: device has no media manager registered.\n");
637 return -ENODEV;
638 }
639
640 down_write(&nvm_lock);
641 tt = nvm_find_target_type(create->tgttype);
642 if (!tt) {
643 pr_err("nvm: target type %s not found\n", create->tgttype);
644 up_write(&nvm_lock);
645 return -EINVAL;
646 }
647
648 list_for_each_entry(t, &dev->online_targets, list) {
649 if (!strcmp(create->tgtname, t->disk->disk_name)) {
650 pr_err("nvm: target name already exists.\n");
651 up_write(&nvm_lock);
652 return -EINVAL;
653 }
654 }
655 up_write(&nvm_lock);
656
657 t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
658 if (!t)
659 return -ENOMEM;
660
661 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
662 if (!tqueue)
663 goto err_t;
664 blk_queue_make_request(tqueue, tt->make_rq);
665
666 tdisk = alloc_disk(0);
667 if (!tdisk)
668 goto err_queue;
669
670 sprintf(tdisk->disk_name, "%s", create->tgtname);
671 tdisk->flags = GENHD_FL_EXT_DEVT;
672 tdisk->major = 0;
673 tdisk->first_minor = 0;
674 tdisk->fops = &nvm_fops;
675 tdisk->queue = tqueue;
676
677 targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
678 if (IS_ERR(targetdata))
679 goto err_init;
680
681 tdisk->private_data = targetdata;
682 tqueue->queuedata = targetdata;
683
684 blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
685
686 set_capacity(tdisk, tt->capacity(targetdata));
687 add_disk(tdisk);
688
689 t->type = tt;
690 t->disk = tdisk;
691
692 down_write(&nvm_lock);
693 list_add_tail(&t->list, &dev->online_targets);
694 up_write(&nvm_lock);
695
696 return 0;
697err_init:
698 put_disk(tdisk);
699err_queue:
700 blk_cleanup_queue(tqueue);
701err_t:
702 kfree(t);
703 return -ENOMEM;
704}
705
706static void nvm_remove_target(struct nvm_target *t)
707{
708 struct nvm_tgt_type *tt = t->type;
709 struct gendisk *tdisk = t->disk;
710 struct request_queue *q = tdisk->queue;
711
712 lockdep_assert_held(&nvm_lock);
713
714 del_gendisk(tdisk);
715 blk_cleanup_queue(q);
716
717 if (tt->exit)
718 tt->exit(tdisk->private_data);
719
720 put_disk(tdisk);
721
722 list_del(&t->list);
723 kfree(t);
724}
725
726static int __nvm_configure_create(struct nvm_ioctl_create *create)
727{
728 struct nvm_dev *dev;
729 struct nvm_ioctl_create_simple *s;
730
731 down_write(&nvm_lock);
732 dev = nvm_find_nvm_dev(create->dev);
733 up_write(&nvm_lock);
734 if (!dev) {
735 pr_err("nvm: device not found\n");
736 return -EINVAL;
737 }
738
739 if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
740 pr_err("nvm: config type not valid\n");
741 return -EINVAL;
742 }
743 s = &create->conf.s;
744
745 if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
746 pr_err("nvm: lun out of bound (%u:%u > %u)\n",
747 s->lun_begin, s->lun_end, dev->nr_luns);
748 return -EINVAL;
749 }
750
751 return nvm_create_target(dev, create);
752}
753
754static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
755{
756 struct nvm_target *t = NULL;
757 struct nvm_dev *dev;
758 int ret = -1;
759
760 down_write(&nvm_lock);
761 list_for_each_entry(dev, &nvm_devices, devices)
762 list_for_each_entry(t, &dev->online_targets, list) {
763 if (!strcmp(remove->tgtname, t->disk->disk_name)) {
764 nvm_remove_target(t);
765 ret = 0;
766 break;
767 }
768 }
769 up_write(&nvm_lock);
770
771 if (ret) {
772 pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
773 return -EINVAL;
774 }
775
776 return 0;
777}
778
779#ifdef CONFIG_NVM_DEBUG
780static int nvm_configure_show(const char *val)
781{
782 struct nvm_dev *dev;
783 char opcode, devname[DISK_NAME_LEN];
784 int ret;
785
786 ret = sscanf(val, "%c %32s", &opcode, devname);
787 if (ret != 2) {
788 pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
789 return -EINVAL;
790 }
791
792 down_write(&nvm_lock);
793 dev = nvm_find_nvm_dev(devname);
794 up_write(&nvm_lock);
795 if (!dev) {
796 pr_err("nvm: device not found\n");
797 return -EINVAL;
798 }
799
800 if (!dev->mt)
801 return 0;
802
803 dev->mt->lun_info_print(dev);
804
805 return 0;
806}
807
808static int nvm_configure_remove(const char *val)
809{
810 struct nvm_ioctl_remove remove;
811 char opcode;
812 int ret;
813
814 ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
815 if (ret != 2) {
816 pr_err("nvm: invalid command. Use \"d targetname\".\n");
817 return -EINVAL;
818 }
819
820 remove.flags = 0;
821
822 return __nvm_configure_remove(&remove);
823}
824
825static int nvm_configure_create(const char *val)
826{
827 struct nvm_ioctl_create create;
828 char opcode;
829 int lun_begin, lun_end, ret;
830
831 ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
832 create.tgtname, create.tgttype,
833 &lun_begin, &lun_end);
834 if (ret != 6) {
835 pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
836 return -EINVAL;
837 }
838
839 create.flags = 0;
840 create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
841 create.conf.s.lun_begin = lun_begin;
842 create.conf.s.lun_end = lun_end;
843
844 return __nvm_configure_create(&create);
845}
846
847
848/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
849static int nvm_configure_by_str_event(const char *val,
850 const struct kernel_param *kp)
851{
852 char opcode;
853 int ret;
854
855 ret = sscanf(val, "%c", &opcode);
856 if (ret != 1) {
857 pr_err("nvm: string must have the format of \"cmd ...\"\n");
858 return -EINVAL;
859 }
860
861 switch (opcode) {
862 case 'a':
863 return nvm_configure_create(val);
864 case 'd':
865 return nvm_configure_remove(val);
866 case 's':
867 return nvm_configure_show(val);
868 default:
869 pr_err("nvm: invalid command\n");
870 return -EINVAL;
871 }
872
873 return 0;
874}
875
876static int nvm_configure_get(char *buf, const struct kernel_param *kp)
877{
878 int sz;
879 struct nvm_dev *dev;
880
881 sz = sprintf(buf, "available devices:\n");
882 down_write(&nvm_lock);
883 list_for_each_entry(dev, &nvm_devices, devices) {
884 if (sz > 4095 - DISK_NAME_LEN - 2)
885 break;
886 sz += sprintf(buf + sz, " %32s\n", dev->name);
887 }
888 up_write(&nvm_lock);
889
890 return sz;
891}
892
893static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
894 .set = nvm_configure_by_str_event,
895 .get = nvm_configure_get,
896};
897
898#undef MODULE_PARAM_PREFIX
899#define MODULE_PARAM_PREFIX "lnvm."
900
901module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
902 0644);
903
904#endif /* CONFIG_NVM_DEBUG */
905
906static long nvm_ioctl_info(struct file *file, void __user *arg)
907{
908 struct nvm_ioctl_info *info;
909 struct nvm_tgt_type *tt;
910 int tgt_iter = 0;
911
912 if (!capable(CAP_SYS_ADMIN))
913 return -EPERM;
914
915 info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
916 if (IS_ERR(info))
917 return -EFAULT;
918
919 info->version[0] = NVM_VERSION_MAJOR;
920 info->version[1] = NVM_VERSION_MINOR;
921 info->version[2] = NVM_VERSION_PATCH;
922
923 down_write(&nvm_lock);
924 list_for_each_entry(tt, &nvm_targets, list) {
925 struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
926
927 tgt->version[0] = tt->version[0];
928 tgt->version[1] = tt->version[1];
929 tgt->version[2] = tt->version[2];
930 strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
931
932 tgt_iter++;
933 }
934
935 info->tgtsize = tgt_iter;
936 up_write(&nvm_lock);
937
938 if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
939 kfree(info);
940 return -EFAULT;
941 }
942
943 kfree(info);
944 return 0;
945}
946
947static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
948{
949 struct nvm_ioctl_get_devices *devices;
950 struct nvm_dev *dev;
951 int i = 0;
952
953 if (!capable(CAP_SYS_ADMIN))
954 return -EPERM;
955
956 devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
957 if (!devices)
958 return -ENOMEM;
959
960 down_write(&nvm_lock);
961 list_for_each_entry(dev, &nvm_devices, devices) {
962 struct nvm_ioctl_device_info *info = &devices->info[i];
963
964 sprintf(info->devname, "%s", dev->name);
965 if (dev->mt) {
966 info->bmversion[0] = dev->mt->version[0];
967 info->bmversion[1] = dev->mt->version[1];
968 info->bmversion[2] = dev->mt->version[2];
969 sprintf(info->bmname, "%s", dev->mt->name);
970 } else {
971 sprintf(info->bmname, "none");
972 }
973
974 i++;
975 if (i > 31) {
976 pr_err("nvm: max 31 devices can be reported.\n");
977 break;
978 }
979 }
980 up_write(&nvm_lock);
981
982 devices->nr_devices = i;
983
984 if (copy_to_user(arg, devices,
985 sizeof(struct nvm_ioctl_get_devices))) {
986 kfree(devices);
987 return -EFAULT;
988 }
989
990 kfree(devices);
991 return 0;
992}
993
994static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
995{
996 struct nvm_ioctl_create create;
997
998 if (!capable(CAP_SYS_ADMIN))
999 return -EPERM;
1000
1001 if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
1002 return -EFAULT;
1003
1004 create.dev[DISK_NAME_LEN - 1] = '\0';
1005 create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
1006 create.tgtname[DISK_NAME_LEN - 1] = '\0';
1007
1008 if (create.flags != 0) {
1009 pr_err("nvm: no flags supported\n");
1010 return -EINVAL;
1011 }
1012
1013 return __nvm_configure_create(&create);
1014}
1015
1016static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
1017{
1018 struct nvm_ioctl_remove remove;
1019
1020 if (!capable(CAP_SYS_ADMIN))
1021 return -EPERM;
1022
1023 if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
1024 return -EFAULT;
1025
1026 remove.tgtname[DISK_NAME_LEN - 1] = '\0';
1027
1028 if (remove.flags != 0) {
1029 pr_err("nvm: no flags supported\n");
1030 return -EINVAL;
1031 }
1032
1033 return __nvm_configure_remove(&remove);
1034}
1035
1036static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
1037{
1038 info->seqnr = 1;
1039 info->erase_cnt = 0;
1040 info->version = 1;
1041}
1042
1043static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1044{
1045 struct nvm_dev *dev;
1046 struct nvm_sb_info info;
1047 int ret;
1048
1049 down_write(&nvm_lock);
1050 dev = nvm_find_nvm_dev(init->dev);
1051 up_write(&nvm_lock);
1052 if (!dev) {
1053 pr_err("nvm: device not found\n");
1054 return -EINVAL;
1055 }
1056
1057 nvm_setup_nvm_sb_info(&info);
1058
1059 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1060 info.fs_ppa.ppa = -1;
1061
1062 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
1063 ret = nvm_init_sysblock(dev, &info);
1064 if (ret)
1065 return ret;
1066 }
1067
1068 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1069
1070 down_write(&nvm_lock);
1071 dev->mt = nvm_init_mgr(dev);
1072 up_write(&nvm_lock);
1073
1074 return 0;
1075}
1076
1077static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
1078{
1079 struct nvm_ioctl_dev_init init;
1080
1081 if (!capable(CAP_SYS_ADMIN))
1082 return -EPERM;
1083
1084 if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
1085 return -EFAULT;
1086
1087 if (init.flags != 0) {
1088 pr_err("nvm: no flags supported\n");
1089 return -EINVAL;
1090 }
1091
1092 init.dev[DISK_NAME_LEN - 1] = '\0';
1093
1094 return __nvm_ioctl_dev_init(&init);
1095}
1096
1097static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1098{
1099 struct nvm_ioctl_dev_factory fact;
1100 struct nvm_dev *dev;
1101
1102 if (!capable(CAP_SYS_ADMIN))
1103 return -EPERM;
1104
1105 if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
1106 return -EFAULT;
1107
1108 fact.dev[DISK_NAME_LEN - 1] = '\0';
1109
1110 if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
1111 return -EINVAL;
1112
1113 down_write(&nvm_lock);
1114 dev = nvm_find_nvm_dev(fact.dev);
1115 up_write(&nvm_lock);
1116 if (!dev) {
1117 pr_err("nvm: device not found\n");
1118 return -EINVAL;
1119 }
1120
1121 if (dev->mt) {
1122 dev->mt->unregister_mgr(dev);
1123 dev->mt = NULL;
1124 }
1125
1126 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1127 return nvm_dev_factory(dev, fact.flags);
1128
1129 return 0;
1130}
1131
1132static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
1133{
1134 void __user *argp = (void __user *)arg;
1135
1136 switch (cmd) {
1137 case NVM_INFO:
1138 return nvm_ioctl_info(file, argp);
1139 case NVM_GET_DEVICES:
1140 return nvm_ioctl_get_devices(file, argp);
1141 case NVM_DEV_CREATE:
1142 return nvm_ioctl_dev_create(file, argp);
1143 case NVM_DEV_REMOVE:
1144 return nvm_ioctl_dev_remove(file, argp);
1145 case NVM_DEV_INIT:
1146 return nvm_ioctl_dev_init(file, argp);
1147 case NVM_DEV_FACTORY:
1148 return nvm_ioctl_dev_factory(file, argp);
1149 }
1150 return 0;
1151}
1152
1153static const struct file_operations _ctl_fops = {
1154 .open = nonseekable_open,
1155 .unlocked_ioctl = nvm_ctl_ioctl,
1156 .owner = THIS_MODULE,
1157 .llseek = noop_llseek,
1158};
1159
1160static struct miscdevice _nvm_misc = {
1161 .minor = MISC_DYNAMIC_MINOR,
1162 .name = "lightnvm",
1163 .nodename = "lightnvm/control",
1164 .fops = &_ctl_fops,
1165};
1166
1167MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
1168
1169static int __init nvm_mod_init(void)
1170{
1171 int ret;
1172
1173 ret = misc_register(&_nvm_misc);
1174 if (ret)
1175 pr_err("nvm: misc_register failed for control device");
1176
1177 return ret;
1178}
1179
1180static void __exit nvm_mod_exit(void)
1181{
1182 misc_deregister(&_nvm_misc);
1183}
1184
1185MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
1186MODULE_LICENSE("GPL v2");
1187MODULE_VERSION("0.1");
1188module_init(nvm_mod_init);
1189module_exit(nvm_mod_exit);