/* SPDX-License-Identifier: GPL-2.0 */
#ifndef NVM_H
#define NVM_H

#include <linux/blkdev.h>
#include <linux/types.h>
#include <uapi/linux/lightnvm.h>

enum {
	NVM_IO_OK = 0,
	NVM_IO_REQUEUE = 1,
	NVM_IO_DONE = 2,
	NVM_IO_ERR = 3,

	NVM_IOTYPE_NONE = 0,
	NVM_IOTYPE_GC = 1,
};

/* common format */
#define NVM_GEN_CH_BITS  (8)
#define NVM_GEN_LUN_BITS (8)
#define NVM_GEN_BLK_BITS (16)
#define NVM_GEN_RESERVED (32)

/* 1.2 format */
#define NVM_12_PG_BITS  (16)
#define NVM_12_PL_BITS  (4)
#define NVM_12_SEC_BITS (4)
#define NVM_12_RESERVED (8)

/* 2.0 format */
#define NVM_20_SEC_BITS (24)
#define NVM_20_RESERVED (8)

enum {
	NVM_OCSSD_SPEC_12 = 12,
	NVM_OCSSD_SPEC_20 = 20,
};
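/*
 * Each layout below fills the full 64-bit address exactly:
 * generic: 8 + 8 + 16 + 32              = 64 bits
 * 1.2:     8 + 8 + 16 + 16 + 4 + 4 + 8  = 64 bits
 * 2.0:     8 + 8 + 16 + 24 + 8          = 64 bits
 */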
struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		/* generic device format */
		struct {
			u64 ch		: NVM_GEN_CH_BITS;
			u64 lun		: NVM_GEN_LUN_BITS;
			u64 blk		: NVM_GEN_BLK_BITS;
			u64 reserved	: NVM_GEN_RESERVED;
		} a;

		/* 1.2 device format */
		struct {
			u64 ch		: NVM_GEN_CH_BITS;
			u64 lun		: NVM_GEN_LUN_BITS;
			u64 blk		: NVM_GEN_BLK_BITS;
			u64 pg		: NVM_12_PG_BITS;
			u64 pl		: NVM_12_PL_BITS;
			u64 sec		: NVM_12_SEC_BITS;
			u64 reserved	: NVM_12_RESERVED;
		} g;

		/* 2.0 device format */
		struct {
			u64 grp		: NVM_GEN_CH_BITS;
			u64 pu		: NVM_GEN_LUN_BITS;
			u64 chk		: NVM_GEN_BLK_BITS;
			u64 sec		: NVM_20_SEC_BITS;
			u64 reserved	: NVM_20_RESERVED;
		} m;

		struct {
			u64 line	: 63;
			u64 is_cached	: 1;
		} c;

		u64 ppa;
	};
};
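/*
 * Illustrative sketch (not part of the original header): a caller fills
 * one of the per-spec views and passes the packed 64-bit value around
 * as a single integer, e.g. for a 2.0-format device:
 *
 *	struct ppa_addr p = { .ppa = 0 };
 *
 *	p.m.grp = 1;	// group (channel)
 *	p.m.pu = 2;	// parallel unit (LUN)
 *	p.m.chk = 3;	// chunk
 *	p.m.sec = 4;	// sector within the chunk
 *
 *	pr_info("packed ppa: 0x%llx\n", p.ppa);
 */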

struct nvm_rq;
struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
struct nvm_chk_meta;

typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *,
								sector_t, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
								dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn		*identity;
	nvm_op_bb_tbl_fn	*get_bb_tbl;
	nvm_op_set_bb_fn	*set_bb_tbl;

	nvm_get_chk_meta_fn	*get_chk_meta;

	nvm_submit_io_fn	*submit_io;
	nvm_submit_io_sync_fn	*submit_io_sync;

	nvm_create_dma_pool_fn	*create_dma_pool;
	nvm_destroy_dma_pool_fn	*destroy_dma_pool;
	nvm_dev_dma_alloc_fn	*dev_dma_alloc;
	nvm_dev_dma_free_fn	*dev_dma_free;
};
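/*
 * Illustrative sketch: a device driver (the NVMe driver is the in-tree
 * user) fills this table with its callbacks and points nvm_dev->ops at
 * it before calling nvm_register(); the my_* names are hypothetical
 * placeholders:
 *
 *	static struct nvm_dev_ops my_dev_ops = {
 *		.identity		= my_identity,
 *		.get_bb_tbl		= my_get_bb_tbl,
 *		.set_bb_tbl		= my_set_bb_tbl,
 *		.get_chk_meta		= my_get_chk_meta,
 *		.submit_io		= my_submit_io,
 *		.submit_io_sync		= my_submit_io_sync,
 *		.create_dma_pool	= my_create_dma_pool,
 *		.destroy_dma_pool	= my_destroy_dma_pool,
 *		.dev_dma_alloc		= my_dev_dma_alloc,
 *		.dev_dma_free		= my_dev_dma_free,
 *	};
 */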

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P	= 1 << 0,
	NVM_RSP_ECC	= 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR	= 0,
	NVM_ADDRMODE_CHANNEL	= 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE	= 1,
	NVM_PLANE_DOUBLE	= 2,
	NVM_PLANE_QUAD		= 4,

	/* Status codes */
	NVM_RSP_SUCCESS		= 0x0,
	NVM_RSP_NOT_CHANGEABLE	= 0x1,
	NVM_RSP_ERR_FAILWRITE	= 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE	= 0x42ff,
	NVM_RSP_ERR_FAILECC	= 0x4281,
	NVM_RSP_ERR_FAILCRC	= 0x4004,
	NVM_RSP_WARN_HIGHECC	= 0x4700,

	/* Device opcodes */
	NVM_OP_PWRITE		= 0x91,
	NVM_OP_PREAD		= 0x92,
	NVM_OP_ERASE		= 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS	= 0x0,
	NVM_IO_DUAL_ACCESS	= 0x1,
	NVM_IO_QUAD_ACCESS	= 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND		= 0x80,
	NVM_IO_SLC_MODE		= 0x100,
	NVM_IO_SCRAMBLE_ENABLE	= 0x200,

	/* Block Types */
	NVM_BLK_T_FREE		= 0x0,
	NVM_BLK_T_BAD		= 0x1,
	NVM_BLK_T_GRWN_BAD	= 0x2,
	NVM_BLK_T_DEV		= 0x4,
	NVM_BLK_T_HOST		= 0x8,

	/* Memory capabilities */
	NVM_ID_CAP_SLC		= 0x1,
	NVM_ID_CAP_CMD_SUSPEND	= 0x2,
	NVM_ID_CAP_SCRAMBLE	= 0x4,
	NVM_ID_CAP_ENCRYPT	= 0x8,

	/* Memory types */
	NVM_ID_FMTYPE_SLC	= 0,
	NVM_ID_FMTYPE_MLC	= 1,

	/* Device capabilities */
	NVM_ID_DCAP_BBLKMGMT	= 0x1,
	NVM_UD_DCAP_ECC		= 0x2,
};
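/*
 * Illustrative sketch (assumed usage, not quoted from a driver): a
 * capability bit gates the corresponding access-mode flag on a request:
 *
 *	if (geo->mccap & NVM_ID_CAP_SCRAMBLE)
 *		rqd->flags |= NVM_IO_SCRAMBLE_ENABLE;
 */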

struct nvm_id_lp_mlc {
	u16	num_pairs;
	u8	pairs[886];
};

struct nvm_id_lp_tbl {
	__u8	id[8];
	struct nvm_id_lp_mlc mlc;
};

struct nvm_addrf_12 {
	u8	ch_len;
	u8	lun_len;
	u8	blk_len;
	u8	pg_len;
	u8	pln_len;
	u8	sec_len;

	u8	ch_offset;
	u8	lun_offset;
	u8	blk_offset;
	u8	pg_offset;
	u8	pln_offset;
	u8	sec_offset;

	u64	ch_mask;
	u64	lun_mask;
	u64	blk_mask;
	u64	pg_mask;
	u64	pln_mask;
	u64	sec_mask;
};

struct nvm_addrf {
	u8	ch_len;
	u8	lun_len;
	u8	chk_len;
	u8	sec_len;
	u8	rsv_len[2];

	u8	ch_offset;
	u8	lun_offset;
	u8	chk_offset;
	u8	sec_offset;
	u8	rsv_off[2];

	u64	ch_mask;
	u64	lun_mask;
	u64	chk_mask;
	u64	sec_mask;
	u64	rsv_mask[2];
};

enum {
	/* Chunk states */
	NVM_CHK_ST_FREE =	1 << 0,
	NVM_CHK_ST_CLOSED =	1 << 1,
	NVM_CHK_ST_OPEN =	1 << 2,
	NVM_CHK_ST_OFFLINE =	1 << 3,

	/* Chunk types */
	NVM_CHK_TP_W_SEQ =	1 << 0,
	NVM_CHK_TP_W_RAN =	1 << 1,
	NVM_CHK_TP_SZ_SPEC =	1 << 4,
};

/*
 * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
 * buffer can be used when converting from little endian to cpu addressing.
 */
struct nvm_chk_meta {
	u8	state;
	u8	type;
	u8	wi;
	u8	rsvd[5];
	u64	slba;
	u64	cnlb;
	u64	wp;
};
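/*
 * Illustrative sketch: scanning a meta buffer filled by
 * nvm_get_chunk_meta() (declared below) for free chunks; meta and
 * nchks are hypothetical locals matching that call's arguments:
 *
 *	int i, free_chunks = 0;
 *
 *	for (i = 0; i < nchks; i++)
 *		if (meta[i].state & NVM_CHK_ST_FREE)
 *			free_chunks++;
 */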

struct nvm_target {
	struct list_head list;
	struct nvm_tgt_dev *dev;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_TARGET_DEFAULT_OP (101)
#define NVM_TARGET_MIN_OP (3)
#define NVM_TARGET_MAX_OP (80)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */

struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
	struct nvm_tgt_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *meta_list;
	dma_addr_t dma_meta_list;

	nvm_end_io_fn *end_io;

	uint8_t opcode;
	uint16_t nr_ppas;
	uint16_t flags;

	u64 ppa_status; /* ppa media status */
	int error;

	void *private;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
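/*
 * Illustrative sketch: the two helpers above assume the target's
 * per-command data (the PDU) is allocated immediately behind the
 * nvm_rq, so both conversions are plain pointer arithmetic:
 *
 *	struct nvm_rq *rqd = kzalloc(sizeof(*rqd) + pdu_size, GFP_KERNEL);
 *	void *pdu = nvm_rq_to_pdu(rqd);		// first byte after *rqd
 *
 *	// nvm_rq_from_pdu(pdu) == rqd round-trips back
 *
 * (pdu_size is a hypothetical target-specific size.)
 */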

enum {
	NVM_BLK_ST_FREE =	0x1,	/* Free block */
	NVM_BLK_ST_TGT =	0x2,	/* Block in use by target */
	NVM_BLK_ST_BAD =	0x8,	/* Bad block */
};

/* Instance geometry */
struct nvm_geo {
	/* device reported version */
	u8	major_ver_id;
	u8	minor_ver_id;

	/* kernel short version */
	u8	version;

	/* instance specific geometry */
	int num_ch;
	int num_lun;		/* per channel */

	/* calculated values */
	int all_luns;		/* across channels */
	int all_chunks;		/* across channels */

	int op;			/* over-provision in instance */

	sector_t total_secs;	/* across channels */

	/* chunk geometry */
	u32	num_chk;	/* chunks per lun */
	u32	clba;		/* sectors per chunk */
	u16	csecs;		/* sector size */
	u16	sos;		/* out-of-band area size */

	/* device write constraints */
	u32	ws_min;		/* minimum write size */
	u32	ws_opt;		/* optimal write size */
	u32	mw_cunits;	/* distance required for successful read */
	u32	maxoc;		/* maximum open chunks */
	u32	maxocpu;	/* maximum open chunks per parallel unit */

	/* device capabilities */
	u32	mccap;

	/* device timings */
	u32	trdt;		/* Avg. Tread (ns) */
	u32	trdm;		/* Max Tread (ns) */
	u32	tprt;		/* Avg. Tprog (ns) */
	u32	tprm;		/* Max Tprog (ns) */
	u32	tbet;		/* Avg. Terase (ns) */
	u32	tbem;		/* Max Terase (ns) */

	/* generic address format */
	struct nvm_addrf addrf;

	/* 1.2 compatibility */
	u8	vmnt;
	u32	cap;
	u32	dom;

	u8	mtype;
	u8	fmtype;

	u16	cpar;
	u32	mpos;

	u8	num_pln;
	u8	pln_mode;
	u16	num_pg;
	u16	fpg_sz;
};
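/*
 * Illustrative sketch of the relationships implied by the field
 * comments above (an assumption, not quoted from the driver code):
 *
 *	geo->all_luns   = geo->num_ch * geo->num_lun;
 *	geo->all_chunks = geo->all_luns * geo->num_chk;
 *	geo->total_secs = (sector_t)geo->all_chunks * geo->clba;
 */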

/* sub-device structure */
struct nvm_tgt_dev {
	/* Device information */
	struct nvm_geo geo;

	/* Base ppas for target LUNs */
	struct ppa_addr *luns;

	struct request_queue *q;

	struct nvm_dev *parent;
	void *map;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;

	/* Device information */
	struct nvm_geo geo;

	unsigned long *lun_map;
	void *dma_pool;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
	void *private_data;

	void *rmap;

	struct mutex mlock;
	spinlock_t lock;

	/* target management */
	struct list_head area_list;
	struct list_head targets;
};

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
						  struct ppa_addr r)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr l;

	if (geo->version == NVM_OCSSD_SPEC_12) {
		struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;

		l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
		l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
		l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
		l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
		l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
		l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
	} else {
		struct nvm_addrf *lbaf = &geo->addrf;

		l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
		l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
		l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
		l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
	}

	return l;
}

static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
						  struct ppa_addr r)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr l;

	l.ppa = 0;

	if (geo->version == NVM_OCSSD_SPEC_12) {
		struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;

		l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
		l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
		l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
		l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
		l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
		l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
	} else {
		struct nvm_addrf *lbaf = &geo->addrf;

		l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
		l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
		l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
		l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
	}

	return l;
}
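/*
 * Illustrative sketch: the two converters are inverses, so an address
 * survives a round trip through the device format, assuming each field
 * fits the device-reported width and reserved bits are zero:
 *
 *	struct ppa_addr dev_ppa = generic_to_dev_addr(dev, ppa);
 *	struct ppa_addr back = dev_to_generic_addr(dev, dev_ppa);
 *
 *	// back.ppa == ppa.ppa
 */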

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
				int flags);
typedef void (nvm_tgt_exit_fn)(void *);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* sysfs */
	nvm_tgt_sysfs_init_fn *sysfs_init;
	nvm_tgt_sysfs_exit_fn *sysfs_exit;

	/* For internal use */
	struct list_head list;
	struct module *owner;
};
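/*
 * Illustrative sketch: a target (pblk is the in-tree example) fills a
 * nvm_tgt_type and registers it at module load; the mytgt_* names are
 * hypothetical placeholders:
 *
 *	static struct nvm_tgt_type tt_mytgt = {
 *		.name		= "mytgt",
 *		.version	= {1, 0, 0},
 *		.make_rq	= mytgt_make_rq,
 *		.capacity	= mytgt_capacity,
 *		.init		= mytgt_init,
 *		.exit		= mytgt_exit,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init mytgt_module_init(void)
 *	{
 *		return nvm_register_tgt_type(&tt_mytgt);
 *	}
 */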

extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);

extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev,
			      struct nvm_chk_meta *meta, struct ppa_addr ppa,
			      int nchks);

extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
			      int, int);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
extern void nvm_end_io(struct nvm_rq *);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);

#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline struct nvm_dev *nvm_alloc_dev(int node)
{
	return ERR_PTR(-EINVAL);
}
static inline int nvm_register(struct nvm_dev *dev)
{
	return -EINVAL;
}
static inline void nvm_unregister(struct nvm_dev *dev) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */