/*
 * Extraction artifact: web-page chrome ("Linux Audio" header, training
 * course banner, "Loading...") removed. The code below is the generic
 * NAND framework core as shipped in Linux v6.8.
 */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2017 Free Electrons
  4 *
  5 * Authors:
  6 *	Boris Brezillon <boris.brezillon@free-electrons.com>
  7 *	Peter Pan <peterpandong@micron.com>
  8 */
  9
 10#define pr_fmt(fmt)	"nand: " fmt
 11
 12#include <linux/module.h>
 13#include <linux/mtd/nand.h>
 14
 15/**
 16 * nanddev_isbad() - Check if a block is bad
 17 * @nand: NAND device
 18 * @pos: position pointing to the block we want to check
 19 *
 20 * Return: true if the block is bad, false otherwise.
 21 */
 22bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
 23{
 24	if (mtd_check_expert_analysis_mode())
 25		return false;
 26
 27	if (nanddev_bbt_is_initialized(nand)) {
 28		unsigned int entry;
 29		int status;
 30
 31		entry = nanddev_bbt_pos_to_entry(nand, pos);
 32		status = nanddev_bbt_get_block_status(nand, entry);
 33		/* Lazy block status retrieval */
 34		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
 35			if (nand->ops->isbad(nand, pos))
 36				status = NAND_BBT_BLOCK_FACTORY_BAD;
 37			else
 38				status = NAND_BBT_BLOCK_GOOD;
 39
 40			nanddev_bbt_set_block_status(nand, entry, status);
 41		}
 42
 43		if (status == NAND_BBT_BLOCK_WORN ||
 44		    status == NAND_BBT_BLOCK_FACTORY_BAD)
 45			return true;
 46
 47		return false;
 48	}
 49
 50	return nand->ops->isbad(nand, pos);
 51}
 52EXPORT_SYMBOL_GPL(nanddev_isbad);
 53
/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Mark a block bad. This function is updating the BBT if available and
 * calls the low-level markbad hook (nand->ops->markbad()).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	/* Already known-bad: nothing to do. */
	if (nanddev_isbad(nand, pos))
		return 0;

	/*
	 * Try to write the bad-block marker to the flash itself. A failure
	 * is only warned about (not returned immediately): the block can
	 * still be recorded in the BBT below.
	 */
	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	/* Record the block as worn in the in-memory BBT... */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	/* ...and persist the updated table. */
	ret = nanddev_bbt_update(nand);

out:
	/* Only bump the stats when the whole sequence succeeded. */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);
 95
 96/**
 97 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 98 * @nand: NAND device
 99 * @pos: NAND position to test
100 *
101 * Checks whether the eraseblock pointed by @pos is reserved or not.
102 *
103 * Return: true if the eraseblock is reserved, false otherwise.
104 */
105bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
106{
107	unsigned int entry;
108	int status;
109
110	if (!nanddev_bbt_is_initialized(nand))
111		return false;
112
113	/* Return info from the table */
114	entry = nanddev_bbt_pos_to_entry(nand, pos);
115	status = nanddev_bbt_get_block_status(nand, entry);
116	return status == NAND_BBT_BLOCK_RESERVED;
117}
118EXPORT_SYMBOL_GPL(nanddev_isreserved);
119
120/**
121 * nanddev_erase() - Erase a NAND portion
122 * @nand: NAND device
123 * @pos: position of the block to erase
124 *
125 * Erases the block if it's not bad.
126 *
127 * Return: 0 in case of success, a negative error code otherwise.
128 */
129static int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
130{
131	if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
132		pr_warn("attempt to erase a bad/reserved block @%llx\n",
133			nanddev_pos_to_offs(nand, pos));
134		return -EIO;
135	}
136
137	return nand->ops->erase(nand, pos);
138}
 
139
140/**
141 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
142 * @mtd: MTD device
143 * @einfo: erase request
144 *
145 * This is a simple mtd->_erase() implementation iterating over all blocks
146 * concerned by @einfo and calling nand->ops->erase() on each of them.
147 *
148 * Note that mtd->_erase should not be directly assigned to this helper,
149 * because there's no locking here. NAND specialized layers should instead
150 * implement there own wrapper around nanddev_mtd_erase() taking the
151 * appropriate lock before calling nanddev_mtd_erase().
152 *
153 * Return: 0 in case of success, a negative error code otherwise.
154 */
155int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
156{
157	struct nand_device *nand = mtd_to_nanddev(mtd);
158	struct nand_pos pos, last;
159	int ret;
160
161	nanddev_offs_to_pos(nand, einfo->addr, &pos);
162	nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
163	while (nanddev_pos_cmp(&pos, &last) <= 0) {
164		ret = nanddev_erase(nand, &pos);
165		if (ret) {
166			einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
167
168			return ret;
169		}
170
171		nanddev_pos_next_eraseblock(nand, &pos);
172	}
173
174	return 0;
175}
176EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
177
178/**
179 * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblock on
180 *				  a specific region of the NAND device
181 * @mtd: MTD device
182 * @offs: offset of the NAND region
183 * @len: length of the NAND region
184 *
185 * Default implementation for mtd->_max_bad_blocks(). Only works if
186 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
187 *
188 * Return: a positive number encoding the maximum number of eraseblocks on a
189 * portion of memory, a negative error code otherwise.
190 */
191int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
192{
193	struct nand_device *nand = mtd_to_nanddev(mtd);
194	struct nand_pos pos, end;
195	unsigned int max_bb = 0;
196
197	if (!nand->memorg.max_bad_eraseblocks_per_lun)
198		return -ENOTSUPP;
199
200	nanddev_offs_to_pos(nand, offs, &pos);
201	nanddev_offs_to_pos(nand, offs + len, &end);
202
203	for (nanddev_offs_to_pos(nand, offs, &pos);
204	     nanddev_pos_cmp(&pos, &end) < 0;
205	     nanddev_pos_next_lun(nand, &pos))
206		max_bb += nand->memorg.max_bad_eraseblocks_per_lun;
207
208	return max_bb;
209}
210EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
211
/**
 * nanddev_get_ecc_engine() - Find and get a suitable ECC engine
 * @nand: NAND device
 *
 * Resolves the ECC engine type from the user configuration (device tree),
 * falling back to the NAND chip defaults, then attaches the matching
 * engine to nand->ecc.engine.
 *
 * Return: 0 in case of success (nand->ecc.engine may still be NULL when no
 * correction was requested), -EPROBE_DEFER when the on-host engine is not
 * ready yet, a negative error code otherwise.
 */
static int nanddev_get_ecc_engine(struct nand_device *nand)
{
	int engine_type;

	/* Read the user desires in terms of ECC engine/configuration */
	of_get_nand_ecc_user_config(nand);

	engine_type = nand->ecc.user_conf.engine_type;
	if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		engine_type = nand->ecc.defaults.engine_type;

	switch (engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		/* No correction requested: leave nand->ecc.engine NULL. */
		return 0;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		nand->ecc.engine = nand_ecc_get_sw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		nand->ecc.engine = nand_ecc_get_on_host_hw_engine(nand);
		/*
		 * Propagate probe deferral so the caller can retry later.
		 * NOTE(review): only -EPROBE_DEFER is unwrapped here; other
		 * ERR_PTR() values would pass the NULL check below —
		 * presumably the helper returns NULL on plain failure.
		 * Confirm against nand_ecc_get_on_host_hw_engine().
		 */
		if (PTR_ERR(nand->ecc.engine) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		break;
	default:
		/* Unknown type: complain, engine stays unset and fails below. */
		pr_err("Missing ECC engine type\n");
	}

	if (!nand->ecc.engine)
		return  -EINVAL;

	return 0;
}
250
251/**
252 * nanddev_put_ecc_engine() - Dettach and put the in-use ECC engine
253 * @nand: NAND device
254 */
255static int nanddev_put_ecc_engine(struct nand_device *nand)
256{
257	switch (nand->ecc.ctx.conf.engine_type) {
258	case NAND_ECC_ENGINE_TYPE_ON_HOST:
259		nand_ecc_put_on_host_hw_engine(nand);
260		break;
261	case NAND_ECC_ENGINE_TYPE_NONE:
262	case NAND_ECC_ENGINE_TYPE_SOFT:
263	case NAND_ECC_ENGINE_TYPE_ON_DIE:
264	default:
265		break;
266	}
267
268	return 0;
269}
270
271/**
272 * nanddev_find_ecc_configuration() - Find a suitable ECC configuration
273 * @nand: NAND device
274 */
275static int nanddev_find_ecc_configuration(struct nand_device *nand)
276{
277	int ret;
278
279	if (!nand->ecc.engine)
280		return -ENOTSUPP;
281
282	ret = nand_ecc_init_ctx(nand);
283	if (ret)
284		return ret;
285
286	if (!nand_ecc_is_strong_enough(nand))
287		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
288			nand->mtd.name);
289
290	return 0;
291}
292
293/**
294 * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
295 * @nand: NAND device
296 */
297int nanddev_ecc_engine_init(struct nand_device *nand)
298{
299	int ret;
300
301	/* Look for the ECC engine to use */
302	ret = nanddev_get_ecc_engine(nand);
303	if (ret) {
304		if (ret != -EPROBE_DEFER)
305			pr_err("No ECC engine found\n");
306
307		return ret;
308	}
309
310	/* No ECC engine requested */
311	if (!nand->ecc.engine)
312		return 0;
313
314	/* Configure the engine: balance user input and chip requirements */
315	ret = nanddev_find_ecc_configuration(nand);
316	if (ret) {
317		pr_err("No suitable ECC configuration\n");
318		nanddev_put_ecc_engine(nand);
319
320		return ret;
321	}
322
323	return 0;
324}
325EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);
326
327/**
328 * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
329 * @nand: NAND device
330 */
331void nanddev_ecc_engine_cleanup(struct nand_device *nand)
332{
333	if (nand->ecc.engine)
334		nand_ecc_cleanup_ctx(nand);
335
336	nanddev_put_ecc_engine(nand);
337}
338EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);
339
340/**
341 * nanddev_init() - Initialize a NAND device
342 * @nand: NAND device
343 * @ops: NAND device operations
344 * @owner: NAND device owner
345 *
346 * Initializes a NAND device object. Consistency checks are done on @ops and
347 * @nand->memorg. Also takes care of initializing the BBT.
348 *
349 * Return: 0 in case of success, a negative error code otherwise.
350 */
351int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
352		 struct module *owner)
353{
354	struct mtd_info *mtd = nanddev_to_mtd(nand);
355	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
356
357	if (!nand || !ops)
358		return -EINVAL;
359
360	if (!ops->erase || !ops->markbad || !ops->isbad)
361		return -EINVAL;
362
363	if (!memorg->bits_per_cell || !memorg->pagesize ||
364	    !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
365	    !memorg->planes_per_lun || !memorg->luns_per_target ||
366	    !memorg->ntargets)
367		return -EINVAL;
368
369	nand->rowconv.eraseblock_addr_shift =
370					fls(memorg->pages_per_eraseblock - 1);
371	nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
372				       nand->rowconv.eraseblock_addr_shift;
373
374	nand->ops = ops;
375
376	mtd->type = memorg->bits_per_cell == 1 ?
377		    MTD_NANDFLASH : MTD_MLCNANDFLASH;
378	mtd->flags = MTD_CAP_NANDFLASH;
379	mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
380	mtd->writesize = memorg->pagesize;
381	mtd->writebufsize = memorg->pagesize;
382	mtd->oobsize = memorg->oobsize;
383	mtd->size = nanddev_size(nand);
384	mtd->owner = owner;
385
386	return nanddev_bbt_init(nand);
387}
388EXPORT_SYMBOL_GPL(nanddev_init);
389
/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init(): currently this
 * only means tearing down the BBT when one was initialized.
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (!nanddev_bbt_is_initialized(nand))
		return;

	nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);
402
403MODULE_DESCRIPTION("Generic NAND framework");
404MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
405MODULE_LICENSE("GPL v2");
/* A second copy of the same file follows, as shipped in Linux v4.17. */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2017 Free Electrons
  4 *
  5 * Authors:
  6 *	Boris Brezillon <boris.brezillon@free-electrons.com>
  7 *	Peter Pan <peterpandong@micron.com>
  8 */
  9
 10#define pr_fmt(fmt)	"nand: " fmt
 11
 12#include <linux/module.h>
 13#include <linux/mtd/nand.h>
 14
 15/**
 16 * nanddev_isbad() - Check if a block is bad
 17 * @nand: NAND device
 18 * @pos: position pointing to the block we want to check
 19 *
 20 * Return: true if the block is bad, false otherwise.
 21 */
 22bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
 23{
 
 
 
 24	if (nanddev_bbt_is_initialized(nand)) {
 25		unsigned int entry;
 26		int status;
 27
 28		entry = nanddev_bbt_pos_to_entry(nand, pos);
 29		status = nanddev_bbt_get_block_status(nand, entry);
 30		/* Lazy block status retrieval */
 31		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
 32			if (nand->ops->isbad(nand, pos))
 33				status = NAND_BBT_BLOCK_FACTORY_BAD;
 34			else
 35				status = NAND_BBT_BLOCK_GOOD;
 36
 37			nanddev_bbt_set_block_status(nand, entry, status);
 38		}
 39
 40		if (status == NAND_BBT_BLOCK_WORN ||
 41		    status == NAND_BBT_BLOCK_FACTORY_BAD)
 42			return true;
 43
 44		return false;
 45	}
 46
 47	return nand->ops->isbad(nand, pos);
 48}
 49EXPORT_SYMBOL_GPL(nanddev_isbad);
 50
/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Mark a block bad. This function is updating the BBT if available and
 * calls the low-level markbad hook (nand->ops->markbad()).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	/* Already known-bad: nothing to do. */
	if (nanddev_isbad(nand, pos))
		return 0;

	/*
	 * Try to write the bad-block marker to the flash itself. A failure
	 * is only warned about (not returned immediately): the block can
	 * still be recorded in the BBT below.
	 */
	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	/* Record the block as worn in the in-memory BBT... */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	/* ...and persist the updated table. */
	ret = nanddev_bbt_update(nand);

out:
	/* Only bump the stats when the whole sequence succeeded. */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);
 92
 93/**
 94 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 95 * @nand: NAND device
 96 * @pos: NAND position to test
 97 *
 98 * Checks whether the eraseblock pointed by @pos is reserved or not.
 99 *
100 * Return: true if the eraseblock is reserved, false otherwise.
101 */
102bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
103{
104	unsigned int entry;
105	int status;
106
107	if (!nanddev_bbt_is_initialized(nand))
108		return false;
109
110	/* Return info from the table */
111	entry = nanddev_bbt_pos_to_entry(nand, pos);
112	status = nanddev_bbt_get_block_status(nand, entry);
113	return status == NAND_BBT_BLOCK_RESERVED;
114}
115EXPORT_SYMBOL_GPL(nanddev_isreserved);
116
117/**
118 * nanddev_erase() - Erase a NAND portion
119 * @nand: NAND device
120 * @pos: position of the block to erase
121 *
122 * Erases the block if it's not bad.
123 *
124 * Return: 0 in case of success, a negative error code otherwise.
125 */
126int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
127{
128	if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
129		pr_warn("attempt to erase a bad/reserved block @%llx\n",
130			nanddev_pos_to_offs(nand, pos));
131		return -EIO;
132	}
133
134	return nand->ops->erase(nand, pos);
135}
136EXPORT_SYMBOL_GPL(nanddev_erase);
137
138/**
139 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
140 * @mtd: MTD device
141 * @einfo: erase request
142 *
143 * This is a simple mtd->_erase() implementation iterating over all blocks
144 * concerned by @einfo and calling nand->ops->erase() on each of them.
145 *
146 * Note that mtd->_erase should not be directly assigned to this helper,
147 * because there's no locking here. NAND specialized layers should instead
148 * implement there own wrapper around nanddev_mtd_erase() taking the
149 * appropriate lock before calling nanddev_mtd_erase().
150 *
151 * Return: 0 in case of success, a negative error code otherwise.
152 */
153int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
154{
155	struct nand_device *nand = mtd_to_nanddev(mtd);
156	struct nand_pos pos, last;
157	int ret;
158
159	nanddev_offs_to_pos(nand, einfo->addr, &pos);
160	nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
161	while (nanddev_pos_cmp(&pos, &last) <= 0) {
162		ret = nanddev_erase(nand, &pos);
163		if (ret) {
164			einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
165
166			return ret;
167		}
168
169		nanddev_pos_next_eraseblock(nand, &pos);
170	}
171
172	return 0;
173}
174EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
176/**
177 * nanddev_init() - Initialize a NAND device
178 * @nand: NAND device
179 * @ops: NAND device operations
180 * @owner: NAND device owner
181 *
182 * Initializes a NAND device object. Consistency checks are done on @ops and
183 * @nand->memorg. Also takes care of initializing the BBT.
184 *
185 * Return: 0 in case of success, a negative error code otherwise.
186 */
187int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
188		 struct module *owner)
189{
190	struct mtd_info *mtd = nanddev_to_mtd(nand);
191	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
192
193	if (!nand || !ops)
194		return -EINVAL;
195
196	if (!ops->erase || !ops->markbad || !ops->isbad)
197		return -EINVAL;
198
199	if (!memorg->bits_per_cell || !memorg->pagesize ||
200	    !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
201	    !memorg->planes_per_lun || !memorg->luns_per_target ||
202	    !memorg->ntargets)
203		return -EINVAL;
204
205	nand->rowconv.eraseblock_addr_shift =
206					fls(memorg->pages_per_eraseblock - 1);
207	nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
208				       nand->rowconv.eraseblock_addr_shift;
209
210	nand->ops = ops;
211
212	mtd->type = memorg->bits_per_cell == 1 ?
213		    MTD_NANDFLASH : MTD_MLCNANDFLASH;
214	mtd->flags = MTD_CAP_NANDFLASH;
215	mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
216	mtd->writesize = memorg->pagesize;
217	mtd->writebufsize = memorg->pagesize;
218	mtd->oobsize = memorg->oobsize;
219	mtd->size = nanddev_size(nand);
220	mtd->owner = owner;
221
222	return nanddev_bbt_init(nand);
223}
224EXPORT_SYMBOL_GPL(nanddev_init);
225
/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init(): currently this
 * only means tearing down the BBT when one was initialized.
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (!nanddev_bbt_is_initialized(nand))
		return;

	nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);
238
239MODULE_DESCRIPTION("Generic NAND framework");
240MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
241MODULE_LICENSE("GPL v2");