Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * LPDDR flash memory device operations. This module provides read, write,
  4 * erase, lock/unlock support for LPDDR flash memories
  5 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
  6 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
  7 * Many thanks to Roman Borisov for initial enabling
  8 *
  9 * TODO:
 10 * Implement VPP management
 11 * Implement XIP support
 12 * Implement OTP support
 13 */
 14#include <linux/mtd/pfow.h>
 15#include <linux/mtd/qinfo.h>
 16#include <linux/slab.h>
 17#include <linux/module.h>
 18
 19static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
 20					size_t *retlen, u_char *buf);
 21static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
 22				size_t len, size_t *retlen, const u_char *buf);
 23static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
 24				unsigned long count, loff_t to, size_t *retlen);
 25static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
 26static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 27static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 28static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
 29			size_t *retlen, void **mtdbuf, resource_size_t *phys);
 30static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
 31static int get_chip(struct map_info *map, struct flchip *chip, int mode);
 32static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
 33static void put_chip(struct map_info *map, struct flchip *chip);
 34
 35struct mtd_info *lpddr_cmdset(struct map_info *map)
 36{
 37	struct lpddr_private *lpddr = map->fldrv_priv;
 38	struct flchip_shared *shared;
 39	struct flchip *chip;
 40	struct mtd_info *mtd;
 41	int numchips;
 42	int i, j;
 43
 44	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
 45	if (!mtd)
 46		return NULL;
 47	mtd->priv = map;
 48	mtd->type = MTD_NORFLASH;
 49
 50	/* Fill in the default mtd operations */
 51	mtd->_read = lpddr_read;
 52	mtd->type = MTD_NORFLASH;
 53	mtd->flags = MTD_CAP_NORFLASH;
 54	mtd->flags &= ~MTD_BIT_WRITEABLE;
 55	mtd->_erase = lpddr_erase;
 56	mtd->_write = lpddr_write_buffers;
 57	mtd->_writev = lpddr_writev;
 58	mtd->_lock = lpddr_lock;
 59	mtd->_unlock = lpddr_unlock;
 60	if (map_is_linear(map)) {
 61		mtd->_point = lpddr_point;
 62		mtd->_unpoint = lpddr_unpoint;
 63	}
 64	mtd->size = 1ULL << lpddr->qinfo->DevSizeShift;
 65	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
 66	mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
 67
 68	shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
 69						GFP_KERNEL);
 70	if (!shared) {
 71		kfree(mtd);
 72		return NULL;
 73	}
 74
 75	chip = &lpddr->chips[0];
 76	numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
 77	for (i = 0; i < numchips; i++) {
 78		shared[i].writing = shared[i].erasing = NULL;
 79		mutex_init(&shared[i].lock);
 80		for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
 81			*chip = lpddr->chips[i];
 82			chip->start += j << lpddr->chipshift;
 83			chip->oldstate = chip->state = FL_READY;
 84			chip->priv = &shared[i];
 85			/* those should be reset too since
 86			   they create memory references. */
 87			init_waitqueue_head(&chip->wq);
 88			mutex_init(&chip->mutex);
 89			chip++;
 90		}
 91	}
 92
 93	return mtd;
 94}
 95EXPORT_SYMBOL(lpddr_cmdset);
 96
 97static void print_drs_error(unsigned int dsr)
 98{
 99	int prog_status = (dsr & DSR_RPS) >> 8;
100
101	if (!(dsr & DSR_AVAILABLE))
102		pr_notice("DSR.15: (0) Device not Available\n");
103	if ((prog_status & 0x03) == 0x03)
104		pr_notice("DSR.9,8: (11) Attempt to program invalid half with 41h command\n");
105	else if (prog_status & 0x02)
106		pr_notice("DSR.9,8: (10) Object Mode Program attempt in region with Control Mode data\n");
107	else if (prog_status &  0x01)
108		pr_notice("DSR.9,8: (01) Program attempt in region with Object Mode data\n");
109	if (!(dsr & DSR_READY_STATUS))
110		pr_notice("DSR.7: (0) Device is Busy\n");
111	if (dsr & DSR_ESS)
112		pr_notice("DSR.6: (1) Erase Suspended\n");
113	if (dsr & DSR_ERASE_STATUS)
114		pr_notice("DSR.5: (1) Erase/Blank check error\n");
115	if (dsr & DSR_PROGRAM_STATUS)
116		pr_notice("DSR.4: (1) Program Error\n");
117	if (dsr & DSR_VPPS)
118		pr_notice("DSR.3: (1) Vpp low detect, operation aborted\n");
119	if (dsr & DSR_PSS)
120		pr_notice("DSR.2: (1) Program suspended\n");
121	if (dsr & DSR_DPS)
122		pr_notice("DSR.1: (1) Aborted Erase/Program attempt on locked block\n");
123}
124
/*
 * wait_for_ready() - poll the PFOW Device Status Register until the chip
 * reports ready, or until the timeout expires.
 *
 * Must be called with chip->mutex held; the lock is dropped and re-taken
 * around each sleep/busy-wait interval so another context may suspend the
 * running operation in the meantime.
 *
 * @chip_op_time: expected operation duration in microseconds; 0 selects
 *		  a default timeout of 500000 polls.
 *
 * Returns 0 on success, -ETIME on timeout, or -EIO if the DSR flags an
 * error (which is also cleared and decoded via print_drs_error()).  On
 * exit chip->state is forced back to FL_READY.
 */
static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d \n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);

		/* If our operation was suspended while we slept, wait on
		   chip->wq until the chip is back in the state we started
		   the operation in before polling again. */
		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended)  {
			/* Suspend has occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR*/
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}
197
/*
 * get_chip() - acquire the right to start operation @mode on @chip.
 *
 * Called with chip->mutex held and returns with it still held.  For
 * write/erase modes on a partitioned chip this first arbitrates for the
 * chip-wide program/erase engine tracked in the flchip_shared record
 * (suspending or waiting out a contending partition as needed), then
 * drives this partition into a usable state via chip_ready().
 *
 * Returns 0 on success or a negative error code from chip_ready().
 */
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already in FL_SYNCING
			 * state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we have suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
293
/*
 * chip_ready() - drive @chip into a state where operation @mode may start.
 *
 * Called with chip->mutex held.  Returns 0 when the caller may proceed,
 * -EAGAIN after having slept on chip->wq (caller must re-evaluate from
 * scratch, see the retry loops in get_chip()), or -EIO if an attempt to
 * suspend a running erase failed.  chip->mutex is dropped and re-taken
 * while sleeping.
 */
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* A read/point request can proceed if the hardware supports
		   erase-suspend; anything else must wait for the erase. */
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. something got wrong. */
			/* Resume and pretend we weren't here.  */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed."
					"State may be wrong \n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;
		/* Erase suspend */
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
347
/*
 * put_chip() - release ownership taken by get_chip() and resume any
 * erase that was suspended on our behalf.
 *
 * Called with chip->mutex held.  On shared (partitioned) chips the
 * chip-wide engine is handed back to the partition whose erase we had
 * suspended; put_chip() is then re-entered on that partition (the
 * "loaner") so the resume happens in its context.  Wakes chip->wq
 * waiters in all cases.
 */
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	/* Resume a suspended erase on this partition, if any. */
	switch (chip->oldstate) {
	case FL_ERASING:
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
402
/*
 * do_write_buffer() - program up to one write-buffer's worth of data.
 *
 * Gathers @len bytes from the kvec array at *@pvec starting at offset
 * *@pvec_seek (both are advanced past the data consumed), stages them
 * word-by-word into the chip's program buffer and then issues
 * LPDDR_BUFF_PROGRAM at @adr.  A write that starts unaligned to the map
 * bank width is padded on the left with 0xff so untouched cells keep
 * their erased value.
 *
 * Returns 0 on success or a negative error from get_chip()/
 * wait_for_ready().
 */
static int do_write_buffer(struct map_info *map, struct flchip *chip,
			unsigned long adr, const struct kvec **pvec,
			unsigned long *pvec_seek, int len)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	map_word datum;
	int ret, wbufsize, word_gap;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long prog_buf_ofs;

	wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	/* Figure out the number of words to write */
	/* word_gap = count of pad bytes needed before the first real byte;
	   adr is pulled back so the padded word is bank-aligned. */
	word_gap = (-adr & (map_bankwidth(map)-1));
	if (word_gap) {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
	/* Write data */
	/* Get the program buffer offset from PFOW register data first*/
	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		/* n = bytes taken this pass: whatever space is left in the
		   current word, capped by the current kvec and by len. */
		int n = map_bankwidth(map) - word_gap;

		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		/* A short final word is right-padded with 0xff too. */
		if (!word_gap && (len < map_bankwidth(map)))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
				vec->iov_base + vec_seek, word_gap, n);

		len -= n;
		word_gap += n;
		/* Flush the assembled word once it is full or data ran out. */
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, prog_buf_ofs);
			prog_buf_ofs += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
	chip->state = FL_WRITING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
	if (ret)	{
		printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
			map->name, ret, adr);
		goto out;
	}

 out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
480
481static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
482{
483	struct map_info *map = mtd->priv;
484	struct lpddr_private *lpddr = map->fldrv_priv;
485	int chipnum = adr >> lpddr->chipshift;
486	struct flchip *chip = &lpddr->chips[chipnum];
487	int ret;
488
489	mutex_lock(&chip->mutex);
490	ret = get_chip(map, chip, FL_ERASING);
491	if (ret) {
492		mutex_unlock(&chip->mutex);
493		return ret;
494	}
495	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
496	chip->state = FL_ERASING;
497	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
498	if (ret) {
499		printk(KERN_WARNING"%s Erase block error %d at : %llx\n",
500			map->name, ret, adr);
501		goto out;
502	}
503 out:	put_chip(map, chip);
504	mutex_unlock(&chip->mutex);
505	return ret;
506}
507
508static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
509			size_t *retlen, u_char *buf)
510{
511	struct map_info *map = mtd->priv;
512	struct lpddr_private *lpddr = map->fldrv_priv;
513	int chipnum = adr >> lpddr->chipshift;
514	struct flchip *chip = &lpddr->chips[chipnum];
515	int ret = 0;
516
517	mutex_lock(&chip->mutex);
518	ret = get_chip(map, chip, FL_READY);
519	if (ret) {
520		mutex_unlock(&chip->mutex);
521		return ret;
522	}
523
524	map_copy_from(map, buf, adr, len);
525	*retlen = len;
526
527	put_chip(map, chip);
528	mutex_unlock(&chip->mutex);
529	return ret;
530}
531
/*
 * lpddr_point() - MTD ->_point(): hand the caller a direct pointer into
 * the linearly mapped flash covering [adr, adr+len).
 *
 * Each chip touched is moved to FL_POINT and its ref_point_counter is
 * bumped; lpddr_unpoint() undoes this.  The walk stops early at a chip
 * whose mapping is not virtually contiguous with the previous one, or
 * when get_chip() fails; *retlen then covers only the region actually
 * pointed.
 *
 * NOTE(review): the function returns 0 even when the walk stopped early
 * (including on a get_chip() error), so callers see success with a
 * short *retlen — confirm this is the intended contract.
 */
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	if (!map->virt)
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);
	*mtdbuf = (void *)map->virt + chip->start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		/* thislen: how much of the request this chip can serve. */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		mutex_unlock(&chip->mutex);
		if (ret)
			break;

		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}
584
585static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
586{
587	struct map_info *map = mtd->priv;
588	struct lpddr_private *lpddr = map->fldrv_priv;
589	int chipnum = adr >> lpddr->chipshift, err = 0;
590	unsigned long ofs;
591
592	/* ofs: offset within the first chip that the first read should start */
593	ofs = adr - (chipnum << lpddr->chipshift);
594
595	while (len) {
596		unsigned long thislen;
597		struct flchip *chip;
598
599		chip = &lpddr->chips[chipnum];
600		if (chipnum >= lpddr->numchips)
601			break;
602
603		if ((len + ofs - 1) >> lpddr->chipshift)
604			thislen = (1<<lpddr->chipshift) - ofs;
605		else
606			thislen = len;
607
608		mutex_lock(&chip->mutex);
609		if (chip->state == FL_POINT) {
610			chip->ref_point_counter--;
611			if (chip->ref_point_counter == 0)
612				chip->state = FL_READY;
613		} else {
614			printk(KERN_WARNING "%s: Warning: unpoint called on non"
615					"pointed region\n", map->name);
616			err = -EINVAL;
617		}
618
619		put_chip(map, chip);
620		mutex_unlock(&chip->mutex);
621
622		len -= thislen;
623		ofs = 0;
624		chipnum++;
625	}
626
627	return err;
628}
629
630static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
631				size_t *retlen, const u_char *buf)
632{
633	struct kvec vec;
634
635	vec.iov_base = (void *) buf;
636	vec.iov_len = len;
637
638	return lpddr_writev(mtd, &vec, 1, to, retlen);
639}
640
641
642static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
643				unsigned long count, loff_t to, size_t *retlen)
644{
645	struct map_info *map = mtd->priv;
646	struct lpddr_private *lpddr = map->fldrv_priv;
647	int ret = 0;
648	int chipnum;
649	unsigned long ofs, vec_seek, i;
650	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
651	size_t len = 0;
652
653	for (i = 0; i < count; i++)
654		len += vecs[i].iov_len;
655
656	if (!len)
657		return 0;
658
659	chipnum = to >> lpddr->chipshift;
660
661	ofs = to;
662	vec_seek = 0;
663
664	do {
665		/* We must not cross write block boundaries */
666		int size = wbufsize - (ofs & (wbufsize-1));
667
668		if (size > len)
669			size = len;
670
671		ret = do_write_buffer(map, &lpddr->chips[chipnum],
672					  ofs, &vecs, &vec_seek, size);
673		if (ret)
674			return ret;
675
676		ofs += size;
677		(*retlen) += size;
678		len -= size;
679
680		/* Be nice and reschedule with the chip in a usable
681		 * state for other processes */
682		cond_resched();
683
684	} while (len);
685
686	return 0;
687}
688
689static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
690{
691	unsigned long ofs, len;
692	int ret;
693	struct map_info *map = mtd->priv;
694	struct lpddr_private *lpddr = map->fldrv_priv;
695	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
696
697	ofs = instr->addr;
698	len = instr->len;
699
700	while (len > 0) {
701		ret = do_erase_oneblock(mtd, ofs);
702		if (ret)
703			return ret;
704		ofs += size;
705		len -= size;
706	}
707
708	return 0;
709}
710
711#define DO_XXLOCK_LOCK		1
712#define DO_XXLOCK_UNLOCK	2
713static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
714{
715	int ret = 0;
716	struct map_info *map = mtd->priv;
717	struct lpddr_private *lpddr = map->fldrv_priv;
718	int chipnum = adr >> lpddr->chipshift;
719	struct flchip *chip = &lpddr->chips[chipnum];
720
721	mutex_lock(&chip->mutex);
722	ret = get_chip(map, chip, FL_LOCKING);
723	if (ret) {
724		mutex_unlock(&chip->mutex);
725		return ret;
726	}
727
728	if (thunk == DO_XXLOCK_LOCK) {
729		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
730		chip->state = FL_LOCKING;
731	} else if (thunk == DO_XXLOCK_UNLOCK) {
732		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
733		chip->state = FL_UNLOCKING;
734	} else
735		BUG();
736
737	ret = wait_for_ready(map, chip, 1);
738	if (ret)	{
739		printk(KERN_ERR "%s: block unlock error status %d \n",
740				map->name, ret);
741		goto out;
742	}
743out:	put_chip(map, chip);
744	mutex_unlock(&chip->mutex);
745	return ret;
746}
747
/* MTD ->_lock(): write-protect the blocks covering [ofs, ofs+len).
 * NOTE(review): len is narrowed from u64 to u32 by do_xxlock() — confirm
 * callers never pass ranges >= 4 GiB. */
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}
752
/* MTD ->_unlock(): remove write protection from [ofs, ofs+len). */
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
757
758MODULE_LICENSE("GPL");
759MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
760MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * LPDDR flash memory device operations. This module provides read, write,
  4 * erase, lock/unlock support for LPDDR flash memories
  5 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
  6 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
  7 * Many thanks to Roman Borisov for initial enabling
  8 *
  9 * TODO:
 10 * Implement VPP management
 11 * Implement XIP support
 12 * Implement OTP support
 13 */
 14#include <linux/mtd/pfow.h>
 15#include <linux/mtd/qinfo.h>
 16#include <linux/slab.h>
 17#include <linux/module.h>
 18
 19static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
 20					size_t *retlen, u_char *buf);
 21static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
 22				size_t len, size_t *retlen, const u_char *buf);
 23static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
 24				unsigned long count, loff_t to, size_t *retlen);
 25static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
 26static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 27static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 28static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
 29			size_t *retlen, void **mtdbuf, resource_size_t *phys);
 30static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
 31static int get_chip(struct map_info *map, struct flchip *chip, int mode);
 32static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
 33static void put_chip(struct map_info *map, struct flchip *chip);
 34
 35struct mtd_info *lpddr_cmdset(struct map_info *map)
 36{
 37	struct lpddr_private *lpddr = map->fldrv_priv;
 38	struct flchip_shared *shared;
 39	struct flchip *chip;
 40	struct mtd_info *mtd;
 41	int numchips;
 42	int i, j;
 43
 44	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
 45	if (!mtd)
 46		return NULL;
 47	mtd->priv = map;
 48	mtd->type = MTD_NORFLASH;
 49
 50	/* Fill in the default mtd operations */
 51	mtd->_read = lpddr_read;
 52	mtd->type = MTD_NORFLASH;
 53	mtd->flags = MTD_CAP_NORFLASH;
 54	mtd->flags &= ~MTD_BIT_WRITEABLE;
 55	mtd->_erase = lpddr_erase;
 56	mtd->_write = lpddr_write_buffers;
 57	mtd->_writev = lpddr_writev;
 58	mtd->_lock = lpddr_lock;
 59	mtd->_unlock = lpddr_unlock;
 60	if (map_is_linear(map)) {
 61		mtd->_point = lpddr_point;
 62		mtd->_unpoint = lpddr_unpoint;
 63	}
 64	mtd->size = 1ULL << lpddr->qinfo->DevSizeShift;
 65	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
 66	mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
 67
 68	shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
 69						GFP_KERNEL);
 70	if (!shared) {
 71		kfree(mtd);
 72		return NULL;
 73	}
 74
 75	chip = &lpddr->chips[0];
 76	numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
 77	for (i = 0; i < numchips; i++) {
 78		shared[i].writing = shared[i].erasing = NULL;
 79		mutex_init(&shared[i].lock);
 80		for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
 81			*chip = lpddr->chips[i];
 82			chip->start += j << lpddr->chipshift;
 83			chip->oldstate = chip->state = FL_READY;
 84			chip->priv = &shared[i];
 85			/* those should be reset too since
 86			   they create memory references. */
 87			init_waitqueue_head(&chip->wq);
 88			mutex_init(&chip->mutex);
 89			chip++;
 90		}
 91	}
 92
 93	return mtd;
 94}
 95EXPORT_SYMBOL(lpddr_cmdset);
 96
 97static void print_drs_error(unsigned int dsr)
 98{
 99	int prog_status = (dsr & DSR_RPS) >> 8;
100
101	if (!(dsr & DSR_AVAILABLE))
102		pr_notice("DSR.15: (0) Device not Available\n");
103	if ((prog_status & 0x03) == 0x03)
104		pr_notice("DSR.9,8: (11) Attempt to program invalid half with 41h command\n");
105	else if (prog_status & 0x02)
106		pr_notice("DSR.9,8: (10) Object Mode Program attempt in region with Control Mode data\n");
107	else if (prog_status &  0x01)
108		pr_notice("DSR.9,8: (01) Program attempt in region with Object Mode data\n");
109	if (!(dsr & DSR_READY_STATUS))
110		pr_notice("DSR.7: (0) Device is Busy\n");
111	if (dsr & DSR_ESS)
112		pr_notice("DSR.6: (1) Erase Suspended\n");
113	if (dsr & DSR_ERASE_STATUS)
114		pr_notice("DSR.5: (1) Erase/Blank check error\n");
115	if (dsr & DSR_PROGRAM_STATUS)
116		pr_notice("DSR.4: (1) Program Error\n");
117	if (dsr & DSR_VPPS)
118		pr_notice("DSR.3: (1) Vpp low detect, operation aborted\n");
119	if (dsr & DSR_PSS)
120		pr_notice("DSR.2: (1) Program suspended\n");
121	if (dsr & DSR_DPS)
122		pr_notice("DSR.1: (1) Aborted Erase/Program attempt on locked block\n");
123}
124
/*
 * wait_for_ready() - poll the PFOW Device Status Register until the chip
 * reports ready, or until the timeout expires.
 *
 * Must be called with chip->mutex held; the lock is dropped and re-taken
 * around each sleep/busy-wait interval so another context may suspend the
 * running operation in the meantime.
 *
 * @chip_op_time: expected operation duration in microseconds; 0 selects
 *		  a default timeout of 500000 polls.
 *
 * Returns 0 on success, -ETIME on timeout, or -EIO if the DSR flags an
 * error (which is also cleared and decoded via print_drs_error()).  On
 * exit chip->state is forced back to FL_READY.
 */
static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d \n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);

		/* If our operation was suspended while we slept, wait on
		   chip->wq until the chip is back in the state we started
		   the operation in before polling again. */
		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended)  {
			/* Suspend has occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR*/
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}
197
/*
 * get_chip() - acquire the right to start operation @mode on @chip.
 *
 * Called with chip->mutex held and returns with it still held.  For
 * write/erase modes on a partitioned chip this first arbitrates for the
 * chip-wide program/erase engine tracked in the flchip_shared record
 * (suspending or waiting out a contending partition as needed), then
 * drives this partition into a usable state via chip_ready().
 *
 * Returns 0 on success or a negative error code from chip_ready().
 */
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already in FL_SYNCING
			 * state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we have suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
293
/*
 * Try to bring @chip into a state where an operation of type @mode can
 * proceed.  Called with chip->mutex held.
 *
 * Returns 0 when the caller may go ahead (for FL_READY/FL_POINT this may
 * involve suspending an in-progress erase), -EAGAIN after having slept
 * waiting for the chip (the caller must re-evaluate the chip state), or
 * -EIO if the erase-suspend handshake with the hardware failed.
 */
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		/* Chip is idle; nothing to do. */
		return 0;

	case FL_ERASING:
		/*
		 * An erase is running.  Only reads (FL_READY/FL_POINT) may
		 * preempt it, and only if the part supports erase suspend;
		 * everything else has to wait for completion.
		 */
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. something got wrong. */
			/* Resume and pretend we weren't here.  */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed."
					"State may be wrong \n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;
		/* Erase suspend */
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
sleep:
		/*
		 * Chip is busy with something we cannot preempt: go to
		 * sleep until put_chip() wakes us, then ask the caller to
		 * retry.  chip->mutex is dropped while sleeping.
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
347
/*
 * Release ownership of @chip after an operation, resuming a suspended
 * erase if we own it, and hand the shared program/erase engine back to
 * another partition that was suspended on our behalf.
 *
 * Called with chip->mutex held.  Always wakes sleepers on chip->wq.
 */
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				/* Let the loaner resume its suspended erase. */
				put_chip(map, loaner);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	/* Resume whatever operation we suspended in chip_ready(). */
	switch (chip->oldstate) {
	case FL_ERASING:
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
402
/*
 * Program up to one write-buffer's worth of data (@len bytes) at @adr,
 * gathering the bytes from the kvec array at *@pvec starting at offset
 * *@pvec_seek.  On return *@pvec/*@pvec_seek are advanced past the
 * consumed data so the caller can continue with the next buffer.
 *
 * Returns 0 on success or a negative error from get_chip()/
 * wait_for_ready().
 */
static int do_write_buffer(struct map_info *map, struct flchip *chip,
			unsigned long adr, const struct kvec **pvec,
			unsigned long *pvec_seek, int len)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	map_word datum;
	int ret, wbufsize, word_gap;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long prog_buf_ofs;

	wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	if (word_gap) {
		/*
		 * adr is not bankwidth-aligned: back it up to the aligned
		 * boundary and pad the leading bytes with 0xff (no-op for
		 * flash programming).
		 */
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
	/* Write data */
	/* Get the program buffer offset from PFOW register data first*/
	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		/* Bytes still needed to complete the current map word. */
		int n = map_bankwidth(map) - word_gap;

		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		/* Pad a short trailing word with 0xff as well. */
		if (!word_gap && (len < map_bankwidth(map)))
			datum = map_word_ff(map);

		/* Merge n bytes from the kvec into the word at offset word_gap. */
		datum = map_word_load_partial(map, datum,
				vec->iov_base + vec_seek, word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			/* Word complete (or data exhausted): flush it. */
			map_write(map, datum, prog_buf_ofs);
			prog_buf_ofs += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			/* Current kvec drained; move to the next one. */
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
	chip->state = FL_WRITING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
	if (ret)	{
		printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
			map->name, ret, adr);
		goto out;
	}

 out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
480
481static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
482{
483	struct map_info *map = mtd->priv;
484	struct lpddr_private *lpddr = map->fldrv_priv;
485	int chipnum = adr >> lpddr->chipshift;
486	struct flchip *chip = &lpddr->chips[chipnum];
487	int ret;
488
489	mutex_lock(&chip->mutex);
490	ret = get_chip(map, chip, FL_ERASING);
491	if (ret) {
492		mutex_unlock(&chip->mutex);
493		return ret;
494	}
495	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
496	chip->state = FL_ERASING;
497	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
498	if (ret) {
499		printk(KERN_WARNING"%s Erase block error %d at : %llx\n",
500			map->name, ret, adr);
501		goto out;
502	}
503 out:	put_chip(map, chip);
504	mutex_unlock(&chip->mutex);
505	return ret;
506}
507
508static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
509			size_t *retlen, u_char *buf)
510{
511	struct map_info *map = mtd->priv;
512	struct lpddr_private *lpddr = map->fldrv_priv;
513	int chipnum = adr >> lpddr->chipshift;
514	struct flchip *chip = &lpddr->chips[chipnum];
515	int ret = 0;
516
517	mutex_lock(&chip->mutex);
518	ret = get_chip(map, chip, FL_READY);
519	if (ret) {
520		mutex_unlock(&chip->mutex);
521		return ret;
522	}
523
524	map_copy_from(map, buf, adr, len);
525	*retlen = len;
526
527	put_chip(map, chip);
528	mutex_unlock(&chip->mutex);
529	return ret;
530}
531
/*
 * MTD point handler: give the caller a direct pointer into the mapped
 * flash for [adr, adr + len), holding each covered chip in FL_POINT and
 * bumping its reference count.  The region must be virtually contiguous
 * across chips.  *@retlen reflects how much was actually pointed.
 *
 * NOTE(review): errors from get_chip() only break the loop; the function
 * still returns 0 with a partial *retlen — callers appear expected to
 * compare *retlen against len.  Confirm against the mtd_point() contract.
 */
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	/* Direct access needs a kernel mapping of the flash. */
	if (!map->virt)
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);
	*mtdbuf = (void *)map->virt + chip->start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		/* Clamp this iteration to the end of the current chip. */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		mutex_unlock(&chip->mutex);
		if (ret)
			break;

		/* Pin the chip in FL_POINT until lpddr_unpoint() drops it. */
		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}
584
585static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
586{
587	struct map_info *map = mtd->priv;
588	struct lpddr_private *lpddr = map->fldrv_priv;
589	int chipnum = adr >> lpddr->chipshift, err = 0;
590	unsigned long ofs;
591
592	/* ofs: offset within the first chip that the first read should start */
593	ofs = adr - (chipnum << lpddr->chipshift);
594
595	while (len) {
596		unsigned long thislen;
597		struct flchip *chip;
598
599		chip = &lpddr->chips[chipnum];
600		if (chipnum >= lpddr->numchips)
601			break;
602
603		if ((len + ofs - 1) >> lpddr->chipshift)
604			thislen = (1<<lpddr->chipshift) - ofs;
605		else
606			thislen = len;
607
608		mutex_lock(&chip->mutex);
609		if (chip->state == FL_POINT) {
610			chip->ref_point_counter--;
611			if (chip->ref_point_counter == 0)
612				chip->state = FL_READY;
613		} else {
614			printk(KERN_WARNING "%s: Warning: unpoint called on non"
615					"pointed region\n", map->name);
616			err = -EINVAL;
617		}
618
619		put_chip(map, chip);
620		mutex_unlock(&chip->mutex);
621
622		len -= thislen;
623		ofs = 0;
624		chipnum++;
625	}
626
627	return err;
628}
629
630static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
631				size_t *retlen, const u_char *buf)
632{
633	struct kvec vec;
634
635	vec.iov_base = (void *) buf;
636	vec.iov_len = len;
637
638	return lpddr_writev(mtd, &vec, 1, to, retlen);
639}
640
641
642static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
643				unsigned long count, loff_t to, size_t *retlen)
644{
645	struct map_info *map = mtd->priv;
646	struct lpddr_private *lpddr = map->fldrv_priv;
647	int ret = 0;
648	int chipnum;
649	unsigned long ofs, vec_seek, i;
650	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
651	size_t len = 0;
652
653	for (i = 0; i < count; i++)
654		len += vecs[i].iov_len;
655
656	if (!len)
657		return 0;
658
659	chipnum = to >> lpddr->chipshift;
660
661	ofs = to;
662	vec_seek = 0;
663
664	do {
665		/* We must not cross write block boundaries */
666		int size = wbufsize - (ofs & (wbufsize-1));
667
668		if (size > len)
669			size = len;
670
671		ret = do_write_buffer(map, &lpddr->chips[chipnum],
672					  ofs, &vecs, &vec_seek, size);
673		if (ret)
674			return ret;
675
676		ofs += size;
677		(*retlen) += size;
678		len -= size;
679
680		/* Be nice and reschedule with the chip in a usable
681		 * state for other processes */
682		cond_resched();
683
684	} while (len);
685
686	return 0;
687}
688
689static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
690{
691	unsigned long ofs, len;
692	int ret;
693	struct map_info *map = mtd->priv;
694	struct lpddr_private *lpddr = map->fldrv_priv;
695	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
696
697	ofs = instr->addr;
698	len = instr->len;
699
700	while (len > 0) {
701		ret = do_erase_oneblock(mtd, ofs);
702		if (ret)
703			return ret;
704		ofs += size;
705		len -= size;
706	}
707
708	return 0;
709}
710
711#define DO_XXLOCK_LOCK		1
712#define DO_XXLOCK_UNLOCK	2
713static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
714{
715	int ret = 0;
716	struct map_info *map = mtd->priv;
717	struct lpddr_private *lpddr = map->fldrv_priv;
718	int chipnum = adr >> lpddr->chipshift;
719	struct flchip *chip = &lpddr->chips[chipnum];
720
721	mutex_lock(&chip->mutex);
722	ret = get_chip(map, chip, FL_LOCKING);
723	if (ret) {
724		mutex_unlock(&chip->mutex);
725		return ret;
726	}
727
728	if (thunk == DO_XXLOCK_LOCK) {
729		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
730		chip->state = FL_LOCKING;
731	} else if (thunk == DO_XXLOCK_UNLOCK) {
732		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
733		chip->state = FL_UNLOCKING;
734	} else
735		BUG();
736
737	ret = wait_for_ready(map, chip, 1);
738	if (ret)	{
739		printk(KERN_ERR "%s: block unlock error status %d \n",
740				map->name, ret);
741		goto out;
742	}
743out:	put_chip(map, chip);
744	mutex_unlock(&chip->mutex);
745	return ret;
746}
747
/* MTD lock handler: lock the flash blocks covering [ofs, ofs + len). */
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}
752
/* MTD unlock handler: unlock the flash blocks covering [ofs, ofs + len). */
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
757
758MODULE_LICENSE("GPL");
759MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
760MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");