Linux Audio

Check our new training course

Embedded Linux training

Mar 31-Apr 8, 2025
Register
Loading...
Note: File does not exist in v3.15.
   1/*
   2 * NAND Flash Controller Device Driver
   3 * Copyright (c) 2009, Intel Corporation and its suppliers.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc.,
  16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17 *
  18 */
  19
  20#include <linux/fs.h>
  21#include <linux/slab.h>
  22
  23#include "flash.h"
  24#include "ffsdefs.h"
  25#include "lld.h"
  26#include "lld_nand.h"
  27#if CMD_DMA
  28#include "lld_cdma.h"
  29#endif
  30
  31#define BLK_FROM_ADDR(addr)  ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
  32#define PAGE_FROM_ADDR(addr, Block)  ((u16)((addr - (u64)Block * \
  33	DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
  34
  35#define IS_SPARE_BLOCK(blk)     (BAD_BLOCK != (pbt[blk] &\
  36	BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
  37
  38#define IS_DATA_BLOCK(blk)      (0 == (pbt[blk] & BAD_BLOCK))
  39
  40#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
  41	BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
  42
  43#define IS_BAD_BLOCK(blk)       (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
  44
  45#if DEBUG_BNDRY
  46void debug_boundary_lineno_error(int chnl, int limit, int no,
  47				int lineno, char *filename)
  48{
  49	if (chnl >= limit)
  50		printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
  51		"at  %s:%d. Other info:%d. Aborting...\n",
  52		chnl, limit, filename, lineno, no);
  53}
  54/* static int globalmemsize; */
  55#endif
  56
  57static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
  58static int FTL_Cache_Read(u64 dwPageAddr);
  59static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
  60				u16 cache_blk);
  61static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
  62				 u8 cache_blk, u16 flag);
  63static int FTL_Cache_Write(void);
  64static void FTL_Calculate_LRU(void);
  65static u32 FTL_Get_Block_Index(u32 wBlockNum);
  66
  67static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
  68					   u8 BT_Tag, u16 *Page);
  69static int FTL_Read_Block_Table(void);
  70static int FTL_Write_Block_Table(int wForce);
  71static int FTL_Write_Block_Table_Data(void);
  72static int FTL_Check_Block_Table(int wOldTable);
  73static int FTL_Static_Wear_Leveling(void);
  74static u32 FTL_Replace_Block_Table(void);
  75static int FTL_Write_IN_Progress_Block_Table_Page(void);
  76
  77static u32 FTL_Get_Page_Num(u64 length);
  78static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
  79
  80static u32 FTL_Replace_OneBlock(u32 wBlockNum,
  81				      u32 wReplaceNum);
  82static u32 FTL_Replace_LWBlock(u32 wBlockNum,
  83				     int *pGarbageCollect);
  84static u32 FTL_Replace_MWBlock(void);
  85static int FTL_Replace_Block(u64 blk_addr);
  86static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
  87
  88struct device_info_tag DeviceInfo;
  89struct flash_cache_tag Cache;
  90static struct spectra_l2_cache_info cache_l2;
  91
  92static u8 *cache_l2_page_buf;
  93static u8 *cache_l2_blk_buf;
  94
  95u8 *g_pBlockTable;
  96u8 *g_pWearCounter;
  97u16 *g_pReadCounter;
  98u32 *g_pBTBlocks;
  99static u16 g_wBlockTableOffset;
 100static u32 g_wBlockTableIndex;
 101static u8 g_cBlockTableStatus;
 102
 103static u8 *g_pTempBuf;
 104static u8 *flag_check_blk_table;
 105static u8 *tmp_buf_search_bt_in_block;
 106static u8 *spare_buf_search_bt_in_block;
 107static u8 *spare_buf_bt_search_bt_in_block;
 108static u8 *tmp_buf1_read_blk_table;
 109static u8 *tmp_buf2_read_blk_table;
 110static u8 *flags_static_wear_leveling;
 111static u8 *tmp_buf_write_blk_table_data;
 112static u8 *tmp_buf_read_disturbance;
 113
 114u8 *buf_read_page_main_spare;
 115u8 *buf_write_page_main_spare;
 116u8 *buf_read_page_spare;
 117u8 *buf_get_bad_block;
 118
 119#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
 120struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
 121struct flash_cache_tag cache_start_copy;
 122#endif
 123
 124int g_wNumFreeBlocks;
 125u8 g_SBDCmdIndex;
 126
 127static u8 *g_pIPF;
 128static u8 bt_flag = FIRST_BT_ID;
 129static u8 bt_block_changed;
 130
 131static u16 cache_block_to_write;
 132static u8 last_erased = FIRST_BT_ID;
 133
 134static u8 GC_Called;
 135static u8 BT_GC_Called;
 136
 137#if CMD_DMA
 138#define COPY_BACK_BUF_NUM 10
 139
 140static u8 ftl_cmd_cnt;  /* Init value is 0 */
 141u8 *g_pBTDelta;
 142u8 *g_pBTDelta_Free;
 143u8 *g_pBTStartingCopy;
 144u8 *g_pWearCounterCopy;
 145u16 *g_pReadCounterCopy;
 146u8 *g_pBlockTableCopies;
 147u8 *g_pNextBlockTable;
 148static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
 149static int cp_back_buf_idx;
 150
 151static u8 *g_temp_buf;
 152
 153#pragma pack(push, 1)
 154#pragma pack(1)
 155struct BTableChangesDelta {
 156	u8 ftl_cmd_cnt;
 157	u8 ValidFields;
 158	u16 g_wBlockTableOffset;
 159	u32 g_wBlockTableIndex;
 160	u32 BT_Index;
 161	u32 BT_Entry_Value;
 162	u32 WC_Index;
 163	u8 WC_Entry_Value;
 164	u32 RC_Index;
 165	u16 RC_Entry_Value;
 166};
 167
 168#pragma pack(pop)
 169
 170struct BTableChangesDelta *p_BTableChangesDelta;
 171#endif
 172
 173
 174#define MARK_BLOCK_AS_BAD(blocknode)      (blocknode |= BAD_BLOCK)
 175#define MARK_BLK_AS_DISCARD(blk)  (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
 176
 177#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
 178						sizeof(u32))
 179#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
 180						sizeof(u8))
 181#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
 182						sizeof(u16))
 183#if SUPPORT_LARGE_BLOCKNUM
 184#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
 185						sizeof(u8) * 3)
 186#else
 187#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
 188						sizeof(u16))
 189#endif
 190#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
 191	FTL_Get_WearCounter_Table_Mem_Size_Bytes
 192#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
 193	FTL_Get_ReadCounter_Table_Mem_Size_Bytes
 194
 195static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
 196{
 197	u32 byte_num;
 198
 199	if (DeviceInfo.MLCDevice) {
 200		byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
 201			DeviceInfo.wDataBlockNum * sizeof(u8) +
 202			DeviceInfo.wDataBlockNum * sizeof(u16);
 203	} else {
 204		byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
 205			DeviceInfo.wDataBlockNum * sizeof(u8);
 206	}
 207
 208	byte_num += 4 * sizeof(u8);
 209
 210	return byte_num;
 211}
 212
 213static u16  FTL_Get_Block_Table_Flash_Size_Pages(void)
 214{
 215	return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
 216}
 217
 218static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
 219					u32 sizeTxed)
 220{
 221	u32 wBytesCopied, blk_tbl_size, wBytes;
 222	u32 *pbt = (u32 *)g_pBlockTable;
 223
 224	blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
 225	for (wBytes = 0;
 226	(wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
 227	wBytes++) {
 228#if SUPPORT_LARGE_BLOCKNUM
 229		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
 230		>> (((wBytes + sizeTxed) % 3) ?
 231		((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
 232#else
 233		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
 234		>> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
 235#endif
 236	}
 237
 238	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
 239	blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
 240	wBytesCopied = wBytes;
 241	wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
 242		(sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
 243	memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
 244
 245	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
 246
 247	if (DeviceInfo.MLCDevice) {
 248		blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
 249		wBytesCopied += wBytes;
 250		for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
 251			((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
 252			flashBuf[wBytes + wBytesCopied] =
 253			(g_pReadCounter[(wBytes + sizeTxed) / 2] >>
 254			(((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
 255	}
 256
 257	return wBytesCopied + wBytes;
 258}
 259
 260static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
 261				u32 sizeToTx, u32 sizeTxed)
 262{
 263	u32 wBytesCopied, blk_tbl_size, wBytes;
 264	u32 *pbt = (u32 *)g_pBlockTable;
 265
 266	blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
 267	for (wBytes = 0; (wBytes < sizeToTx) &&
 268		((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
 269#if SUPPORT_LARGE_BLOCKNUM
 270		if (!((wBytes + sizeTxed) % 3))
 271			pbt[(wBytes + sizeTxed) / 3] = 0;
 272		pbt[(wBytes + sizeTxed) / 3] |=
 273			(flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
 274			((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
 275#else
 276		if (!((wBytes + sizeTxed) % 2))
 277			pbt[(wBytes + sizeTxed) / 2] = 0;
 278		pbt[(wBytes + sizeTxed) / 2] |=
 279			(flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
 280			0 : 8));
 281#endif
 282	}
 283
 284	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
 285	blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
 286	wBytesCopied = wBytes;
 287	wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
 288		(sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
 289	memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
 290	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
 291
 292	if (DeviceInfo.MLCDevice) {
 293		wBytesCopied += wBytes;
 294		blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
 295		for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
 296			((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
 297			if (((wBytes + sizeTxed) % 2))
 298				g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
 299			g_pReadCounter[(wBytes + sizeTxed) / 2] |=
 300				(flashBuf[wBytes] <<
 301				(((wBytes + sizeTxed) % 2) ? 0 : 8));
 302		}
 303	}
 304
 305	return wBytesCopied+wBytes;
 306}
 307
 308static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
 309{
 310	int i;
 311
 312	for (i = 0; i < BTSIG_BYTES; i++)
 313		buf[BTSIG_OFFSET + i] =
 314		((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
 315		(1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
 316
 317	return PASS;
 318}
 319
 320static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
 321{
 322	static u8 tag[BTSIG_BYTES >> 1];
 323	int i, j, k, tagi, tagtemp, status;
 324
 325	*tagarray = (u8 *)tag;
 326	tagi = 0;
 327
 328	for (i = 0; i < (BTSIG_BYTES - 1); i++) {
 329		for (j = i + 1; (j < BTSIG_BYTES) &&
 330			(tagi < (BTSIG_BYTES >> 1)); j++) {
 331			tagtemp = buf[BTSIG_OFFSET + j] -
 332				buf[BTSIG_OFFSET + i];
 333			if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
 334				tagtemp = (buf[BTSIG_OFFSET + i] +
 335					(1 + LAST_BT_ID - FIRST_BT_ID) -
 336					(i * BTSIG_DELTA)) %
 337					(1 + LAST_BT_ID - FIRST_BT_ID);
 338				status = FAIL;
 339				for (k = 0; k < tagi; k++) {
 340					if (tagtemp == tag[k])
 341						status = PASS;
 342				}
 343
 344				if (status == FAIL) {
 345					tag[tagi++] = tagtemp;
 346					i = (j == (i + 1)) ? i + 1 : i;
 347					j = (j == (i + 1)) ? i + 1 : i;
 348				}
 349			}
 350		}
 351	}
 352
 353	return tagi;
 354}
 355
 356
 357static int FTL_Execute_SPL_Recovery(void)
 358{
 359	u32 j, block, blks;
 360	u32 *pbt = (u32 *)g_pBlockTable;
 361	int ret;
 362
 363	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
 364				__FILE__, __LINE__, __func__);
 365
 366	blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
 367	for (j = 0; j <= blks; j++) {
 368		block = (pbt[j]);
 369		if (((block & BAD_BLOCK) != BAD_BLOCK) &&
 370			((block & SPARE_BLOCK) == SPARE_BLOCK)) {
 371			ret =  GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
 372			if (FAIL == ret) {
 373				nand_dbg_print(NAND_DBG_WARN,
 374					"NAND Program fail in %s, Line %d, "
 375					"Function: %s, new Bad Block %d "
 376					"generated!\n",
 377					__FILE__, __LINE__, __func__,
 378					(int)(block & ~BAD_BLOCK));
 379				MARK_BLOCK_AS_BAD(pbt[j]);
 380			}
 381		}
 382	}
 383
 384	return PASS;
 385}
 386
 387/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 388* Function:     GLOB_FTL_IdentifyDevice
 389* Inputs:       pointer to identify data structure
 390* Outputs:      PASS / FAIL
 391* Description:  the identify data structure is filled in with
 392*                   information for the block driver.
 393*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
 394int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
 395{
 396	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
 397				__FILE__, __LINE__, __func__);
 398
 399	dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
 400	dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
 401	dev_data->PageDataSize = DeviceInfo.wPageDataSize;
 402	dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
 403	dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
 404
 405	return PASS;
 406}
 407
 408/* ..... */
 409static int allocate_memory(void)
 410{
 411	u32 block_table_size, page_size, block_size, mem_size;
 412	u32 total_bytes = 0;
 413	int i;
 414#if CMD_DMA
 415	int j;
 416#endif
 417
 418	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
 419		__FILE__, __LINE__, __func__);
 420
 421	page_size = DeviceInfo.wPageSize;
 422	block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
 423
 424	block_table_size = DeviceInfo.wDataBlockNum *
 425		(sizeof(u32) + sizeof(u8) + sizeof(u16));
 426	block_table_size += (DeviceInfo.wPageDataSize -
 427		(block_table_size % DeviceInfo.wPageDataSize)) %
 428		DeviceInfo.wPageDataSize;
 429
 430	/* Malloc memory for block tables */
 431	g_pBlockTable = kzalloc(block_table_size, GFP_ATOMIC);
 432	if (!g_pBlockTable)
 433		goto block_table_fail;
 434	total_bytes += block_table_size;
 435
 436	g_pWearCounter = (u8 *)(g_pBlockTable +
 437		DeviceInfo.wDataBlockNum * sizeof(u32));
 438
 439	if (DeviceInfo.MLCDevice)
 440		g_pReadCounter = (u16 *)(g_pBlockTable +
 441			DeviceInfo.wDataBlockNum *
 442			(sizeof(u32) + sizeof(u8)));
 443
 444	/* Malloc memory and init for cache items */
 445	for (i = 0; i < CACHE_ITEM_NUM; i++) {
 446		Cache.array[i].address = NAND_CACHE_INIT_ADDR;
 447		Cache.array[i].use_cnt = 0;
 448		Cache.array[i].changed = CLEAR;
 449		Cache.array[i].buf = kzalloc(Cache.cache_item_size,
 450					     GFP_ATOMIC);
 451		if (!Cache.array[i].buf)
 452			goto cache_item_fail;
 453		total_bytes += Cache.cache_item_size;
 454	}
 455
 456	/* Malloc memory for IPF */
 457	g_pIPF = kzalloc(page_size, GFP_ATOMIC);
 458	if (!g_pIPF)
 459		goto ipf_fail;
 460	total_bytes += page_size;
 461
 462	/* Malloc memory for data merging during Level2 Cache flush */
 463	cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
 464	if (!cache_l2_page_buf)
 465		goto cache_l2_page_buf_fail;
 466	memset(cache_l2_page_buf, 0xff, page_size);
 467	total_bytes += page_size;
 468
 469	cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
 470	if (!cache_l2_blk_buf)
 471		goto cache_l2_blk_buf_fail;
 472	memset(cache_l2_blk_buf, 0xff, block_size);
 473	total_bytes += block_size;
 474
 475	/* Malloc memory for temp buffer */
 476	g_pTempBuf = kzalloc(Cache.cache_item_size, GFP_ATOMIC);
 477	if (!g_pTempBuf)
 478		goto Temp_buf_fail;
 479	total_bytes += Cache.cache_item_size;
 480
 481	/* Malloc memory for block table blocks */
 482	mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
 483	g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
 484	if (!g_pBTBlocks)
 485		goto bt_blocks_fail;
 486	memset(g_pBTBlocks, 0xff, mem_size);
 487	total_bytes += mem_size;
 488
 489	/* Malloc memory for function FTL_Check_Block_Table */
 490	flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
 491	if (!flag_check_blk_table)
 492		goto flag_check_blk_table_fail;
 493	total_bytes += DeviceInfo.wDataBlockNum;
 494
 495	/* Malloc memory for function FTL_Search_Block_Table_IN_Block */
 496	tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
 497	if (!tmp_buf_search_bt_in_block)
 498		goto tmp_buf_search_bt_in_block_fail;
 499	memset(tmp_buf_search_bt_in_block, 0xff, page_size);
 500	total_bytes += page_size;
 501
 502	mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
 503	spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
 504	if (!spare_buf_search_bt_in_block)
 505		goto spare_buf_search_bt_in_block_fail;
 506	memset(spare_buf_search_bt_in_block, 0xff, mem_size);
 507	total_bytes += mem_size;
 508
 509	spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
 510	if (!spare_buf_bt_search_bt_in_block)
 511		goto spare_buf_bt_search_bt_in_block_fail;
 512	memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
 513	total_bytes += mem_size;
 514
 515	/* Malloc memory for function FTL_Read_Block_Table */
 516	tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
 517	if (!tmp_buf1_read_blk_table)
 518		goto tmp_buf1_read_blk_table_fail;
 519	memset(tmp_buf1_read_blk_table, 0xff, page_size);
 520	total_bytes += page_size;
 521
 522	tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
 523	if (!tmp_buf2_read_blk_table)
 524		goto tmp_buf2_read_blk_table_fail;
 525	memset(tmp_buf2_read_blk_table, 0xff, page_size);
 526	total_bytes += page_size;
 527
 528	/* Malloc memory for function FTL_Static_Wear_Leveling */
 529	flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
 530					GFP_ATOMIC);
 531	if (!flags_static_wear_leveling)
 532		goto flags_static_wear_leveling_fail;
 533	total_bytes += DeviceInfo.wDataBlockNum;
 534
 535	/* Malloc memory for function FTL_Write_Block_Table_Data */
 536	if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
 537		mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
 538				2 * DeviceInfo.wPageSize;
 539	else
 540		mem_size = DeviceInfo.wPageSize;
 541	tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
 542	if (!tmp_buf_write_blk_table_data)
 543		goto tmp_buf_write_blk_table_data_fail;
 544	memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
 545	total_bytes += mem_size;
 546
 547	/* Malloc memory for function FTL_Read_Disturbance */
 548	tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
 549	if (!tmp_buf_read_disturbance)
 550		goto tmp_buf_read_disturbance_fail;
 551	memset(tmp_buf_read_disturbance, 0xff, block_size);
 552	total_bytes += block_size;
 553
 554	/* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
 555	buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
 556	if (!buf_read_page_main_spare)
 557		goto buf_read_page_main_spare_fail;
 558	total_bytes += DeviceInfo.wPageSize;
 559
 560	/* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
 561	buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
 562	if (!buf_write_page_main_spare)
 563		goto buf_write_page_main_spare_fail;
 564	total_bytes += DeviceInfo.wPageSize;
 565
 566	/* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
 567	buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
 568	if (!buf_read_page_spare)
 569		goto buf_read_page_spare_fail;
 570	memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
 571	total_bytes += DeviceInfo.wPageSpareSize;
 572
 573	/* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
 574	buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
 575	if (!buf_get_bad_block)
 576		goto buf_get_bad_block_fail;
 577	memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
 578	total_bytes += DeviceInfo.wPageSpareSize;
 579
 580#if CMD_DMA
 581	g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
 582	if (!g_temp_buf)
 583		goto temp_buf_fail;
 584	memset(g_temp_buf, 0xff, block_size);
 585	total_bytes += block_size;
 586
 587	/* Malloc memory for copy of block table used in CDMA mode */
 588	g_pBTStartingCopy = kzalloc(block_table_size, GFP_ATOMIC);
 589	if (!g_pBTStartingCopy)
 590		goto bt_starting_copy;
 591	total_bytes += block_table_size;
 592
 593	g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
 594		DeviceInfo.wDataBlockNum * sizeof(u32));
 595
 596	if (DeviceInfo.MLCDevice)
 597		g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
 598			DeviceInfo.wDataBlockNum *
 599			(sizeof(u32) + sizeof(u8)));
 600
 601	/* Malloc memory for block table copies */
 602	mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
 603			5 * DeviceInfo.wDataBlockNum * sizeof(u8);
 604	if (DeviceInfo.MLCDevice)
 605		mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
 606	g_pBlockTableCopies = kzalloc(mem_size, GFP_ATOMIC);
 607	if (!g_pBlockTableCopies)
 608		goto blk_table_copies_fail;
 609	total_bytes += mem_size;
 610	g_pNextBlockTable = g_pBlockTableCopies;
 611
 612	/* Malloc memory for Block Table Delta */
 613	mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
 614	g_pBTDelta = kzalloc(mem_size, GFP_ATOMIC);
 615	if (!g_pBTDelta)
 616		goto bt_delta_fail;
 617	total_bytes += mem_size;
 618	g_pBTDelta_Free = g_pBTDelta;
 619
 620	/* Malloc memory for Copy Back Buffers */
 621	for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
 622		cp_back_buf_copies[j] = kzalloc(block_size, GFP_ATOMIC);
 623		if (!cp_back_buf_copies[j])
 624			goto cp_back_buf_copies_fail;
 625		total_bytes += block_size;
 626	}
 627	cp_back_buf_idx = 0;
 628
 629	/* Malloc memory for pending commands list */
 630	mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
 631	info.pcmds = kzalloc(mem_size, GFP_KERNEL);
 632	if (!info.pcmds)
 633		goto pending_cmds_buf_fail;
 634	total_bytes += mem_size;
 635
 636	/* Malloc memory for CDMA descripter table */
 637	mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
 638	info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
 639	if (!info.cdma_desc_buf)
 640		goto cdma_desc_buf_fail;
 641	total_bytes += mem_size;
 642
 643	/* Malloc memory for Memcpy descripter table */
 644	mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
 645	info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
 646	if (!info.memcp_desc_buf)
 647		goto memcp_desc_buf_fail;
 648	total_bytes += mem_size;
 649#endif
 650
 651	nand_dbg_print(NAND_DBG_WARN,
 652		"Total memory allocated in FTL layer: %d\n", total_bytes);
 653
 654	return PASS;
 655
 656#if CMD_DMA
 657memcp_desc_buf_fail:
 658	kfree(info.cdma_desc_buf);
 659cdma_desc_buf_fail:
 660	kfree(info.pcmds);
 661pending_cmds_buf_fail:
 662cp_back_buf_copies_fail:
 663	j--;
 664	for (; j >= 0; j--)
 665		kfree(cp_back_buf_copies[j]);
 666	kfree(g_pBTDelta);
 667bt_delta_fail:
 668	kfree(g_pBlockTableCopies);
 669blk_table_copies_fail:
 670	kfree(g_pBTStartingCopy);
 671bt_starting_copy:
 672	kfree(g_temp_buf);
 673temp_buf_fail:
 674	kfree(buf_get_bad_block);
 675#endif
 676
 677buf_get_bad_block_fail:
 678	kfree(buf_read_page_spare);
 679buf_read_page_spare_fail:
 680	kfree(buf_write_page_main_spare);
 681buf_write_page_main_spare_fail:
 682	kfree(buf_read_page_main_spare);
 683buf_read_page_main_spare_fail:
 684	kfree(tmp_buf_read_disturbance);
 685tmp_buf_read_disturbance_fail:
 686	kfree(tmp_buf_write_blk_table_data);
 687tmp_buf_write_blk_table_data_fail:
 688	kfree(flags_static_wear_leveling);
 689flags_static_wear_leveling_fail:
 690	kfree(tmp_buf2_read_blk_table);
 691tmp_buf2_read_blk_table_fail:
 692	kfree(tmp_buf1_read_blk_table);
 693tmp_buf1_read_blk_table_fail:
 694	kfree(spare_buf_bt_search_bt_in_block);
 695spare_buf_bt_search_bt_in_block_fail:
 696	kfree(spare_buf_search_bt_in_block);
 697spare_buf_search_bt_in_block_fail:
 698	kfree(tmp_buf_search_bt_in_block);
 699tmp_buf_search_bt_in_block_fail:
 700	kfree(flag_check_blk_table);
 701flag_check_blk_table_fail:
 702	kfree(g_pBTBlocks);
 703bt_blocks_fail:
 704	kfree(g_pTempBuf);
 705Temp_buf_fail:
 706	kfree(cache_l2_blk_buf);
 707cache_l2_blk_buf_fail:
 708	kfree(cache_l2_page_buf);
 709cache_l2_page_buf_fail:
 710	kfree(g_pIPF);
 711ipf_fail:
 712cache_item_fail:
 713	i--;
 714	for (; i >= 0; i--)
 715		kfree(Cache.array[i].buf);
 716	kfree(g_pBlockTable);
 717block_table_fail:
 718	printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
 719		__FILE__, __LINE__);
 720
 721	return -ENOMEM;
 722}
 723
 724/* .... */
 725static int free_memory(void)
 726{
 727	int i;
 728
 729#if CMD_DMA
 730	kfree(info.memcp_desc_buf);
 731	kfree(info.cdma_desc_buf);
 732	kfree(info.pcmds);
 733	for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
 734		kfree(cp_back_buf_copies[i]);
 735	kfree(g_pBTDelta);
 736	kfree(g_pBlockTableCopies);
 737	kfree(g_pBTStartingCopy);
 738	kfree(g_temp_buf);
 739	kfree(buf_get_bad_block);
 740#endif
 741	kfree(buf_read_page_spare);
 742	kfree(buf_write_page_main_spare);
 743	kfree(buf_read_page_main_spare);
 744	kfree(tmp_buf_read_disturbance);
 745	kfree(tmp_buf_write_blk_table_data);
 746	kfree(flags_static_wear_leveling);
 747	kfree(tmp_buf2_read_blk_table);
 748	kfree(tmp_buf1_read_blk_table);
 749	kfree(spare_buf_bt_search_bt_in_block);
 750	kfree(spare_buf_search_bt_in_block);
 751	kfree(tmp_buf_search_bt_in_block);
 752	kfree(flag_check_blk_table);
 753	kfree(g_pBTBlocks);
 754	kfree(g_pTempBuf);
 755	kfree(g_pIPF);
 756	for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
 757		kfree(Cache.array[i].buf);
 758	kfree(g_pBlockTable);
 759
 760	return 0;
 761}
 762
 763static void dump_cache_l2_table(void)
 764{
 765	struct list_head *p;
 766	struct spectra_l2_cache_list *pnd;
 767	int n;
 768
 769	n = 0;
 770	list_for_each(p, &cache_l2.table.list) {
 771		pnd = list_entry(p, struct spectra_l2_cache_list, list);
 772		nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
 773/*
 774		for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
 775			if (pnd->pages_array[i] != MAX_U32_VALUE)
 776				nand_dbg_print(NAND_DBG_WARN, "    pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
 777		}
 778*/
 779		n++;
 780	}
 781}
 782
 783/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 784* Function:     GLOB_FTL_Init
 785* Inputs:       none
 786* Outputs:      PASS=0 / FAIL=1
 787* Description:  allocates the memory for cache array,
 788*               important data structures
 789*               clears the cache array
 790*               reads the block table from flash into array
 791*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
 792int GLOB_FTL_Init(void)
 793{
 794	int i;
 795
 796	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
 797		__FILE__, __LINE__, __func__);
 798
 799	Cache.pages_per_item = 1;
 800	Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
 801
 802	if (allocate_memory() != PASS)
 803		return FAIL;
 804
 805#if CMD_DMA
 806#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
 807	memcpy((void *)&cache_start_copy, (void *)&Cache,
 808		sizeof(struct flash_cache_tag));
 809	memset((void *)&int_cache, -1,
 810		sizeof(struct flash_cache_delta_list_tag) *
 811		(MAX_CHANS + MAX_DESCS));
 812#endif
 813	ftl_cmd_cnt = 0;
 814#endif
 815
 816	if (FTL_Read_Block_Table() != PASS)
 817		return FAIL;
 818
 819	/* Init the Level2 Cache data structure */
 820	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
 821		cache_l2.blk_array[i] = MAX_U32_VALUE;
 822	cache_l2.cur_blk_idx = 0;
 823	cache_l2.cur_page_num = 0;
 824	INIT_LIST_HEAD(&cache_l2.table.list);
 825	cache_l2.table.logical_blk_num = MAX_U32_VALUE;
 826
 827	dump_cache_l2_table();
 828
 829	return 0;
 830}
 831
 832
 833#if CMD_DMA
 834#if 0
 835static void save_blk_table_changes(u16 idx)
 836{
 837	u8 ftl_cmd;
 838	u32 *pbt = (u32 *)g_pBTStartingCopy;
 839
 840#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
 841	u16 id;
 842	u8 cache_blks;
 843
 844	id = idx - MAX_CHANS;
 845	if (int_cache[id].item != -1) {
 846		cache_blks = int_cache[id].item;
 847		cache_start_copy.array[cache_blks].address =
 848			int_cache[id].cache.address;
 849		cache_start_copy.array[cache_blks].changed =
 850			int_cache[id].cache.changed;
 851	}
 852#endif
 853
 854	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
 855
 856	while (ftl_cmd <= PendingCMD[idx].Tag) {
 857		if (p_BTableChangesDelta->ValidFields == 0x01) {
 858			g_wBlockTableOffset =
 859				p_BTableChangesDelta->g_wBlockTableOffset;
 860		} else if (p_BTableChangesDelta->ValidFields == 0x0C) {
 861			pbt[p_BTableChangesDelta->BT_Index] =
 862				p_BTableChangesDelta->BT_Entry_Value;
 863			debug_boundary_error(((
 864				p_BTableChangesDelta->BT_Index)),
 865				DeviceInfo.wDataBlockNum, 0);
 866		} else if (p_BTableChangesDelta->ValidFields == 0x03) {
 867			g_wBlockTableOffset =
 868				p_BTableChangesDelta->g_wBlockTableOffset;
 869			g_wBlockTableIndex =
 870				p_BTableChangesDelta->g_wBlockTableIndex;
 871		} else if (p_BTableChangesDelta->ValidFields == 0x30) {
 872			g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
 873				p_BTableChangesDelta->WC_Entry_Value;
 874		} else if ((DeviceInfo.MLCDevice) &&
 875			(p_BTableChangesDelta->ValidFields == 0xC0)) {
 876			g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
 877				p_BTableChangesDelta->RC_Entry_Value;
 878			nand_dbg_print(NAND_DBG_DEBUG,
 879				"In event status setting read counter "
 880				"GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
 881				ftl_cmd,
 882				p_BTableChangesDelta->RC_Entry_Value,
 883				(unsigned int)p_BTableChangesDelta->RC_Index);
 884		} else {
 885			nand_dbg_print(NAND_DBG_DEBUG,
 886				"This should never occur \n");
 887		}
 888		p_BTableChangesDelta += 1;
 889		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
 890	}
 891}
 892
 893static void discard_cmds(u16 n)
 894{
 895	u32 *pbt = (u32 *)g_pBTStartingCopy;
 896	u8 ftl_cmd;
 897	unsigned long k;
 898#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
 899	u8 cache_blks;
 900	u16 id;
 901#endif
 902
 903	if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
 904		(PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
 905		for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
 906			if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
 907				MARK_BLK_AS_DISCARD(pbt[k]);
 908		}
 909	}
 910
 911	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
 912	while (ftl_cmd <= PendingCMD[n].Tag) {
 913		p_BTableChangesDelta += 1;
 914		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
 915	}
 916
 917#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
 918	id = n - MAX_CHANS;
 919
 920	if (int_cache[id].item != -1) {
 921		cache_blks = int_cache[id].item;
 922		if (PendingCMD[n].CMD == MEMCOPY_CMD) {
 923			if ((cache_start_copy.array[cache_blks].buf <=
 924				PendingCMD[n].DataDestAddr) &&
 925				((cache_start_copy.array[cache_blks].buf +
 926				Cache.cache_item_size) >
 927				PendingCMD[n].DataDestAddr)) {
 928				cache_start_copy.array[cache_blks].address =
 929						NAND_CACHE_INIT_ADDR;
 930				cache_start_copy.array[cache_blks].use_cnt =
 931								0;
 932				cache_start_copy.array[cache_blks].changed =
 933								CLEAR;
 934			}
 935		} else {
 936			cache_start_copy.array[cache_blks].address =
 937					int_cache[id].cache.address;
 938			cache_start_copy.array[cache_blks].changed =
 939					int_cache[id].cache.changed;
 940		}
 941	}
 942#endif
 943}
 944
 945static void process_cmd_pass(int *first_failed_cmd, u16 idx)
 946{
 947	if (0 == *first_failed_cmd)
 948		save_blk_table_changes(idx);
 949	else
 950		discard_cmds(idx);
 951}
 952
 953static void process_cmd_fail_abort(int *first_failed_cmd,
 954				u16 idx, int event)
 955{
 956	u32 *pbt = (u32 *)g_pBTStartingCopy;
 957	u8 ftl_cmd;
 958	unsigned long i;
 959	int erase_fail, program_fail;
 960#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
 961	u8 cache_blks;
 962	u16 id;
 963#endif
 964
 965	if (0 == *first_failed_cmd)
 966		*first_failed_cmd = PendingCMD[idx].SBDCmdIndex;
 967
 968	nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occurred "
 969		"while executing %u Command %u accesing Block %u\n",
 970		(unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
 971		PendingCMD[idx].CMD,
 972		(unsigned int)PendingCMD[idx].Block);
 973
 974	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
 975	while (ftl_cmd <= PendingCMD[idx].Tag) {
 976		p_BTableChangesDelta += 1;
 977		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
 978	}
 979
 980#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
 981	id = idx - MAX_CHANS;
 982
 983	if (int_cache[id].item != -1) {
 984		cache_blks = int_cache[id].item;
 985		if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
 986			cache_start_copy.array[cache_blks].address =
 987					int_cache[id].cache.address;
 988			cache_start_copy.array[cache_blks].changed = SET;
 989		} else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
 990			cache_start_copy.array[cache_blks].address =
 991				NAND_CACHE_INIT_ADDR;
 992			cache_start_copy.array[cache_blks].use_cnt = 0;
 993			cache_start_copy.array[cache_blks].changed =
 994							CLEAR;
 995		} else if (PendingCMD[idx].CMD == ERASE_CMD) {
 996			/* ? */
 997		} else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
 998			/* ? */
 999		}
1000	}
1001#endif
1002
1003	erase_fail = (event == EVENT_ERASE_FAILURE) &&
1004			(PendingCMD[idx].CMD == ERASE_CMD);
1005
1006	program_fail = (event == EVENT_PROGRAM_FAILURE) &&
1007			((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
1008			(PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));
1009
1010	if (erase_fail || program_fail) {
1011		for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1012			if (PendingCMD[idx].Block ==
1013				(pbt[i] & (~BAD_BLOCK)))
1014				MARK_BLOCK_AS_BAD(pbt[i]);
1015		}
1016	}
1017}
1018
1019static void process_cmd(int *first_failed_cmd, u16 idx, int event)
1020{
1021	u8 ftl_cmd;
1022	int cmd_match = 0;
1023
1024	if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
1025		cmd_match = 1;
1026
1027	if (PendingCMD[idx].Status == CMD_PASS) {
1028		process_cmd_pass(first_failed_cmd, idx);
1029	} else if ((PendingCMD[idx].Status == CMD_FAIL) ||
1030			(PendingCMD[idx].Status == CMD_ABORT)) {
1031		process_cmd_fail_abort(first_failed_cmd, idx, event);
1032	} else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
1033					PendingCMD[idx].Tag) {
1034		nand_dbg_print(NAND_DBG_DEBUG,
1035			" Command no. %hu is not executed\n",
1036			(unsigned int)PendingCMD[idx].Tag);
1037		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
1038		while (ftl_cmd <= PendingCMD[idx].Tag) {
1039			p_BTableChangesDelta += 1;
1040			ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
1041		}
1042	}
1043}
1044#endif
1045
1046static void process_cmd(int *first_failed_cmd, u16 idx, int event)
1047{
1048	printk(KERN_ERR "temporary workaround function. "
1049		"Should not be called! \n");
1050}
1051
1052/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1053* Function:    	GLOB_FTL_Event_Status
1054* Inputs:       none
1055* Outputs:      Event Code
1056* Description:	It is called by SBD after hardware interrupt signalling
1057*               completion of commands chain
1058*               It does following things
1059*               get event status from LLD
1060*               analyze command chain status
1061*               determine last command executed
1062*               analyze results
1063*               rebuild the block table in case of uncorrectable error
1064*               return event code
1065*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1066int GLOB_FTL_Event_Status(int *first_failed_cmd)
1067{
1068	int event_code = PASS;
1069	u16 i_P;
1070
1071	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1072		__FILE__, __LINE__, __func__);
1073
1074	*first_failed_cmd = 0;
1075
1076	event_code = GLOB_LLD_Event_Status();
1077
1078	switch (event_code) {
1079	case EVENT_PASS:
1080		nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
1081		break;
1082	case EVENT_UNCORRECTABLE_DATA_ERROR:
1083		nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
1084		break;
1085	case EVENT_PROGRAM_FAILURE:
1086	case EVENT_ERASE_FAILURE:
1087		nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
1088			"Event code: 0x%x\n", event_code);
1089		p_BTableChangesDelta =
1090			(struct BTableChangesDelta *)g_pBTDelta;
1091		for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
1092				i_P++)
1093			process_cmd(first_failed_cmd, i_P, event_code);
1094		memcpy(g_pBlockTable, g_pBTStartingCopy,
1095			DeviceInfo.wDataBlockNum * sizeof(u32));
1096		memcpy(g_pWearCounter, g_pWearCounterCopy,
1097			DeviceInfo.wDataBlockNum * sizeof(u8));
1098		if (DeviceInfo.MLCDevice)
1099			memcpy(g_pReadCounter, g_pReadCounterCopy,
1100				DeviceInfo.wDataBlockNum * sizeof(u16));
1101
1102#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1103		memcpy((void *)&Cache, (void *)&cache_start_copy,
1104			sizeof(struct flash_cache_tag));
1105		memset((void *)&int_cache, -1,
1106			sizeof(struct flash_cache_delta_list_tag) *
1107			(MAX_DESCS + MAX_CHANS));
1108#endif
1109		break;
1110	default:
1111		nand_dbg_print(NAND_DBG_WARN,
1112			"Handling unexpected event code - 0x%x\n",
1113			event_code);
1114		event_code = ERR;
1115		break;
1116	}
1117
1118	memcpy(g_pBTStartingCopy, g_pBlockTable,
1119		DeviceInfo.wDataBlockNum * sizeof(u32));
1120	memcpy(g_pWearCounterCopy, g_pWearCounter,
1121		DeviceInfo.wDataBlockNum * sizeof(u8));
1122	if (DeviceInfo.MLCDevice)
1123		memcpy(g_pReadCounterCopy, g_pReadCounter,
1124			DeviceInfo.wDataBlockNum * sizeof(u16));
1125
1126	g_pBTDelta_Free = g_pBTDelta;
1127	ftl_cmd_cnt = 0;
1128	g_pNextBlockTable = g_pBlockTableCopies;
1129	cp_back_buf_idx = 0;
1130
1131#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1132	memcpy((void *)&cache_start_copy, (void *)&Cache,
1133		sizeof(struct flash_cache_tag));
1134	memset((void *)&int_cache, -1,
1135		sizeof(struct flash_cache_delta_list_tag) *
1136		(MAX_DESCS + MAX_CHANS));
1137#endif
1138
1139	return event_code;
1140}
1141
1142/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1143* Function:     glob_ftl_execute_cmds
1144* Inputs:       none
1145* Outputs:      none
1146* Description:  pass thru to LLD
1147***************************************************************/
1148u16 glob_ftl_execute_cmds(void)
1149{
1150	nand_dbg_print(NAND_DBG_TRACE,
1151		"glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
1152		(unsigned int)ftl_cmd_cnt);
1153	g_SBDCmdIndex = 0;
1154	return glob_lld_execute_cmds();
1155}
1156
1157#endif
1158
1159#if !CMD_DMA
1160/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1161* Function:     GLOB_FTL_Read Immediate
1162* Inputs:         pointer to data
1163*                     address of data
1164* Outputs:      PASS / FAIL
1165* Description:  Reads one page of data into RAM directly from flash without
1166*       using or disturbing cache.It is assumed this function is called
1167*       with CMD-DMA disabled.
1168*****************************************************************/
1169int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
1170{
1171	int wResult = FAIL;
1172	u32 Block;
1173	u16 Page;
1174	u32 phy_blk;
1175	u32 *pbt = (u32 *)g_pBlockTable;
1176
1177	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1178		__FILE__, __LINE__, __func__);
1179
1180	Block = BLK_FROM_ADDR(addr);
1181	Page = PAGE_FROM_ADDR(addr, Block);
1182
1183	if (!IS_SPARE_BLOCK(Block))
1184		return FAIL;
1185
1186	phy_blk = pbt[Block];
1187	wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
1188
1189	if (DeviceInfo.MLCDevice) {
1190		g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
1191		if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
1192			>= MAX_READ_COUNTER)
1193			FTL_Read_Disturbance(phy_blk);
1194		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
1195			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1196			FTL_Write_IN_Progress_Block_Table_Page();
1197		}
1198	}
1199
1200	return wResult;
1201}
1202#endif
1203
1204#ifdef SUPPORT_BIG_ENDIAN
1205/*********************************************************************
1206* Function:     FTL_Invert_Block_Table
1207* Inputs:       none
1208* Outputs:      none
1209* Description:  Re-format the block table in ram based on BIG_ENDIAN and
1210*                     LARGE_BLOCKNUM if necessary
1211**********************************************************************/
1212static void FTL_Invert_Block_Table(void)
1213{
1214	u32 i;
1215	u32 *pbt = (u32 *)g_pBlockTable;
1216
1217	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1218		__FILE__, __LINE__, __func__);
1219
1220#ifdef SUPPORT_LARGE_BLOCKNUM
1221	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1222		pbt[i] = INVERTUINT32(pbt[i]);
1223		g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
1224	}
1225#else
1226	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1227		pbt[i] = INVERTUINT16(pbt[i]);
1228		g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
1229	}
1230#endif
1231}
1232#endif
1233
1234/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1235* Function:     GLOB_FTL_Flash_Init
1236* Inputs:       none
1237* Outputs:      PASS=0 / FAIL=0x01 (based on read ID)
1238* Description:  The flash controller is initialized
1239*               The flash device is reset
1240*               Perform a flash READ ID command to confirm that a
1241*                   valid device is attached and active.
1242*                   The DeviceInfo structure gets filled in
1243*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1244int GLOB_FTL_Flash_Init(void)
1245{
1246	int status = FAIL;
1247
1248	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1249		__FILE__, __LINE__, __func__);
1250
1251	g_SBDCmdIndex = 0;
1252
1253	status = GLOB_LLD_Flash_Init();
1254
1255	return status;
1256}
1257
1258/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1259* Inputs:       none
1260* Outputs:      PASS=0 / FAIL=0x01 (based on read ID)
1261* Description:  The flash controller is released
1262*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1263int GLOB_FTL_Flash_Release(void)
1264{
1265	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1266		__FILE__, __LINE__, __func__);
1267
1268	return GLOB_LLD_Flash_Release();
1269}
1270
1271
1272/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1273* Function:     GLOB_FTL_Cache_Release
1274* Inputs:       none
1275* Outputs:      none
1276* Description:  release all allocated memory in GLOB_FTL_Init
1277*               (allocated in GLOB_FTL_Init)
1278*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1279void GLOB_FTL_Cache_Release(void)
1280{
1281	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1282			       __FILE__, __LINE__, __func__);
1283
1284	free_memory();
1285}
1286
1287/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1288* Function:     FTL_Cache_If_Hit
1289* Inputs:       Page Address
1290* Outputs:      Block number/UNHIT BLOCK
1291* Description:  Determines if the addressed page is in cache
1292*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1293static u16 FTL_Cache_If_Hit(u64 page_addr)
1294{
1295	u16 item;
1296	u64 addr;
1297	int i;
1298
1299	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1300		__FILE__, __LINE__, __func__);
1301
1302	item = UNHIT_CACHE_ITEM;
1303	for (i = 0; i < CACHE_ITEM_NUM; i++) {
1304		addr = Cache.array[i].address;
1305		if ((page_addr >= addr) &&
1306			(page_addr < (addr + Cache.cache_item_size))) {
1307			item = i;
1308			break;
1309		}
1310	}
1311
1312	return item;
1313}
1314
1315/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1316* Function:     FTL_Calculate_LRU
1317* Inputs:       None
1318* Outputs:      None
1319* Description:  Calculate the least recently block in a cache and record its
1320*               index in LRU field.
1321*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1322static void FTL_Calculate_LRU(void)
1323{
1324	u16 i, bCurrentLRU, bTempCount;
1325
1326	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1327		__FILE__, __LINE__, __func__);
1328
1329	bCurrentLRU = 0;
1330	bTempCount = MAX_WORD_VALUE;
1331
1332	for (i = 0; i < CACHE_ITEM_NUM; i++) {
1333		if (Cache.array[i].use_cnt < bTempCount) {
1334			bCurrentLRU = i;
1335			bTempCount = Cache.array[i].use_cnt;
1336		}
1337	}
1338
1339	Cache.LRU = bCurrentLRU;
1340}
1341
1342/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1343* Function:     FTL_Cache_Read_Page
1344* Inputs:       pointer to read buffer, logical address and cache item number
1345* Outputs:      None
1346* Description:  Read the page from the cached block addressed by blocknumber
1347*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1348static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
1349{
1350	u8 *start_addr;
1351
1352	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1353		__FILE__, __LINE__, __func__);
1354
1355	start_addr = Cache.array[cache_item].buf;
1356	start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
1357		DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
1358
1359#if CMD_DMA
1360	GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
1361			DeviceInfo.wPageDataSize, 0);
1362	ftl_cmd_cnt++;
1363#else
1364	memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
1365#endif
1366
1367	if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
1368		Cache.array[cache_item].use_cnt++;
1369}
1370
1371/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1372* Function:     FTL_Cache_Read_All
1373* Inputs:       pointer to read buffer,block address
1374* Outputs:      PASS=0 / FAIL =1
1375* Description:  It reads pages in cache
1376*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1377static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
1378{
1379	int wResult = PASS;
1380	u32 Block;
1381	u32 lba;
1382	u16 Page;
1383	u16 PageCount;
1384	u32 *pbt = (u32 *)g_pBlockTable;
1385	u32 i;
1386
1387	Block = BLK_FROM_ADDR(phy_addr);
1388	Page = PAGE_FROM_ADDR(phy_addr, Block);
1389	PageCount = Cache.pages_per_item;
1390
1391	nand_dbg_print(NAND_DBG_DEBUG,
1392			"%s, Line %d, Function: %s, Block: 0x%x\n",
1393			__FILE__, __LINE__, __func__, Block);
1394
1395	lba = 0xffffffff;
1396	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1397		if ((pbt[i] & (~BAD_BLOCK)) == Block) {
1398			lba = i;
1399			if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
1400				IS_DISCARDED_BLOCK(i)) {
1401				/* Add by yunpeng -2008.12.3 */
1402#if CMD_DMA
1403				GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
1404				PageCount * DeviceInfo.wPageDataSize, 0);
1405				ftl_cmd_cnt++;
1406#else
1407				memset(pData, 0xFF,
1408					PageCount * DeviceInfo.wPageDataSize);
1409#endif
1410				return wResult;
1411			} else {
1412				continue; /* break ?? */
1413			}
1414		}
1415	}
1416
1417	if (0xffffffff == lba)
1418		printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");
1419
1420#if CMD_DMA
1421	wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
1422			PageCount, LLD_CMD_FLAG_MODE_CDMA);
1423	if (DeviceInfo.MLCDevice) {
1424		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
1425		nand_dbg_print(NAND_DBG_DEBUG,
1426			       "Read Counter modified in ftl_cmd_cnt %u"
1427				" Block %u Counter%u\n",
1428			       ftl_cmd_cnt, (unsigned int)Block,
1429			       g_pReadCounter[Block -
1430			       DeviceInfo.wSpectraStartBlock]);
1431
1432		p_BTableChangesDelta =
1433			(struct BTableChangesDelta *)g_pBTDelta_Free;
1434		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
1435		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
1436		p_BTableChangesDelta->RC_Index =
1437			Block - DeviceInfo.wSpectraStartBlock;
1438		p_BTableChangesDelta->RC_Entry_Value =
1439			g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
1440		p_BTableChangesDelta->ValidFields = 0xC0;
1441
1442		ftl_cmd_cnt++;
1443
1444		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
1445		    MAX_READ_COUNTER)
1446			FTL_Read_Disturbance(Block);
1447		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
1448			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1449			FTL_Write_IN_Progress_Block_Table_Page();
1450		}
1451	} else {
1452		ftl_cmd_cnt++;
1453	}
1454#else
1455	wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
1456	if (wResult == FAIL)
1457		return wResult;
1458
1459	if (DeviceInfo.MLCDevice) {
1460		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
1461		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
1462						MAX_READ_COUNTER)
1463			FTL_Read_Disturbance(Block);
1464		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
1465			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1466			FTL_Write_IN_Progress_Block_Table_Page();
1467		}
1468	}
1469#endif
1470	return wResult;
1471}
1472
1473/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1474* Function:     FTL_Cache_Write_All
1475* Inputs:       pointer to cache in sys memory
1476*               address of free block in flash
1477* Outputs:      PASS=0 / FAIL=1
1478* Description:  writes all the pages of the block in cache to flash
1479*
1480*               NOTE:need to make sure this works ok when cache is limited
1481*               to a partial block. This is where copy-back would be
1482*               activated.  This would require knowing which pages in the
1483*               cached block are clean/dirty.Right now we only know if
1484*               the whole block is clean/dirty.
1485*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1486static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
1487{
1488	u16 wResult = PASS;
1489	u32 Block;
1490	u16 Page;
1491	u16 PageCount;
1492
1493	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1494			       __FILE__, __LINE__, __func__);
1495
1496	nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
1497		"on %d\n", cache_block_to_write,
1498		(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
1499
1500	Block = BLK_FROM_ADDR(blk_addr);
1501	Page = PAGE_FROM_ADDR(blk_addr, Block);
1502	PageCount = Cache.pages_per_item;
1503
1504#if CMD_DMA
1505	if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
1506					Block, Page, PageCount)) {
1507		nand_dbg_print(NAND_DBG_WARN,
1508			"NAND Program fail in %s, Line %d, "
1509			"Function: %s, new Bad Block %d generated! "
1510			"Need Bad Block replacing.\n",
1511			__FILE__, __LINE__, __func__, Block);
1512		wResult = FAIL;
1513	}
1514	ftl_cmd_cnt++;
1515#else
1516	if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
1517		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
1518			" Line %d, Function %s, new Bad Block %d generated!"
1519			"Need Bad Block replacing.\n",
1520			__FILE__, __LINE__, __func__, Block);
1521		wResult = FAIL;
1522	}
1523#endif
1524	return wResult;
1525}
1526
1527/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1528* Function:     FTL_Copy_Block
1529* Inputs:       source block address
1530*               Destination block address
1531* Outputs:      PASS=0 / FAIL=1
1532* Description:  used only for static wear leveling to move the block
1533*               containing static data to new blocks(more worn)
1534*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1535int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
1536{
1537	int i, r1, r2, wResult = PASS;
1538
1539	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1540		__FILE__, __LINE__, __func__);
1541
1542	for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
1543		r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
1544					i * DeviceInfo.wPageDataSize);
1545		r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
1546					i * DeviceInfo.wPageDataSize);
1547		if ((ERR == r1) || (FAIL == r2)) {
1548			wResult = FAIL;
1549			break;
1550		}
1551	}
1552
1553	return wResult;
1554}
1555
1556/* Search the block table to find out the least wear block and then return it */
1557static u32 find_least_worn_blk_for_l2_cache(void)
1558{
1559	int i;
1560	u32 *pbt = (u32 *)g_pBlockTable;
1561	u8 least_wear_cnt = MAX_BYTE_VALUE;
1562	u32 least_wear_blk_idx = MAX_U32_VALUE;
1563	u32 phy_idx;
1564
1565	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1566		if (IS_SPARE_BLOCK(i)) {
1567			phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
1568			if (phy_idx > DeviceInfo.wSpectraEndBlock)
1569				printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
1570					"Too big phy block num (%d)\n", phy_idx);
1571			if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
1572				least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
1573				least_wear_blk_idx = i;
1574			}
1575		}
1576	}
1577
1578	nand_dbg_print(NAND_DBG_WARN,
1579		"find_least_worn_blk_for_l2_cache: "
1580		"find block %d with least worn counter (%d)\n",
1581		least_wear_blk_idx, least_wear_cnt);
1582
1583	return least_wear_blk_idx;
1584}
1585
1586
1587
1588/* Get blocks for Level2 Cache */
1589static int get_l2_cache_blks(void)
1590{
1591	int n;
1592	u32 blk;
1593	u32 *pbt = (u32 *)g_pBlockTable;
1594
1595	for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
1596		blk = find_least_worn_blk_for_l2_cache();
1597		if (blk >= DeviceInfo.wDataBlockNum) {
1598			nand_dbg_print(NAND_DBG_WARN,
1599				"find_least_worn_blk_for_l2_cache: "
1600				"No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
1601			return FAIL;
1602		}
1603		/* Tag the free block as discard in block table */
1604		pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
1605		/* Add the free block to the L2 Cache block array */
1606		cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
1607	}
1608
1609	return PASS;
1610}
1611
1612static int erase_l2_cache_blocks(void)
1613{
1614	int i, ret = PASS;
1615	u32 pblk, lblk = BAD_BLOCK;
1616	u64 addr;
1617	u32 *pbt = (u32 *)g_pBlockTable;
1618
1619	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1620			       __FILE__, __LINE__, __func__);
1621
1622	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
1623		pblk = cache_l2.blk_array[i];
1624
1625		/* If the L2 cache block is invalid, then just skip it */
1626		if (MAX_U32_VALUE == pblk)
1627			continue;
1628
1629		BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
1630
1631		addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
1632		if (PASS == GLOB_FTL_Block_Erase(addr)) {
1633			/* Get logical block number of the erased block */
1634			lblk = FTL_Get_Block_Index(pblk);
1635			BUG_ON(BAD_BLOCK == lblk);
1636			/* Tag it as free in the block table */
1637			pbt[lblk] &= (u32)(~DISCARD_BLOCK);
1638			pbt[lblk] |= (u32)(SPARE_BLOCK);
1639		} else {
1640			MARK_BLOCK_AS_BAD(pbt[lblk]);
1641			ret = ERR;
1642		}
1643	}
1644
1645	return ret;
1646}
1647
1648/*
1649 * Merge the valid data page in the L2 cache blocks into NAND.
1650*/
1651static int flush_l2_cache(void)
1652{
1653	struct list_head *p;
1654	struct spectra_l2_cache_list *pnd, *tmp_pnd;
1655	u32 *pbt = (u32 *)g_pBlockTable;
1656	u32 phy_blk, l2_blk;
1657	u64 addr;
1658	u16 l2_page;
1659	int i, ret = PASS;
1660
1661	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1662			       __FILE__, __LINE__, __func__);
1663
1664	if (list_empty(&cache_l2.table.list)) /* No data to flush */
1665		return ret;
1666
1667	//dump_cache_l2_table();
1668
1669	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
1670		g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1671		FTL_Write_IN_Progress_Block_Table_Page();
1672	}
1673
1674	list_for_each(p, &cache_l2.table.list) {
1675		pnd = list_entry(p, struct spectra_l2_cache_list, list);
1676		if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
1677			IS_BAD_BLOCK(pnd->logical_blk_num) ||
1678			IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
1679			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
1680			memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);			
1681		} else {
1682			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
1683			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
1684			ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
1685				phy_blk, 0, DeviceInfo.wPagesPerBlock);
1686			if (ret == FAIL) {
1687				printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
1688			}
1689		}
1690
1691		for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
1692			if (pnd->pages_array[i] != MAX_U32_VALUE) {
1693				l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
1694				l2_page = pnd->pages_array[i] & 0xffff;
1695				ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
1696				if (ret == FAIL) {
1697					printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
1698				}
1699				memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
1700			}
1701		}
1702
1703		/* Find a free block and tag the original block as discarded */
1704		addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
1705		ret = FTL_Replace_Block(addr);
1706		if (ret == FAIL) {
1707			printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
1708		}
1709
1710		/* Write back the updated data into NAND */
1711		phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
1712		if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
1713			nand_dbg_print(NAND_DBG_WARN,
1714				"Program NAND block %d fail in %s, Line %d\n",
1715				phy_blk, __FILE__, __LINE__);
1716			/* This may not be really a bad block. So just tag it as discarded. */
1717			/* Then it has a chance to be erased when garbage collection. */
1718			/* If it is really bad, then the erase will fail and it will be marked */
1719			/* as bad then. Otherwise it will be marked as free and can be used again */
1720			MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
1721			/* Find another free block and write it again */
1722			FTL_Replace_Block(addr);
1723			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
1724			if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
1725				printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
1726					"Some data will be lost!\n", phy_blk);
1727				MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
1728			}
1729		} else {
1730			/* tag the new free block as used block */
1731			pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
1732		}
1733	}
1734
1735	/* Destroy the L2 Cache table and free the memory of all nodes */
1736	list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
1737		list_del(&pnd->list);
1738		kfree(pnd);
1739	}
1740
1741	/* Erase discard L2 cache blocks */
1742	if (erase_l2_cache_blocks() != PASS)
1743		nand_dbg_print(NAND_DBG_WARN,
1744			" Erase L2 cache blocks error in %s, Line %d\n",
1745			__FILE__, __LINE__);
1746
1747	/* Init the Level2 Cache data structure */
1748	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
1749		cache_l2.blk_array[i] = MAX_U32_VALUE;
1750	cache_l2.cur_blk_idx = 0;
1751	cache_l2.cur_page_num = 0;
1752	INIT_LIST_HEAD(&cache_l2.table.list);
1753	cache_l2.table.logical_blk_num = MAX_U32_VALUE;
1754
1755	return ret;
1756}
1757
1758/*
1759 * Write back a changed victim cache item to the Level2 Cache
1760 * and update the L2 Cache table to map the change.
1761 * If the L2 Cache is full, then start to do the L2 Cache flush.
1762*/
1763static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
1764{
1765	u32 logical_blk_num;
1766	u16 logical_page_num;
1767	struct list_head *p;
1768	struct spectra_l2_cache_list *pnd, *pnd_new;
1769	u32 node_size;
1770	int i, found;
1771
1772	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
1773			       __FILE__, __LINE__, __func__);
1774
1775	/*
1776	 * If Level2 Cache table is empty, then it means either:
1777	 * 1. This is the first time that the function called after FTL_init
1778	 * or
1779	 * 2. The Level2 Cache has just been flushed
1780	 *
1781	 * So, 'steal' some free blocks from NAND for L2 Cache using
1782	 * by just mask them as discard in the block table
1783	*/
1784	if (list_empty(&cache_l2.table.list)) {
1785		BUG_ON(cache_l2.cur_blk_idx != 0);
1786		BUG_ON(cache_l2.cur_page_num!= 0);
1787		BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
1788		if (FAIL == get_l2_cache_blks()) {
1789			GLOB_FTL_Garbage_Collection();
1790			if (FAIL == get_l2_cache_blks()) {
1791				printk(KERN_ALERT "Fail to get L2 cache blks!\n");
1792				return FAIL;
1793			}
1794		}
1795	}
1796
1797	logical_blk_num = BLK_FROM_ADDR(logical_addr);
1798	logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1799	BUG_ON(logical_blk_num == MAX_U32_VALUE);
1800
1801	/* Write the cache item data into the current position of L2 Cache */
1802#if CMD_DMA
1803	/*
1804	 * TODO
1805	 */
1806#else
1807	if (FAIL == GLOB_LLD_Write_Page_Main(buf,
1808		cache_l2.blk_array[cache_l2.cur_blk_idx],
1809		cache_l2.cur_page_num, 1)) {
1810		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
1811			"%s, Line %d, new Bad Block %d generated!\n",
1812			__FILE__, __LINE__,
1813			cache_l2.blk_array[cache_l2.cur_blk_idx]);
1814
1815		/* TODO: tag the current block as bad and try again */
1816
1817		return FAIL;
1818	}
1819#endif
1820
1821	/* 
1822	 * Update the L2 Cache table.
1823	 *
1824	 * First seaching in the table to see whether the logical block
1825	 * has been mapped. If not, then kmalloc a new node for the
1826	 * logical block, fill data, and then insert it to the list.
1827	 * Otherwise, just update the mapped node directly.
1828	 */
1829	found = 0;
1830	list_for_each(p, &cache_l2.table.list) {
1831		pnd = list_entry(p, struct spectra_l2_cache_list, list);
1832		if (pnd->logical_blk_num == logical_blk_num) {
1833			pnd->pages_array[logical_page_num] =
1834				(cache_l2.cur_blk_idx << 16) |
1835				cache_l2.cur_page_num;
1836			found = 1;
1837			break;
1838		}
1839	}
1840	if (!found) { /* Create new node for the logical block here */
1841
1842		/* The logical pages to physical pages map array is
1843		 * located at the end of struct spectra_l2_cache_list.
1844		 */ 
1845		node_size = sizeof(struct spectra_l2_cache_list) +
1846			sizeof(u32) * DeviceInfo.wPagesPerBlock;
1847		pnd_new = kmalloc(node_size, GFP_ATOMIC);
1848		if (!pnd_new) {
1849			printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
1850				__FILE__, __LINE__);
1851			/* 
1852			 * TODO: Need to flush all the L2 cache into NAND ASAP
1853			 * since no memory available here
1854			 */
1855		}
1856		pnd_new->logical_blk_num = logical_blk_num;
1857		for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
1858			pnd_new->pages_array[i] = MAX_U32_VALUE;
1859		pnd_new->pages_array[logical_page_num] =
1860			(cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
1861		list_add(&pnd_new->list, &cache_l2.table.list);
1862	}
1863
1864	/* Increasing the current position pointer of the L2 Cache */
1865	cache_l2.cur_page_num++;
1866	if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
1867		cache_l2.cur_blk_idx++;
1868		if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
1869			/* The L2 Cache is full. Need to flush it now */
1870			nand_dbg_print(NAND_DBG_WARN,
1871				"L2 Cache is full, will start to flush it\n");
1872			flush_l2_cache();
1873		} else {
1874			cache_l2.cur_page_num = 0;
1875		}
1876	}
1877
1878	return PASS;
1879}
1880
1881/*
1882 * Search in the Level2 Cache table to find the cache item.
1883 * If find, read the data from the NAND page of L2 Cache,
1884 * Otherwise, return FAIL.
1885 */
1886static int search_l2_cache(u8 *buf, u64 logical_addr)
1887{
1888	u32 logical_blk_num;
1889	u16 logical_page_num;
1890	struct list_head *p;
1891	struct spectra_l2_cache_list *pnd;
1892	u32 tmp = MAX_U32_VALUE;
1893	u32 phy_blk;
1894	u16 phy_page;
1895	int ret = FAIL;
1896
1897	logical_blk_num = BLK_FROM_ADDR(logical_addr);
1898	logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1899
1900	list_for_each(p, &cache_l2.table.list) {
1901		pnd = list_entry(p, struct spectra_l2_cache_list, list);
1902		if (pnd->logical_blk_num == logical_blk_num) {
1903			tmp = pnd->pages_array[logical_page_num];
1904			break;
1905		}
1906	}
1907
1908	if (tmp != MAX_U32_VALUE) { /* Found valid map */
1909		phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
1910		phy_page = tmp & 0xFFFF;
1911#if CMD_DMA
1912		/* TODO */
1913#else
1914		ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
1915#endif
1916	}
1917
1918	return ret;
1919}
1920
1921/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1922* Function:     FTL_Cache_Write_Page
1923* Inputs:       Pointer to buffer, page address, cache block number
1924* Outputs:      PASS=0 / FAIL=1
1925* Description:  It writes the data in Cache Block
1926*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1927static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
1928				u8 cache_blk, u16 flag)
1929{
1930	u8 *pDest;
1931	u64 addr;
1932
1933	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1934		__FILE__, __LINE__, __func__);
1935
1936	addr = Cache.array[cache_blk].address;
1937	pDest = Cache.array[cache_blk].buf;
1938
1939	pDest += (unsigned long)(page_addr - addr);
1940	Cache.array[cache_blk].changed = SET;
1941#if CMD_DMA
1942#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1943	int_cache[ftl_cmd_cnt].item = cache_blk;
1944	int_cache[ftl_cmd_cnt].cache.address =
1945			Cache.array[cache_blk].address;
1946	int_cache[ftl_cmd_cnt].cache.changed =
1947			Cache.array[cache_blk].changed;
1948#endif
1949	GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
1950	ftl_cmd_cnt++;
1951#else
1952	memcpy(pDest, pData, DeviceInfo.wPageDataSize);
1953#endif
1954	if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
1955		Cache.array[cache_blk].use_cnt++;
1956}
1957
1958/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1959* Function:     FTL_Cache_Write
1960* Inputs:       none
1961* Outputs:      PASS=0 / FAIL=1
1962* Description:  It writes least frequently used Cache block to flash if it
1963*               has been changed
1964*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1965static int FTL_Cache_Write(void)
1966{
1967	int i, bResult = PASS;
1968	u16 bNO, least_count = 0xFFFF;
1969
1970	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1971		__FILE__, __LINE__, __func__);
1972
1973	FTL_Calculate_LRU();
1974
1975	bNO = Cache.LRU;
1976	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
1977		"Least used cache block is %d\n", bNO);
1978
1979	if (Cache.array[bNO].changed != SET)
1980		return bResult;
1981
1982	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
1983		" Block %d containing logical block %d is dirty\n",
1984		bNO,
1985		(u32)(Cache.array[bNO].address >>
1986		DeviceInfo.nBitsInBlockDataSize));
1987#if CMD_DMA
1988#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1989	int_cache[ftl_cmd_cnt].item = bNO;
1990	int_cache[ftl_cmd_cnt].cache.address =
1991				Cache.array[bNO].address;
1992	int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
1993#endif
1994#endif
1995	bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
1996			Cache.array[bNO].address);
1997	if (bResult != ERR)
1998		Cache.array[bNO].changed = CLEAR;
1999
2000	least_count = Cache.array[bNO].use_cnt;
2001
2002	for (i = 0; i < CACHE_ITEM_NUM; i++) {
2003		if (i == bNO)
2004			continue;
2005		if (Cache.array[i].use_cnt > 0)
2006			Cache.array[i].use_cnt -= least_count;
2007	}
2008
2009	return bResult;
2010}
2011
2012/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2013* Function:     FTL_Cache_Read
2014* Inputs:       Page address
2015* Outputs:      PASS=0 / FAIL=1
2016* Description:  It reads the block from device in Cache Block
2017*               Set the LRU count to 1
2018*               Mark the Cache Block as clean
2019*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2020static int FTL_Cache_Read(u64 logical_addr)
2021{
2022	u64 item_addr, phy_addr;
2023	u16 num;
2024	int ret;
2025
2026	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2027		__FILE__, __LINE__, __func__);
2028
2029	num = Cache.LRU; /* The LRU cache item will be overwritten */
2030
2031	item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
2032		Cache.cache_item_size;
2033	Cache.array[num].address = item_addr;
2034	Cache.array[num].use_cnt = 1;
2035	Cache.array[num].changed = CLEAR;
2036
2037#if CMD_DMA
2038#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2039	int_cache[ftl_cmd_cnt].item = num;
2040	int_cache[ftl_cmd_cnt].cache.address =
2041			Cache.array[num].address;
2042	int_cache[ftl_cmd_cnt].cache.changed =
2043			Cache.array[num].changed;
2044#endif
2045#endif
2046	/*
2047	 * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
2048	 * Otherwise, read it from NAND
2049	 */
2050	ret = search_l2_cache(Cache.array[num].buf, logical_addr);
2051	if (PASS == ret) /* Hit in L2 Cache */
2052		return ret;
2053
2054	/* Compute the physical start address of NAND device according to */
2055	/* the logical start address of the cache item (LRU cache item) */
2056	phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
2057		GLOB_u64_Remainder(item_addr, 2);
2058
2059	return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
2060}
2061
2062/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2063* Function:     FTL_Check_Block_Table
2064* Inputs:       ?
2065* Outputs:      PASS=0 / FAIL=1
2066* Description:  It checks the correctness of each block table entry
2067*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2068static int FTL_Check_Block_Table(int wOldTable)
2069{
2070	u32 i;
2071	int wResult = PASS;
2072	u32 blk_idx;
2073	u32 *pbt = (u32 *)g_pBlockTable;
2074	u8 *pFlag = flag_check_blk_table;
2075
2076	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2077		       __FILE__, __LINE__, __func__);
2078
2079	if (NULL != pFlag) {
2080		memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
2081		for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
2082			blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
2083
2084			/*
2085			 * 20081006/KBV - Changed to pFlag[i] reference
2086			 * to avoid buffer overflow
2087			 */
2088
2089			/*
2090			 * 2008-10-20 Yunpeng Note: This change avoid
2091			 * buffer overflow, but changed function of
2092			 * the code, so it should be re-write later
2093			 */
2094			if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
2095				PASS == pFlag[i]) {
2096				wResult = FAIL;
2097				break;
2098			} else {
2099				pFlag[i] = PASS;
2100			}
2101		}
2102	}
2103
2104	return wResult;
2105}
2106
2107
2108/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2109* Function:     FTL_Write_Block_Table
2110* Inputs:       flasg
2111* Outputs:      0=Block Table was updated. No write done. 1=Block write needs to
2112* happen. -1 Error
2113* Description:  It writes the block table
2114*               Block table always mapped to LBA 0 which inturn mapped
2115*               to any physical block
2116*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2117static int FTL_Write_Block_Table(int wForce)
2118{
2119	u32 *pbt = (u32 *)g_pBlockTable;
2120	int wSuccess = PASS;
2121	u32 wTempBlockTableIndex;
2122	u16 bt_pages, new_bt_offset;
2123	u8 blockchangeoccured = 0;
2124
2125	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2126			       __FILE__, __LINE__, __func__);
2127
2128	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2129
2130	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
2131		return 0;
2132
2133	if (PASS == wForce) {
2134		g_wBlockTableOffset =
2135			(u16)(DeviceInfo.wPagesPerBlock - bt_pages);
2136#if CMD_DMA
2137		p_BTableChangesDelta =
2138			(struct BTableChangesDelta *)g_pBTDelta_Free;
2139		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2140
2141		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
2142		p_BTableChangesDelta->g_wBlockTableOffset =
2143			g_wBlockTableOffset;
2144		p_BTableChangesDelta->ValidFields = 0x01;
2145#endif
2146	}
2147
2148	nand_dbg_print(NAND_DBG_DEBUG,
2149		"Inside FTL_Write_Block_Table: block %d Page:%d\n",
2150		g_wBlockTableIndex, g_wBlockTableOffset);
2151
2152	do {
2153		new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
2154		if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
2155			(new_bt_offset > DeviceInfo.wPagesPerBlock) ||
2156			(FAIL == wSuccess)) {
2157			wTempBlockTableIndex = FTL_Replace_Block_Table();
2158			if (BAD_BLOCK == wTempBlockTableIndex)
2159				return ERR;
2160			if (!blockchangeoccured) {
2161				bt_block_changed = 1;
2162				blockchangeoccured = 1;
2163			}
2164
2165			g_wBlockTableIndex = wTempBlockTableIndex;
2166			g_wBlockTableOffset = 0;
2167			pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
2168#if CMD_DMA
2169			p_BTableChangesDelta =
2170				(struct BTableChangesDelta *)g_pBTDelta_Free;
2171			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2172
2173			p_BTableChangesDelta->ftl_cmd_cnt =
2174				    ftl_cmd_cnt;
2175			p_BTableChangesDelta->g_wBlockTableOffset =
2176				    g_wBlockTableOffset;
2177			p_BTableChangesDelta->g_wBlockTableIndex =
2178				    g_wBlockTableIndex;
2179			p_BTableChangesDelta->ValidFields = 0x03;
2180
2181			p_BTableChangesDelta =
2182				(struct BTableChangesDelta *)g_pBTDelta_Free;
2183			g_pBTDelta_Free +=
2184				sizeof(struct BTableChangesDelta);
2185
2186			p_BTableChangesDelta->ftl_cmd_cnt =
2187				    ftl_cmd_cnt;
2188			p_BTableChangesDelta->BT_Index =
2189				    BLOCK_TABLE_INDEX;
2190			p_BTableChangesDelta->BT_Entry_Value =
2191				    pbt[BLOCK_TABLE_INDEX];
2192			p_BTableChangesDelta->ValidFields = 0x0C;
2193#endif
2194		}
2195
2196		wSuccess = FTL_Write_Block_Table_Data();
2197		if (FAIL == wSuccess)
2198			MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
2199	} while (FAIL == wSuccess);
2200
2201	g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
2202
2203	return 1;
2204}
2205
2206static int  force_format_nand(void)
2207{
2208	u32 i;
2209
2210	/* Force erase the whole unprotected physical partiton of NAND */
2211	printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
2212	printk(KERN_ALERT "From phyical block %d to %d\n",
2213		DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
2214	for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
2215		if (GLOB_LLD_Erase_Block(i))
2216			printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
2217	}
2218	printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
2219	while(1);
2220
2221	return PASS;
2222}
2223
2224int GLOB_FTL_Flash_Format(void)
2225{
2226	//return FTL_Format_Flash(1);
2227	return force_format_nand();
2228
2229}
2230
2231/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2232* Function:     FTL_Search_Block_Table_IN_Block
2233* Inputs:       Block Number
2234*               Pointer to page
2235* Outputs:      PASS / FAIL
2236*               Page contatining the block table
2237* Description:  It searches the block table in the block
2238*               passed as an argument.
2239*
2240*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2241static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
2242						u8 BT_Tag, u16 *Page)
2243{
2244	u16 i, j, k;
2245	u16 Result = PASS;
2246	u16 Last_IPF = 0;
2247	u8  BT_Found = 0;
2248	u8 *tagarray;
2249	u8 *tempbuf = tmp_buf_search_bt_in_block;
2250	u8 *pSpareBuf = spare_buf_search_bt_in_block;
2251	u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
2252	u8 bt_flag_last_page = 0xFF;
2253	u8 search_in_previous_pages = 0;
2254	u16 bt_pages;
2255
2256	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2257			       __FILE__, __LINE__, __func__);
2258
2259	nand_dbg_print(NAND_DBG_DEBUG,
2260		       "Searching block table in %u block\n",
2261		       (unsigned int)BT_Block);
2262
2263	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2264
2265	for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
2266				i += (bt_pages + 1)) {
2267		nand_dbg_print(NAND_DBG_DEBUG,
2268			       "Searching last IPF: %d\n", i);
2269		Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
2270							BT_Block, i, 1);
2271
2272		if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
2273			if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
2274				continue;
2275			} else {
2276				search_in_previous_pages = 1;
2277				Last_IPF = i;
2278			}
2279		}
2280
2281		if (!search_in_previous_pages) {
2282			if (i != bt_pages) {
2283				i -= (bt_pages + 1);
2284				Last_IPF = i;
2285			}
2286		}
2287
2288		if (0 == Last_IPF)
2289			break;
2290
2291		if (!search_in_previous_pages) {
2292			i = i + 1;
2293			nand_dbg_print(NAND_DBG_DEBUG,
2294				"Reading the spare area of Block %u Page %u",
2295				(unsigned int)BT_Block, i);
2296			Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
2297							BT_Block, i, 1);
2298			nand_dbg_print(NAND_DBG_DEBUG,
2299				"Reading the spare area of Block %u Page %u",
2300				(unsigned int)BT_Block, i + bt_pages - 1);
2301			Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2302				BT_Block, i + bt_pages - 1, 1);
2303
2304			k = 0;
2305			j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2306			if (j) {
2307				for (; k < j; k++) {
2308					if (tagarray[k] == BT_Tag)
2309						break;
2310				}
2311			}
2312
2313			if (k < j)
2314				bt_flag = tagarray[k];
2315			else
2316				Result = FAIL;
2317
2318			if (Result == PASS) {
2319				k = 0;
2320				j = FTL_Extract_Block_Table_Tag(
2321					pSpareBufBTLastPage, &tagarray);
2322				if (j) {
2323					for (; k < j; k++) {
2324						if (tagarray[k] == BT_Tag)
2325							break;
2326					}
2327				}
2328
2329				if (k < j)
2330					bt_flag_last_page = tagarray[k];
2331				else
2332					Result = FAIL;
2333
2334				if (Result == PASS) {
2335					if (bt_flag == bt_flag_last_page) {
2336						nand_dbg_print(NAND_DBG_DEBUG,
2337							"Block table is found"
2338							" in page after IPF "
2339							"at block %d "
2340							"page %d\n",
2341							(int)BT_Block, i);
2342						BT_Found = 1;
2343						*Page  = i;
2344						g_cBlockTableStatus =
2345							CURRENT_BLOCK_TABLE;
2346						break;
2347					} else {
2348						Result = FAIL;
2349					}
2350				}
2351			}
2352		}
2353
2354		if (search_in_previous_pages)
2355			i = i - bt_pages;
2356		else
2357			i = i - (bt_pages + 1);
2358
2359		Result = PASS;
2360
2361		nand_dbg_print(NAND_DBG_DEBUG,
2362			"Reading the spare area of Block %d Page %d",
2363			(int)BT_Block, i);
2364
2365		Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2366		nand_dbg_print(NAND_DBG_DEBUG,
2367			"Reading the spare area of Block %u Page %u",
2368			(unsigned int)BT_Block, i + bt_pages - 1);
2369
2370		Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2371					BT_Block, i + bt_pages - 1, 1);
2372
2373		k = 0;
2374		j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2375		if (j) {
2376			for (; k < j; k++) {
2377				if (tagarray[k] == BT_Tag)
2378					break;
2379			}
2380		}
2381
2382		if (k < j)
2383			bt_flag = tagarray[k];
2384		else
2385			Result = FAIL;
2386
2387		if (Result == PASS) {
2388			k = 0;
2389			j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2390						&tagarray);
2391			if (j) {
2392				for (; k < j; k++) {
2393					if (tagarray[k] == BT_Tag)
2394						break;
2395				}
2396			}
2397
2398			if (k < j) {
2399				bt_flag_last_page = tagarray[k];
2400			} else {
2401				Result = FAIL;
2402				break;
2403			}
2404
2405			if (Result == PASS) {
2406				if (bt_flag == bt_flag_last_page) {
2407					nand_dbg_print(NAND_DBG_DEBUG,
2408						"Block table is found "
2409						"in page prior to IPF "
2410						"at block %u page %d\n",
2411						(unsigned int)BT_Block, i);
2412					BT_Found = 1;
2413					*Page  = i;
2414					g_cBlockTableStatus =
2415						IN_PROGRESS_BLOCK_TABLE;
2416					break;
2417				} else {
2418					Result = FAIL;
2419					break;
2420				}
2421			}
2422		}
2423	}
2424
2425	if (Result == FAIL) {
2426		if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
2427			BT_Found = 1;
2428			*Page = i - (bt_pages + 1);
2429		}
2430		if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
2431			goto func_return;
2432	}
2433
2434	if (Last_IPF == 0) {
2435		i = 0;
2436		Result = PASS;
2437		nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
2438			"Block %u Page %u", (unsigned int)BT_Block, i);
2439
2440		Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2441		nand_dbg_print(NAND_DBG_DEBUG,
2442			"Reading the spare area of Block %u Page %u",
2443			(unsigned int)BT_Block, i + bt_pages - 1);
2444		Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2445					BT_Block, i + bt_pages - 1, 1);
2446
2447		k = 0;
2448		j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2449		if (j) {
2450			for (; k < j; k++) {
2451				if (tagarray[k] == BT_Tag)
2452					break;
2453			}
2454		}
2455
2456		if (k < j)
2457			bt_flag = tagarray[k];
2458		else
2459			Result = FAIL;
2460
2461		if (Result == PASS) {
2462			k = 0;
2463			j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2464							&tagarray);
2465			if (j) {
2466				for (; k < j; k++) {
2467					if (tagarray[k] == BT_Tag)
2468						break;
2469				}
2470			}
2471
2472			if (k < j)
2473				bt_flag_last_page = tagarray[k];
2474			else
2475				Result = FAIL;
2476
2477			if (Result == PASS) {
2478				if (bt_flag == bt_flag_last_page) {
2479					nand_dbg_print(NAND_DBG_DEBUG,
2480						"Block table is found "
2481						"in page after IPF at "
2482						"block %u page %u\n",
2483						(unsigned int)BT_Block,
2484						(unsigned int)i);
2485					BT_Found = 1;
2486					*Page  = i;
2487					g_cBlockTableStatus =
2488						CURRENT_BLOCK_TABLE;
2489					goto func_return;
2490				} else {
2491					Result = FAIL;
2492				}
2493			}
2494		}
2495
2496		if (Result == FAIL)
2497			goto func_return;
2498	}
2499func_return:
2500	return Result;
2501}
2502
2503u8 *get_blk_table_start_addr(void)
2504{
2505	return g_pBlockTable;
2506}
2507
2508unsigned long get_blk_table_len(void)
2509{
2510	return DeviceInfo.wDataBlockNum * sizeof(u32);
2511}
2512
2513u8 *get_wear_leveling_table_start_addr(void)
2514{
2515	return g_pWearCounter;
2516}
2517
2518unsigned long get_wear_leveling_table_len(void)
2519{
2520	return DeviceInfo.wDataBlockNum * sizeof(u8);
2521}
2522
2523/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2524* Function:     FTL_Read_Block_Table
2525* Inputs:       none
2526* Outputs:      PASS / FAIL
2527* Description:  read the flash spare area and find a block containing the
2528*               most recent block table(having largest block_table_counter).
2529*               Find the last written Block table in this block.
2530*               Check the correctness of Block Table
2531*               If CDMA is enabled, this function is called in
2532*               polling mode.
2533*               We don't need to store changes in Block table in this
2534*               function as it is called only at initialization
2535*
2536*               Note: Currently this function is called at initialization
2537*               before any read/erase/write command issued to flash so,
2538*               there is no need to wait for CDMA list to complete as of now
2539*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2540static int FTL_Read_Block_Table(void)
2541{
2542	u16 i = 0;
2543	int k, j;
2544	u8 *tempBuf, *tagarray;
2545	int wResult = FAIL;
2546	int status = FAIL;
2547	u8 block_table_found = 0;
2548	int search_result;
2549	u32 Block;
2550	u16 Page = 0;
2551	u16 PageCount;
2552	u16 bt_pages;
2553	int wBytesCopied = 0, tempvar;
2554
2555	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2556			       __FILE__, __LINE__, __func__);
2557
2558	tempBuf = tmp_buf1_read_blk_table;
2559	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2560
2561	for (j = DeviceInfo.wSpectraStartBlock;
2562		j <= (int)DeviceInfo.wSpectraEndBlock;
2563			j++) {
2564		status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
2565		k = 0;
2566		i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
2567		if (i) {
2568			status  = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
2569								j, 0, 1);
2570			for (; k < i; k++) {
2571				if (tagarray[k] == tempBuf[3])
2572					break;
2573			}
2574		}
2575
2576		if (k < i)
2577			k = tagarray[k];
2578		else
2579			continue;
2580
2581		nand_dbg_print(NAND_DBG_DEBUG,
2582				"Block table is contained in Block %d %d\n",
2583				       (unsigned int)j, (unsigned int)k);
2584
2585		if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
2586			g_pBTBlocks[k-FIRST_BT_ID] = j;
2587			block_table_found = 1;
2588		} else {
2589			printk(KERN_ERR "FTL_Read_Block_Table -"
2590				"This should never happens. "
2591				"Two block table have same counter %u!\n", k);
2592		}
2593	}
2594
2595	if (block_table_found) {
2596		if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
2597		g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
2598			j = LAST_BT_ID;
2599			while ((j > FIRST_BT_ID) &&
2600			(g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
2601				j--;
2602			if (j == FIRST_BT_ID) {
2603				j = LAST_BT_ID;
2604				last_erased = LAST_BT_ID;
2605			} else {
2606				last_erased = (u8)j + 1;
2607				while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
2608					g_pBTBlocks[j - FIRST_BT_ID]))
2609					j--;
2610			}
2611		} else {
2612			j = FIRST_BT_ID;
2613			while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
2614				j++;
2615			last_erased = (u8)j;
2616			while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
2617				g_pBTBlocks[j - FIRST_BT_ID]))
2618				j++;
2619			if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
2620				j--;
2621		}
2622
2623		if (last_erased > j)
2624			j += (1 + LAST_BT_ID - FIRST_BT_ID);
2625
2626		for (; (j >= last_erased) && (FAIL == wResult); j--) {
2627			i = (j - FIRST_BT_ID) %
2628				(1 + LAST_BT_ID - FIRST_BT_ID);
2629			search_result =
2630			FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
2631						i + FIRST_BT_ID, &Page);
2632			if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
2633				block_table_found = 0;
2634
2635			while ((search_result == PASS) && (FAIL == wResult)) {
2636				nand_dbg_print(NAND_DBG_DEBUG,
2637					"FTL_Read_Block_Table:"
2638					"Block: %u Page: %u "
2639					"contains block table\n",
2640					(unsigned int)g_pBTBlocks[i],
2641					(unsigned int)Page);
2642
2643				tempBuf = tmp_buf2_read_blk_table;
2644
2645				for (k = 0; k < bt_pages; k++) {
2646					Block = g_pBTBlocks[i];
2647					PageCount = 1;
2648
2649					status  =
2650					GLOB_LLD_Read_Page_Main_Polling(
2651					tempBuf, Block, Page, PageCount);
2652
2653					tempvar = k ? 0 : 4;
2654
2655					wBytesCopied +=
2656					FTL_Copy_Block_Table_From_Flash(
2657					tempBuf + tempvar,
2658					DeviceInfo.wPageDataSize - tempvar,
2659					wBytesCopied);
2660
2661					Page++;
2662				}
2663
2664				wResult = FTL_Check_Block_Table(FAIL);
2665				if (FAIL == wResult) {
2666					block_table_found = 0;
2667					if (Page > bt_pages)
2668						Page -= ((bt_pages<<1) + 1);
2669					else
2670						search_result = FAIL;
2671				}
2672			}
2673		}
2674	}
2675
2676	if (PASS == wResult) {
2677		if (!block_table_found)
2678			FTL_Execute_SPL_Recovery();
2679
2680		if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
2681			g_wBlockTableOffset = (u16)Page + 1;
2682		else
2683			g_wBlockTableOffset = (u16)Page - bt_pages;
2684
2685		g_wBlockTableIndex = (u32)g_pBTBlocks[i];
2686
2687#if CMD_DMA
2688		if (DeviceInfo.MLCDevice)
2689			memcpy(g_pBTStartingCopy, g_pBlockTable,
2690				DeviceInfo.wDataBlockNum * sizeof(u32)
2691				+ DeviceInfo.wDataBlockNum * sizeof(u8)
2692				+ DeviceInfo.wDataBlockNum * sizeof(u16));
2693		else
2694			memcpy(g_pBTStartingCopy, g_pBlockTable,
2695				DeviceInfo.wDataBlockNum * sizeof(u32)
2696				+ DeviceInfo.wDataBlockNum * sizeof(u8));
2697#endif
2698	}
2699
2700	if (FAIL == wResult)
2701		printk(KERN_ERR "Yunpeng - "
2702		"Can not find valid spectra block table!\n");
2703
2704#if AUTO_FORMAT_FLASH
2705	if (FAIL == wResult) {
2706		nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
2707		wResult = FTL_Format_Flash(0);
2708	}
2709#endif
2710
2711	return wResult;
2712}
2713
2714/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2715* Function:     FTL_Get_Page_Num
2716* Inputs:       Size in bytes
2717* Outputs:      Size in pages
2718* Description:  It calculates the pages required for the length passed
2719*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2720static u32 FTL_Get_Page_Num(u64 length)
2721{
2722	return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
2723		(GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
2724}
2725
2726/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2727* Function:     FTL_Get_Physical_Block_Addr
2728* Inputs:       Block Address (byte format)
2729* Outputs:      Physical address of the block.
2730* Description:  It translates LBA to PBA by returning address stored
2731*               at the LBA location in the block table
2732*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2733static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
2734{
2735	u32 *pbt;
2736	u64 physical_addr;
2737
2738	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2739		__FILE__, __LINE__, __func__);
2740
2741	pbt = (u32 *)g_pBlockTable;
2742	physical_addr = (u64) DeviceInfo.wBlockDataSize *
2743		(pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
2744
2745	return physical_addr;
2746}
2747
2748/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2749* Function:     FTL_Get_Block_Index
2750* Inputs:       Physical Block no.
2751* Outputs:      Logical block no. /BAD_BLOCK
2752* Description:  It returns the logical block no. for the PBA passed
2753*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2754static u32 FTL_Get_Block_Index(u32 wBlockNum)
2755{
2756	u32 *pbt = (u32 *)g_pBlockTable;
2757	u32 i;
2758
2759	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2760		       __FILE__, __LINE__, __func__);
2761
2762	for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
2763		if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
2764			return i;
2765
2766	return BAD_BLOCK;
2767}
2768
2769/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2770* Function:     GLOB_FTL_Wear_Leveling
2771* Inputs:       none
2772* Outputs:      PASS=0
2773* Description:  This is static wear leveling (done by explicit call)
2774*               do complete static wear leveling
2775*               do complete garbage collection
2776*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2777int GLOB_FTL_Wear_Leveling(void)
2778{
2779	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2780		__FILE__, __LINE__, __func__);
2781
2782	FTL_Static_Wear_Leveling();
2783	GLOB_FTL_Garbage_Collection();
2784
2785	return PASS;
2786}
2787
2788static void find_least_most_worn(u8 *chg,
2789	u32 *least_idx, u8 *least_cnt,
2790	u32 *most_idx, u8 *most_cnt)
2791{
2792	u32 *pbt = (u32 *)g_pBlockTable;
2793	u32 idx;
2794	u8 cnt;
2795	int i;
2796
2797	for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
2798		if (IS_BAD_BLOCK(i) || PASS == chg[i])
2799			continue;
2800
2801		idx = (u32) ((~BAD_BLOCK) & pbt[i]);
2802		cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
2803
2804		if (IS_SPARE_BLOCK(i)) {
2805			if (cnt > *most_cnt) {
2806				*most_cnt = cnt;
2807				*most_idx = idx;
2808			}
2809		}
2810
2811		if (IS_DATA_BLOCK(i)) {
2812			if (cnt < *least_cnt) {
2813				*least_cnt = cnt;
2814				*least_idx = idx;
2815			}
2816		}
2817
2818		if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
2819			debug_boundary_error(*most_idx,
2820				DeviceInfo.wDataBlockNum, 0);
2821			debug_boundary_error(*least_idx,
2822				DeviceInfo.wDataBlockNum, 0);
2823			continue;
2824		}
2825	}
2826}
2827
2828static int move_blks_for_wear_leveling(u8 *chg,
2829	u32 *least_idx, u32 *rep_blk_num, int *result)
2830{
2831	u32 *pbt = (u32 *)g_pBlockTable;
2832	u32 rep_blk;
2833	int j, ret_cp_blk, ret_erase;
2834	int ret = PASS;
2835
2836	chg[*least_idx] = PASS;
2837	debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);
2838
2839	rep_blk = FTL_Replace_MWBlock();
2840	if (rep_blk != BAD_BLOCK) {
2841		nand_dbg_print(NAND_DBG_DEBUG,
2842			"More than two spare blocks exist so do it\n");
2843		nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
2844				rep_blk);
2845
2846		chg[rep_blk] = PASS;
2847
2848		if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
2849			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2850			FTL_Write_IN_Progress_Block_Table_Page();
2851		}
2852
2853		for (j = 0; j < RETRY_TIMES; j++) {
2854			ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
2855				DeviceInfo.wBlockDataSize,
2856				(u64)rep_blk * DeviceInfo.wBlockDataSize);
2857			if (FAIL == ret_cp_blk) {
2858				ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
2859					* DeviceInfo.wBlockDataSize);
2860				if (FAIL == ret_erase)
2861					MARK_BLOCK_AS_BAD(pbt[rep_blk]);
2862			} else {
2863				nand_dbg_print(NAND_DBG_DEBUG,
2864					"FTL_Copy_Block == OK\n");
2865				break;
2866			}
2867		}
2868
2869		if (j < RETRY_TIMES) {
2870			u32 tmp;
2871			u32 old_idx = FTL_Get_Block_Index(*least_idx);
2872			u32 rep_idx = FTL_Get_Block_Index(rep_blk);
2873			tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
2874			pbt[old_idx] = (u32)((~SPARE_BLOCK) &
2875							pbt[rep_idx]);
2876			pbt[rep_idx] = tmp;
2877#if CMD_DMA
2878			p_BTableChangesDelta = (struct BTableChangesDelta *)
2879						g_pBTDelta_Free;
2880			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2881			p_BTableChangesDelta->ftl_cmd_cnt =
2882						ftl_cmd_cnt;
2883			p_BTableChangesDelta->BT_Index = old_idx;
2884			p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
2885			p_BTableChangesDelta->ValidFields = 0x0C;
2886
2887			p_BTableChangesDelta = (struct BTableChangesDelta *)
2888						g_pBTDelta_Free;
2889			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2890
2891			p_BTableChangesDelta->ftl_cmd_cnt =
2892						ftl_cmd_cnt;
2893			p_BTableChangesDelta->BT_Index = rep_idx;
2894			p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
2895			p_BTableChangesDelta->ValidFields = 0x0C;
2896#endif
2897		} else {
2898			pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
2899#if CMD_DMA
2900			p_BTableChangesDelta = (struct BTableChangesDelta *)
2901						g_pBTDelta_Free;
2902			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2903
2904			p_BTableChangesDelta->ftl_cmd_cnt =
2905						ftl_cmd_cnt;
2906			p_BTableChangesDelta->BT_Index =
2907					FTL_Get_Block_Index(rep_blk);
2908			p_BTableChangesDelta->BT_Entry_Value =
2909					pbt[FTL_Get_Block_Index(rep_blk)];
2910			p_BTableChangesDelta->ValidFields = 0x0C;
2911#endif
2912			*result = FAIL;
2913			ret = FAIL;
2914		}
2915
2916		if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
2917			ret = FAIL;
2918	} else {
2919		printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
2920		ret = FAIL;
2921	}
2922
2923	return ret;
2924}
2925
2926/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2927* Function:     FTL_Static_Wear_Leveling
2928* Inputs:       none
2929* Outputs:      PASS=0 / FAIL=1
2930* Description:  This is static wear leveling (done by explicit call)
2931*               search for most&least used
2932*               if difference < GATE:
2933*                   update the block table with exhange
2934*                   mark block table in flash as IN_PROGRESS
2935*                   copy flash block
2936*               the caller should handle GC clean up after calling this function
2937*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2938int FTL_Static_Wear_Leveling(void)
2939{
2940	u8 most_worn_cnt;
2941	u8 least_worn_cnt;
2942	u32 most_worn_idx;
2943	u32 least_worn_idx;
2944	int result = PASS;
2945	int go_on = PASS;
2946	u32 replaced_blks = 0;
2947	u8 *chang_flag = flags_static_wear_leveling;
2948
2949	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2950		       __FILE__, __LINE__, __func__);
2951
2952	if (!chang_flag)
2953		return FAIL;
2954
2955	memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
2956	while (go_on == PASS) {
2957		nand_dbg_print(NAND_DBG_DEBUG,
2958			"starting static wear leveling\n");
2959		most_worn_cnt = 0;
2960		least_worn_cnt = 0xFF;
2961		least_worn_idx = BLOCK_TABLE_INDEX;
2962		most_worn_idx = BLOCK_TABLE_INDEX;
2963
2964		find_least_most_worn(chang_flag, &least_worn_idx,
2965			&least_worn_cnt, &most_worn_idx, &most_worn_cnt);
2966
2967		nand_dbg_print(NAND_DBG_DEBUG,
2968			"Used and least worn is block %u, whos count is %u\n",
2969			(unsigned int)least_worn_idx,
2970			(unsigned int)least_worn_cnt);
2971
2972		nand_dbg_print(NAND_DBG_DEBUG,
2973			"Free and  most worn is block %u, whos count is %u\n",
2974			(unsigned int)most_worn_idx,
2975			(unsigned int)most_worn_cnt);
2976
2977		if ((most_worn_cnt > least_worn_cnt) &&
2978			(most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
2979			go_on = move_blks_for_wear_leveling(chang_flag,
2980				&least_worn_idx, &replaced_blks, &result);
2981		else
2982			go_on = FAIL;
2983	}
2984
2985	return result;
2986}
2987
2988#if CMD_DMA
2989static int do_garbage_collection(u32 discard_cnt)
2990{
2991	u32 *pbt = (u32 *)g_pBlockTable;
2992	u32 pba;
2993	u8 bt_block_erased = 0;
2994	int i, cnt, ret = FAIL;
2995	u64 addr;
2996
2997	i = 0;
2998	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
2999			((ftl_cmd_cnt + 28) < 256)) {
3000		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
3001				(pbt[i] & DISCARD_BLOCK)) {
3002			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
3003				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3004				FTL_Write_IN_Progress_Block_Table_Page();
3005			}
3006
3007			addr = FTL_Get_Physical_Block_Addr((u64)i *
3008						DeviceInfo.wBlockDataSize);
3009			pba = BLK_FROM_ADDR(addr);
3010
3011			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
3012				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
3013					nand_dbg_print(NAND_DBG_DEBUG,
3014						"GC will erase BT block %u\n",
3015						(unsigned int)pba);
3016					discard_cnt--;
3017					i++;
3018					bt_block_erased = 1;
3019					break;
3020				}
3021			}
3022
3023			if (bt_block_erased) {
3024				bt_block_erased = 0;
3025				continue;
3026			}
3027
3028			addr = FTL_Get_Physical_Block_Addr((u64)i *
3029						DeviceInfo.wBlockDataSize);
3030
3031			if (PASS == GLOB_FTL_Block_Erase(addr)) {
3032				pbt[i] &= (u32)(~DISCARD_BLOCK);
3033				pbt[i] |= (u32)(SPARE_BLOCK);
3034				p_BTableChangesDelta =
3035					(struct BTableChangesDelta *)
3036					g_pBTDelta_Free;
3037				g_pBTDelta_Free +=
3038					sizeof(struct BTableChangesDelta);
3039				p_BTableChangesDelta->ftl_cmd_cnt =
3040					ftl_cmd_cnt - 1;
3041				p_BTableChangesDelta->BT_Index = i;
3042				p_BTableChangesDelta->BT_Entry_Value = pbt[i];
3043				p_BTableChangesDelta->ValidFields = 0x0C;
3044				discard_cnt--;
3045				ret = PASS;
3046			} else {
3047				MARK_BLOCK_AS_BAD(pbt[i]);
3048			}
3049		}
3050
3051		i++;
3052	}
3053
3054	return ret;
3055}
3056
3057#else
3058static int do_garbage_collection(u32 discard_cnt)
3059{
3060	u32 *pbt = (u32 *)g_pBlockTable;
3061	u32 pba;
3062	u8 bt_block_erased = 0;
3063	int i, cnt, ret = FAIL;
3064	u64 addr;
3065
3066	i = 0;
3067	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
3068		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
3069				(pbt[i] & DISCARD_BLOCK)) {
3070			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
3071				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3072				FTL_Write_IN_Progress_Block_Table_Page();
3073			}
3074
3075			addr = FTL_Get_Physical_Block_Addr((u64)i *
3076						DeviceInfo.wBlockDataSize);
3077			pba = BLK_FROM_ADDR(addr);
3078
3079			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
3080				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
3081					nand_dbg_print(NAND_DBG_DEBUG,
3082						"GC will erase BT block %d\n",
3083						pba);
3084					discard_cnt--;
3085					i++;
3086					bt_block_erased = 1;
3087					break;
3088				}
3089			}
3090
3091			if (bt_block_erased) {
3092				bt_block_erased = 0;
3093				continue;
3094			}
3095
3096			/* If the discard block is L2 cache block, then just skip it */
3097			for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
3098				if (cache_l2.blk_array[cnt] == pba) {
3099					nand_dbg_print(NAND_DBG_DEBUG,
3100						"GC will erase L2 cache blk %d\n",
3101						pba);
3102					break;
3103				}
3104			}
3105			if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
3106				discard_cnt--;
3107				i++;
3108				continue;
3109			}
3110
3111			addr = FTL_Get_Physical_Block_Addr((u64)i *
3112						DeviceInfo.wBlockDataSize);
3113
3114			if (PASS == GLOB_FTL_Block_Erase(addr)) {
3115				pbt[i] &= (u32)(~DISCARD_BLOCK);
3116				pbt[i] |= (u32)(SPARE_BLOCK);
3117				discard_cnt--;
3118				ret = PASS;
3119			} else {
3120				MARK_BLOCK_AS_BAD(pbt[i]);
3121			}
3122		}
3123
3124		i++;
3125	}
3126
3127	return ret;
3128}
3129#endif
3130
3131/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3132* Function:     GLOB_FTL_Garbage_Collection
3133* Inputs:       none
3134* Outputs:      PASS / FAIL (returns the number of un-erased blocks
3135* Description:  search the block table for all discarded blocks to erase
3136*               for each discarded block:
3137*                   set the flash block to IN_PROGRESS
3138*                   erase the block
3139*                   update the block table
3140*                   write the block table to flash
3141*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3142int GLOB_FTL_Garbage_Collection(void)
3143{
3144	u32 i;
3145	u32 wDiscard = 0;
3146	int wResult = FAIL;
3147	u32 *pbt = (u32 *)g_pBlockTable;
3148
3149	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3150			       __FILE__, __LINE__, __func__);
3151
3152	if (GC_Called) {
3153		printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
3154			"has been re-entered! Exit.\n");
3155		return PASS;
3156	}
3157
3158	GC_Called = 1;
3159
3160	GLOB_FTL_BT_Garbage_Collection();
3161
3162	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3163		if (IS_DISCARDED_BLOCK(i))
3164			wDiscard++;
3165	}
3166
3167	if (wDiscard <= 0) {
3168		GC_Called = 0;
3169		return wResult;
3170	}
3171
3172	nand_dbg_print(NAND_DBG_DEBUG,
3173		"Found %d discarded blocks\n", wDiscard);
3174
3175	FTL_Write_Block_Table(FAIL);
3176
3177	wResult = do_garbage_collection(wDiscard);
3178
3179	FTL_Write_Block_Table(FAIL);
3180
3181	GC_Called = 0;
3182
3183	return wResult;
3184}
3185
3186
3187#if CMD_DMA
3188static int do_bt_garbage_collection(void)
3189{
3190	u32 pba, lba;
3191	u32 *pbt = (u32 *)g_pBlockTable;
3192	u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
3193	u64 addr;
3194	int i, ret = FAIL;
3195
3196	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3197			       __FILE__, __LINE__, __func__);
3198
3199	if (BT_GC_Called)
3200		return PASS;
3201
3202	BT_GC_Called = 1;
3203
3204	for (i = last_erased; (i <= LAST_BT_ID) &&
3205		(g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3206		FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
3207		((ftl_cmd_cnt + 28)) < 256; i++) {
3208		pba = pBTBlocksNode[i - FIRST_BT_ID];
3209		lba = FTL_Get_Block_Index(pba);
3210		nand_dbg_print(NAND_DBG_DEBUG,
3211			"do_bt_garbage_collection: pba %d, lba %d\n",
3212			pba, lba);
3213		nand_dbg_print(NAND_DBG_DEBUG,
3214			"Block Table Entry: %d", pbt[lba]);
3215
3216		if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3217			(pbt[lba] & DISCARD_BLOCK)) {
3218			nand_dbg_print(NAND_DBG_DEBUG,
3219				"do_bt_garbage_collection_cdma: "
3220				"Erasing Block tables present in block %d\n",
3221				pba);
3222			addr = FTL_Get_Physical_Block_Addr((u64)lba *
3223						DeviceInfo.wBlockDataSize);
3224			if (PASS == GLOB_FTL_Block_Erase(addr)) {
3225				pbt[lba] &= (u32)(~DISCARD_BLOCK);
3226				pbt[lba] |= (u32)(SPARE_BLOCK);
3227
3228				p_BTableChangesDelta =
3229					(struct BTableChangesDelta *)
3230					g_pBTDelta_Free;
3231				g_pBTDelta_Free +=
3232					sizeof(struct BTableChangesDelta);
3233
3234				p_BTableChangesDelta->ftl_cmd_cnt =
3235					ftl_cmd_cnt - 1;
3236				p_BTableChangesDelta->BT_Index = lba;
3237				p_BTableChangesDelta->BT_Entry_Value =
3238								pbt[lba];
3239
3240				p_BTableChangesDelta->ValidFields = 0x0C;
3241
3242				ret = PASS;
3243				pBTBlocksNode[last_erased - FIRST_BT_ID] =
3244							BTBLOCK_INVAL;
3245				nand_dbg_print(NAND_DBG_DEBUG,
3246					"resetting bt entry at index %d "
3247					"value %d\n", i,
3248					pBTBlocksNode[i - FIRST_BT_ID]);
3249				if (last_erased == LAST_BT_ID)
3250					last_erased = FIRST_BT_ID;
3251				else
3252					last_erased++;
3253			} else {
3254				MARK_BLOCK_AS_BAD(pbt[lba]);
3255			}
3256		}
3257	}
3258
3259	BT_GC_Called = 0;
3260
3261	return ret;
3262}
3263
3264#else
3265static int do_bt_garbage_collection(void)
3266{
3267	u32 pba, lba;
3268	u32 *pbt = (u32 *)g_pBlockTable;
3269	u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
3270	u64 addr;
3271	int i, ret = FAIL;
3272
3273	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3274			       __FILE__, __LINE__, __func__);
3275
3276	if (BT_GC_Called)
3277		return PASS;
3278
3279	BT_GC_Called = 1;
3280
3281	for (i = last_erased; (i <= LAST_BT_ID) &&
3282		(g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3283		FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
3284		pba = pBTBlocksNode[i - FIRST_BT_ID];
3285		lba = FTL_Get_Block_Index(pba);
3286		nand_dbg_print(NAND_DBG_DEBUG,
3287			"do_bt_garbage_collection_cdma: pba %d, lba %d\n",
3288			pba, lba);
3289		nand_dbg_print(NAND_DBG_DEBUG,
3290			"Block Table Entry: %d", pbt[lba]);
3291
3292		if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3293			(pbt[lba] & DISCARD_BLOCK)) {
3294			nand_dbg_print(NAND_DBG_DEBUG,
3295				"do_bt_garbage_collection: "
3296				"Erasing Block tables present in block %d\n",
3297				pba);
3298			addr = FTL_Get_Physical_Block_Addr((u64)lba *
3299						DeviceInfo.wBlockDataSize);
3300			if (PASS == GLOB_FTL_Block_Erase(addr)) {
3301				pbt[lba] &= (u32)(~DISCARD_BLOCK);
3302				pbt[lba] |= (u32)(SPARE_BLOCK);
3303				ret = PASS;
3304				pBTBlocksNode[last_erased - FIRST_BT_ID] =
3305							BTBLOCK_INVAL;
3306				nand_dbg_print(NAND_DBG_DEBUG,
3307					"resetting bt entry at index %d "
3308					"value %d\n", i,
3309					pBTBlocksNode[i - FIRST_BT_ID]);
3310				if (last_erased == LAST_BT_ID)
3311					last_erased = FIRST_BT_ID;
3312				else
3313					last_erased++;
3314			} else {
3315				MARK_BLOCK_AS_BAD(pbt[lba]);
3316			}
3317		}
3318	}
3319
3320	BT_GC_Called = 0;
3321
3322	return ret;
3323}
3324
3325#endif
3326
3327/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3328* Function:     GLOB_FTL_BT_Garbage_Collection
3329* Inputs:       none
3330* Outputs:      PASS / FAIL (returns the number of un-erased blocks
3331* Description:  Erases discarded blocks containing Block table
3332*
3333*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3334int GLOB_FTL_BT_Garbage_Collection(void)
3335{
3336	return do_bt_garbage_collection();
3337}
3338
3339/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3340* Function:     FTL_Replace_OneBlock
3341* Inputs:       Block number 1
3342*               Block number 2
3343* Outputs:      Replaced Block Number
3344* Description:  Interchange block table entries at wBlockNum and wReplaceNum
3345*
3346*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3347static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
3348{
3349	u32 tmp_blk;
3350	u32 replace_node = BAD_BLOCK;
3351	u32 *pbt = (u32 *)g_pBlockTable;
3352
3353	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3354		__FILE__, __LINE__, __func__);
3355
3356	if (rep_blk != BAD_BLOCK) {
3357		if (IS_BAD_BLOCK(blk))
3358			tmp_blk = pbt[blk];
3359		else
3360			tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);
3361
3362		replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
3363		pbt[blk] = replace_node;
3364		pbt[rep_blk] = tmp_blk;
3365
3366#if CMD_DMA
3367		p_BTableChangesDelta =
3368			(struct BTableChangesDelta *)g_pBTDelta_Free;
3369		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3370
3371		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3372		p_BTableChangesDelta->BT_Index = blk;
3373		p_BTableChangesDelta->BT_Entry_Value = pbt[blk];
3374
3375		p_BTableChangesDelta->ValidFields = 0x0C;
3376
3377		p_BTableChangesDelta =
3378			(struct BTableChangesDelta *)g_pBTDelta_Free;
3379		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3380
3381		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3382		p_BTableChangesDelta->BT_Index = rep_blk;
3383		p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
3384		p_BTableChangesDelta->ValidFields = 0x0C;
3385#endif
3386	}
3387
3388	return replace_node;
3389}
3390
3391/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3392* Function:     FTL_Write_Block_Table_Data
3393* Inputs:       Block table size in pages
3394* Outputs:      PASS=0 / FAIL=1
3395* Description:  Write block table data in flash
3396*               If first page and last page
3397*                  Write data+BT flag
3398*               else
3399*                  Write data
3400*               BT flag is a counter. Its value is incremented for block table
3401*               write in a new Block
3402*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3403static int FTL_Write_Block_Table_Data(void)
3404{
3405	u64 dwBlockTableAddr, pTempAddr;
3406	u32 Block;
3407	u16 Page, PageCount;
3408	u8 *tempBuf = tmp_buf_write_blk_table_data;
3409	int wBytesCopied;
3410	u16 bt_pages;
3411
3412	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3413			       __FILE__, __LINE__, __func__);
3414
3415	dwBlockTableAddr =
3416		(u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
3417		(u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
3418	pTempAddr = dwBlockTableAddr;
3419
3420	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
3421
3422	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
3423			       "page= %d BlockTableIndex= %d "
3424			       "BlockTableOffset=%d\n", bt_pages,
3425			       g_wBlockTableIndex, g_wBlockTableOffset);
3426
3427	Block = BLK_FROM_ADDR(pTempAddr);
3428	Page = PAGE_FROM_ADDR(pTempAddr, Block);
3429	PageCount = 1;
3430
3431	if (bt_block_changed) {
3432		if (bt_flag == LAST_BT_ID) {
3433			bt_flag = FIRST_BT_ID;
3434			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
3435		} else if (bt_flag < LAST_BT_ID) {
3436			bt_flag++;
3437			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
3438		}
3439
3440		if ((bt_flag > (LAST_BT_ID-4)) &&
3441			g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
3442						BTBLOCK_INVAL) {
3443			bt_block_changed = 0;
3444			GLOB_FTL_BT_Garbage_Collection();
3445		}
3446
3447		bt_block_changed = 0;
3448		nand_dbg_print(NAND_DBG_DEBUG,
3449			"Block Table Counter is %u Block %u\n",
3450			bt_flag, (unsigned int)Block);
3451	}
3452
3453	memset(tempBuf, 0, 3);
3454	tempBuf[3] = bt_flag;
3455	wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
3456			DeviceInfo.wPageDataSize - 4, 0);
3457	memset(&tempBuf[wBytesCopied + 4], 0xff,
3458		DeviceInfo.wPageSize - (wBytesCopied + 4));
3459	FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
3460					bt_flag);
3461
3462#if CMD_DMA
3463	memcpy(g_pNextBlockTable, tempBuf,
3464		DeviceInfo.wPageSize * sizeof(u8));
3465	nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
3466		"Block %u Page %u\n", (unsigned int)Block, Page);
3467	if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
3468		Block, Page, 1,
3469		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
3470		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
3471			"%s, Line %d, Function: %s, "
3472			"new Bad Block %d generated!\n",
3473			__FILE__, __LINE__, __func__, Block);
3474		goto func_return;
3475	}
3476
3477	ftl_cmd_cnt++;
3478	g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
3479#else
3480	if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
3481		nand_dbg_print(NAND_DBG_WARN,
3482			"NAND Program fail in %s, Line %d, Function: %s, "
3483			"new Bad Block %d generated!\n",
3484			__FILE__, __LINE__, __func__, Block);
3485		goto func_return;
3486	}
3487#endif
3488
3489	if (bt_pages > 1) {
3490		PageCount = bt_pages - 1;
3491		if (PageCount > 1) {
3492			wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
3493				DeviceInfo.wPageDataSize * (PageCount - 1),
3494				wBytesCopied);
3495
3496#if CMD_DMA
3497			memcpy(g_pNextBlockTable, tempBuf,
3498				(PageCount - 1) * DeviceInfo.wPageDataSize);
3499			if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
3500				g_pNextBlockTable, Block, Page + 1,
3501				PageCount - 1)) {
3502				nand_dbg_print(NAND_DBG_WARN,
3503					"NAND Program fail in %s, Line %d, "
3504					"Function: %s, "
3505					"new Bad Block %d generated!\n",
3506					__FILE__, __LINE__, __func__,
3507					(int)Block);
3508				goto func_return;
3509			}
3510
3511			ftl_cmd_cnt++;
3512			g_pNextBlockTable += (PageCount - 1) *
3513				DeviceInfo.wPageDataSize * sizeof(u8);
3514#else
3515			if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
3516					Block, Page + 1, PageCount - 1)) {
3517				nand_dbg_print(NAND_DBG_WARN,
3518					"NAND Program fail in %s, Line %d, "
3519					"Function: %s, "
3520					"new Bad Block %d generated!\n",
3521					__FILE__, __LINE__, __func__,
3522					(int)Block);
3523				goto func_return;
3524			}
3525#endif
3526		}
3527
3528		wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
3529				DeviceInfo.wPageDataSize, wBytesCopied);
3530		memset(&tempBuf[wBytesCopied], 0xff,
3531			DeviceInfo.wPageSize-wBytesCopied);
3532		FTL_Insert_Block_Table_Signature(
3533			&tempBuf[DeviceInfo.wPageDataSize], bt_flag);
3534#if CMD_DMA
3535		memcpy(g_pNextBlockTable, tempBuf,
3536				DeviceInfo.wPageSize * sizeof(u8));
3537		nand_dbg_print(NAND_DBG_DEBUG,
3538			"Writing the last Page of Block Table "
3539			"Block %u Page %u\n",
3540			(unsigned int)Block, Page + bt_pages - 1);
3541		if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
3542			g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
3543			LLD_CMD_FLAG_MODE_CDMA |
3544			LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
3545			nand_dbg_print(NAND_DBG_WARN,
3546				"NAND Program fail in %s, Line %d, "
3547				"Function: %s, new Bad Block %d generated!\n",
3548				__FILE__, __LINE__, __func__, Block);
3549			goto func_return;
3550		}
3551		ftl_cmd_cnt++;
3552#else
3553		if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
3554					Block, Page+bt_pages - 1, 1)) {
3555			nand_dbg_print(NAND_DBG_WARN,
3556				"NAND Program fail in %s, Line %d, "
3557				"Function: %s, "
3558				"new Bad Block %d generated!\n",
3559				__FILE__, __LINE__, __func__, Block);
3560			goto func_return;
3561		}
3562#endif
3563	}
3564
3565	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");
3566
3567func_return:
3568	return PASS;
3569}
3570
3571/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3572* Function:     FTL_Replace_Block_Table
3573* Inputs:       None
3574* Outputs:      PASS=0 / FAIL=1
3575* Description:  Get a new block to write block table
3576*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3577static u32 FTL_Replace_Block_Table(void)
3578{
3579	u32 blk;
3580	int gc;
3581
3582	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3583		__FILE__, __LINE__, __func__);
3584
3585	blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
3586
3587	if ((BAD_BLOCK == blk) && (PASS == gc)) {
3588		GLOB_FTL_Garbage_Collection();
3589		blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
3590	}
3591	if (BAD_BLOCK == blk)
3592		printk(KERN_ERR "%s, %s: There is no spare block. "
3593			"It should never happen\n",
3594			__FILE__, __func__);
3595
3596	nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
3597
3598	return blk;
3599}
3600
3601/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3602* Function:     FTL_Replace_LWBlock
3603* Inputs:       Block number
3604*               Pointer to Garbage Collect flag
3605* Outputs:
3606* Description:  Determine the least weared block by traversing
3607*               block table
3608*               Set Garbage collection to be called if number of spare
3609*               block is less than Free Block Gate count
3610*               Change Block table entry to map least worn block for current
3611*               operation
3612*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3613static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
3614{
3615	u32 i;
3616	u32 *pbt = (u32 *)g_pBlockTable;
3617	u8 wLeastWornCounter = 0xFF;
3618	u32 wLeastWornIndex = BAD_BLOCK;
3619	u32 wSpareBlockNum = 0;
3620	u32 wDiscardBlockNum = 0;
3621
3622	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3623		__FILE__, __LINE__, __func__);
3624
3625	if (IS_SPARE_BLOCK(wBlockNum)) {
3626		*pGarbageCollect = FAIL;
3627		pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
3628#if CMD_DMA
3629		p_BTableChangesDelta =
3630			(struct BTableChangesDelta *)g_pBTDelta_Free;
3631		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3632		p_BTableChangesDelta->ftl_cmd_cnt =
3633						ftl_cmd_cnt;
3634		p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
3635		p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
3636		p_BTableChangesDelta->ValidFields = 0x0C;
3637#endif
3638		return pbt[wBlockNum];
3639	}
3640
3641	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3642		if (IS_DISCARDED_BLOCK(i))
3643			wDiscardBlockNum++;
3644
3645		if (IS_SPARE_BLOCK(i)) {
3646			u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
3647			if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
3648				printk(KERN_ERR "FTL_Replace_LWBlock: "
3649					"This should never occur!\n");
3650			if (g_pWearCounter[wPhysicalIndex -
3651				DeviceInfo.wSpectraStartBlock] <
3652				wLeastWornCounter) {
3653				wLeastWornCounter =
3654					g_pWearCounter[wPhysicalIndex -
3655					DeviceInfo.wSpectraStartBlock];
3656				wLeastWornIndex = i;
3657			}
3658			wSpareBlockNum++;
3659		}
3660	}
3661
3662	nand_dbg_print(NAND_DBG_WARN,
3663		"FTL_Replace_LWBlock: Least Worn Counter %d\n",
3664		(int)wLeastWornCounter);
3665
3666	if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
3667		(wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
3668		*pGarbageCollect = PASS;
3669	else
3670		*pGarbageCollect = FAIL;
3671
3672	nand_dbg_print(NAND_DBG_DEBUG,
3673		"FTL_Replace_LWBlock: Discarded Blocks %u Spare"
3674		" Blocks %u\n",
3675		(unsigned int)wDiscardBlockNum,
3676		(unsigned int)wSpareBlockNum);
3677
3678	return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
3679}
3680
3681/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3682* Function:     FTL_Replace_MWBlock
3683* Inputs:       None
3684* Outputs:      most worn spare block no./BAD_BLOCK
3685* Description:  It finds most worn spare block.
3686*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3687static u32 FTL_Replace_MWBlock(void)
3688{
3689	u32 i;
3690	u32 *pbt = (u32 *)g_pBlockTable;
3691	u8 wMostWornCounter = 0;
3692	u32 wMostWornIndex = BAD_BLOCK;
3693	u32 wSpareBlockNum = 0;
3694
3695	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3696		       __FILE__, __LINE__, __func__);
3697
3698	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3699		if (IS_SPARE_BLOCK(i)) {
3700			u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
3701			if (g_pWearCounter[wPhysicalIndex -
3702			    DeviceInfo.wSpectraStartBlock] >
3703			    wMostWornCounter) {
3704				wMostWornCounter =
3705				    g_pWearCounter[wPhysicalIndex -
3706				    DeviceInfo.wSpectraStartBlock];
3707				wMostWornIndex = wPhysicalIndex;
3708			}
3709			wSpareBlockNum++;
3710		}
3711	}
3712
3713	if (wSpareBlockNum <= 2)
3714		return BAD_BLOCK;
3715
3716	return wMostWornIndex;
3717}
3718
3719/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3720* Function:     FTL_Replace_Block
3721* Inputs:       Block Address
3722* Outputs:      PASS=0 / FAIL=1
3723* Description:  If block specified by blk_addr parameter is not free,
3724*               replace it with the least worn block.
3725*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3726static int FTL_Replace_Block(u64 blk_addr)
3727{
3728	u32 current_blk = BLK_FROM_ADDR(blk_addr);
3729	u32 *pbt = (u32 *)g_pBlockTable;
3730	int wResult = PASS;
3731	int GarbageCollect = FAIL;
3732
3733	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3734		__FILE__, __LINE__, __func__);
3735
3736	if (IS_SPARE_BLOCK(current_blk)) {
3737		pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
3738#if CMD_DMA
3739		p_BTableChangesDelta =
3740			(struct BTableChangesDelta *)g_pBTDelta_Free;
3741		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3742		p_BTableChangesDelta->ftl_cmd_cnt =
3743			ftl_cmd_cnt;
3744		p_BTableChangesDelta->BT_Index = current_blk;
3745		p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
3746		p_BTableChangesDelta->ValidFields = 0x0C ;
3747#endif
3748		return wResult;
3749	}
3750
3751	FTL_Replace_LWBlock(current_blk, &GarbageCollect);
3752
3753	if (PASS == GarbageCollect)
3754		wResult = GLOB_FTL_Garbage_Collection();
3755
3756	return wResult;
3757}
3758
3759/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3760* Function:     GLOB_FTL_Is_BadBlock
3761* Inputs:       block number to test
3762* Outputs:      PASS (block is BAD) / FAIL (block is not bad)
3763* Description:  test if this block number is flagged as bad
3764*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3765int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
3766{
3767	u32 *pbt = (u32 *)g_pBlockTable;
3768
3769	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3770		__FILE__, __LINE__, __func__);
3771
3772	if (wBlockNum >= DeviceInfo.wSpectraStartBlock
3773		&& BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
3774		return PASS;
3775	else
3776		return FAIL;
3777}
3778
3779/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3780* Function:     GLOB_FTL_Flush_Cache
3781* Inputs:       none
3782* Outputs:      PASS=0 / FAIL=1
3783* Description:  flush all the cache blocks to flash
3784*               if a cache block is not dirty, don't do anything with it
3785*               else, write the block and update the block table
3786* Note:         This function should be called at shutdown/power down.
3787*               to write important data into device
3788*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3789int GLOB_FTL_Flush_Cache(void)
3790{
3791	int i, ret;
3792
3793	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3794		       __FILE__, __LINE__, __func__);
3795
3796	for (i = 0; i < CACHE_ITEM_NUM; i++) {
3797		if (SET == Cache.array[i].changed) {
3798#if CMD_DMA
3799#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
3800			int_cache[ftl_cmd_cnt].item = i;
3801			int_cache[ftl_cmd_cnt].cache.address =
3802					Cache.array[i].address;
3803			int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
3804#endif
3805#endif
3806			ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
3807			if (PASS == ret) {
3808				Cache.array[i].changed = CLEAR;
3809			} else {
3810				printk(KERN_ALERT "Failed when write back to L2 cache!\n");
3811				/* TODO - How to handle this? */
3812			}
3813		}
3814	}
3815
3816	flush_l2_cache();
3817
3818	return FTL_Write_Block_Table(FAIL);
3819}
3820
3821/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3822* Function:     GLOB_FTL_Page_Read
3823* Inputs:       pointer to data
3824*                   logical address of data (u64 is LBA * Bytes/Page)
3825* Outputs:      PASS=0 / FAIL=1
3826* Description:  reads a page of data into RAM from the cache
3827*               if the data is not already in cache, read from flash to cache
3828*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3829int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
3830{
3831	u16 cache_item;
3832	int res = PASS;
3833
3834	nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
3835		"page_addr: %llu\n", logical_addr);
3836
3837	cache_item = FTL_Cache_If_Hit(logical_addr);
3838
3839	if (UNHIT_CACHE_ITEM == cache_item) {
3840		nand_dbg_print(NAND_DBG_DEBUG,
3841			       "GLOB_FTL_Page_Read: Cache not hit\n");
3842		res = FTL_Cache_Write();
3843		if (ERR == FTL_Cache_Read(logical_addr))
3844			res = ERR;
3845		cache_item = Cache.LRU;
3846	}
3847
3848	FTL_Cache_Read_Page(data, logical_addr, cache_item);
3849
3850	return res;
3851}
3852
3853/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3854* Function:     GLOB_FTL_Page_Write
3855* Inputs:       pointer to data
3856*               address of data (ADDRESSTYPE is LBA * Bytes/Page)
3857* Outputs:      PASS=0 / FAIL=1
3858* Description:  writes a page of data from RAM to the cache
3859*               if the data is not already in cache, write back the
3860*               least recently used block and read the addressed block
3861*               from flash to cache
3862*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3863int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
3864{
3865	u16 cache_blk;
3866	u32 *pbt = (u32 *)g_pBlockTable;
3867	int wResult = PASS;
3868
3869	nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
3870		"dwPageAddr: %llu\n", dwPageAddr);
3871
3872	cache_blk = FTL_Cache_If_Hit(dwPageAddr);
3873
3874	if (UNHIT_CACHE_ITEM == cache_blk) {
3875		wResult = FTL_Cache_Write();
3876		if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
3877			wResult = FTL_Replace_Block(dwPageAddr);
3878			pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
3879			if (wResult == FAIL)
3880				return FAIL;
3881		}
3882		if (ERR == FTL_Cache_Read(dwPageAddr))
3883			wResult = ERR;
3884		cache_blk = Cache.LRU;
3885		FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
3886	} else {
3887#if CMD_DMA
3888		FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
3889				LLD_CMD_FLAG_ORDER_BEFORE_REST);
3890#else
3891		FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
3892#endif
3893	}
3894
3895	return wResult;
3896}
3897
3898/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3899* Function:     GLOB_FTL_Block_Erase
3900* Inputs:       address of block to erase (now in byte format, should change to
3901* block format)
3902* Outputs:      PASS=0 / FAIL=1
3903* Description:  erases the specified block
3904*               increments the erase count
3905*               If erase count reaches its upper limit,call function to
3906*               do the adjustment as per the relative erase count values
3907*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3908int GLOB_FTL_Block_Erase(u64 blk_addr)
3909{
3910	int status;
3911	u32 BlkIdx;
3912
3913	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3914			       __FILE__, __LINE__, __func__);
3915
3916	BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
3917
3918	if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
3919		printk(KERN_ERR "GLOB_FTL_Block_Erase: "
3920			"This should never occur\n");
3921		return FAIL;
3922	}
3923
3924#if CMD_DMA
3925	status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
3926	if (status == FAIL)
3927		nand_dbg_print(NAND_DBG_WARN,
3928			       "NAND Program fail in %s, Line %d, "
3929			       "Function: %s, new Bad Block %d generated!\n",
3930			       __FILE__, __LINE__, __func__, BlkIdx);
3931#else
3932	status = GLOB_LLD_Erase_Block(BlkIdx);
3933	if (status == FAIL) {
3934		nand_dbg_print(NAND_DBG_WARN,
3935			       "NAND Program fail in %s, Line %d, "
3936			       "Function: %s, new Bad Block %d generated!\n",
3937			       __FILE__, __LINE__, __func__, BlkIdx);
3938		return status;
3939	}
3940#endif
3941
3942	if (DeviceInfo.MLCDevice) {
3943		g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
3944		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
3945			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3946			FTL_Write_IN_Progress_Block_Table_Page();
3947		}
3948	}
3949
3950	g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
3951
3952#if CMD_DMA
3953	p_BTableChangesDelta =
3954		(struct BTableChangesDelta *)g_pBTDelta_Free;
3955	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3956	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3957	p_BTableChangesDelta->WC_Index =
3958		BlkIdx - DeviceInfo.wSpectraStartBlock;
3959	p_BTableChangesDelta->WC_Entry_Value =
3960		g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
3961	p_BTableChangesDelta->ValidFields = 0x30;
3962
3963	if (DeviceInfo.MLCDevice) {
3964		p_BTableChangesDelta =
3965			(struct BTableChangesDelta *)g_pBTDelta_Free;
3966		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3967		p_BTableChangesDelta->ftl_cmd_cnt =
3968			ftl_cmd_cnt;
3969		p_BTableChangesDelta->RC_Index =
3970			BlkIdx - DeviceInfo.wSpectraStartBlock;
3971		p_BTableChangesDelta->RC_Entry_Value =
3972			g_pReadCounter[BlkIdx -
3973				DeviceInfo.wSpectraStartBlock];
3974		p_BTableChangesDelta->ValidFields = 0xC0;
3975	}
3976
3977	ftl_cmd_cnt++;
3978#endif
3979
3980	if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
3981		FTL_Adjust_Relative_Erase_Count(BlkIdx);
3982
3983	return status;
3984}
3985
3986
3987/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3988* Function:     FTL_Adjust_Relative_Erase_Count
3989* Inputs:       index to block that was just incremented and is at the max
3990* Outputs:      PASS=0 / FAIL=1
3991* Description:  If any erase counts at MAX, adjusts erase count of every
3992*               block by subtracting least worn
3993*               counter from counter value of every entry in wear table
3994*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3995static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
3996{
3997	u8 wLeastWornCounter = MAX_BYTE_VALUE;
3998	u8 wWearCounter;
3999	u32 i, wWearIndex;
4000	u32 *pbt = (u32 *)g_pBlockTable;
4001	int wResult = PASS;
4002
4003	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4004		__FILE__, __LINE__, __func__);
4005
4006	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
4007		if (IS_BAD_BLOCK(i))
4008			continue;
4009		wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4010
4011		if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
4012			printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
4013					"This should never occur\n");
4014		wWearCounter = g_pWearCounter[wWearIndex -
4015			DeviceInfo.wSpectraStartBlock];
4016		if (wWearCounter < wLeastWornCounter)
4017			wLeastWornCounter = wWearCounter;
4018	}
4019
4020	if (wLeastWornCounter == 0) {
4021		nand_dbg_print(NAND_DBG_WARN,
4022			"Adjusting Wear Levelling Counters: Special Case\n");
4023		g_pWearCounter[Index_of_MAX -
4024			DeviceInfo.wSpectraStartBlock]--;
4025#if CMD_DMA
4026		p_BTableChangesDelta =
4027			(struct BTableChangesDelta *)g_pBTDelta_Free;
4028		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4029		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
4030		p_BTableChangesDelta->WC_Index =
4031			Index_of_MAX - DeviceInfo.wSpectraStartBlock;
4032		p_BTableChangesDelta->WC_Entry_Value =
4033			g_pWearCounter[Index_of_MAX -
4034				DeviceInfo.wSpectraStartBlock];
4035		p_BTableChangesDelta->ValidFields = 0x30;
4036#endif
4037		FTL_Static_Wear_Leveling();
4038	} else {
4039		for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
4040			if (!IS_BAD_BLOCK(i)) {
4041				wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4042				g_pWearCounter[wWearIndex -
4043					DeviceInfo.wSpectraStartBlock] =
4044					(u8)(g_pWearCounter
4045					[wWearIndex -
4046					DeviceInfo.wSpectraStartBlock] -
4047					wLeastWornCounter);
4048#if CMD_DMA
4049				p_BTableChangesDelta =
4050				(struct BTableChangesDelta *)g_pBTDelta_Free;
4051				g_pBTDelta_Free +=
4052					sizeof(struct BTableChangesDelta);
4053
4054				p_BTableChangesDelta->ftl_cmd_cnt =
4055					ftl_cmd_cnt;
4056				p_BTableChangesDelta->WC_Index = wWearIndex -
4057					DeviceInfo.wSpectraStartBlock;
4058				p_BTableChangesDelta->WC_Entry_Value =
4059					g_pWearCounter[wWearIndex -
4060					DeviceInfo.wSpectraStartBlock];
4061				p_BTableChangesDelta->ValidFields = 0x30;
4062#endif
4063			}
4064	}
4065
4066	return wResult;
4067}
4068
4069/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4070* Function:     FTL_Write_IN_Progress_Block_Table_Page
4071* Inputs:       None
4072* Outputs:      None
4073* Description:  It writes in-progress flag page to the page next to
4074*               block table
4075***********************************************************************/
4076static int FTL_Write_IN_Progress_Block_Table_Page(void)
4077{
4078	int wResult = PASS;
4079	u16 bt_pages;
4080	u16 dwIPFPageAddr;
4081#if CMD_DMA
4082#else
4083	u32 *pbt = (u32 *)g_pBlockTable;
4084	u32 wTempBlockTableIndex;
4085#endif
4086
4087	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
4088			       __FILE__, __LINE__, __func__);
4089
4090	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
4091
4092	dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
4093
4094	nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
4095			       "Block %d Page %d\n",
4096			       g_wBlockTableIndex, dwIPFPageAddr);
4097
4098#if CMD_DMA
4099	wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
4100		g_wBlockTableIndex, dwIPFPageAddr, 1,
4101		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
4102	if (wResult == FAIL) {
4103		nand_dbg_print(NAND_DBG_WARN,
4104			       "NAND Program fail in %s, Line %d, "
4105			       "Function: %s, new Bad Block %d generated!\n",
4106			       __FILE__, __LINE__, __func__,
4107			       g_wBlockTableIndex);
4108	}
4109	g_wBlockTableOffset = dwIPFPageAddr + 1;
4110	p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
4111	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4112	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
4113	p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
4114	p_BTableChangesDelta->ValidFields = 0x01;
4115	ftl_cmd_cnt++;
4116#else
4117	wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
4118		g_wBlockTableIndex, dwIPFPageAddr, 1);
4119	if (wResult == FAIL) {
4120		nand_dbg_print(NAND_DBG_WARN,
4121			       "NAND Program fail in %s, Line %d, "
4122			       "Function: %s, new Bad Block %d generated!\n",
4123			       __FILE__, __LINE__, __func__,
4124			       (int)g_wBlockTableIndex);
4125		MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
4126		wTempBlockTableIndex = FTL_Replace_Block_Table();
4127		bt_block_changed = 1;
4128		if (BAD_BLOCK == wTempBlockTableIndex)
4129			return ERR;
4130		g_wBlockTableIndex = wTempBlockTableIndex;
4131		g_wBlockTableOffset = 0;
4132		/* Block table tag is '00'. Means it's used one */
4133		pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
4134		return FAIL;
4135	}
4136	g_wBlockTableOffset = dwIPFPageAddr + 1;
4137#endif
4138	return wResult;
4139}
4140
4141/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4142* Function:     FTL_Read_Disturbance
4143* Inputs:       block address
4144* Outputs:      PASS=0 / FAIL=1
4145* Description:  used to handle read disturbance. Data in block that
4146*               reaches its read limit is moved to new block
4147*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4148int FTL_Read_Disturbance(u32 blk_addr)
4149{
4150	int wResult = FAIL;
4151	u32 *pbt = (u32 *) g_pBlockTable;
4152	u32 dwOldBlockAddr = blk_addr;
4153	u32 wBlockNum;
4154	u32 i;
4155	u32 wLeastReadCounter = 0xFFFF;
4156	u32 wLeastReadIndex = BAD_BLOCK;
4157	u32 wSpareBlockNum = 0;
4158	u32 wTempNode;
4159	u32 wReplacedNode;
4160	u8 *g_pTempBuf;
4161
4162	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
4163			       __FILE__, __LINE__, __func__);
4164
4165#if CMD_DMA
4166	g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
4167	cp_back_buf_idx++;
4168	if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
4169		printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
4170		"Maybe too many pending commands in your CDMA chain.\n");
4171		return FAIL;
4172	}
4173#else
4174	g_pTempBuf = tmp_buf_read_disturbance;
4175#endif
4176
4177	wBlockNum = FTL_Get_Block_Index(blk_addr);
4178
4179	do {
4180		/* This is a bug.Here 'i' should be logical block number
4181		 * and start from 1 (0 is reserved for block table).
4182		 * Have fixed it.        - Yunpeng 2008. 12. 19
4183		 */
4184		for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
4185			if (IS_SPARE_BLOCK(i)) {
4186				u32 wPhysicalIndex =
4187					(u32)((~SPARE_BLOCK) & pbt[i]);
4188				if (g_pReadCounter[wPhysicalIndex -
4189					DeviceInfo.wSpectraStartBlock] <
4190					wLeastReadCounter) {
4191					wLeastReadCounter =
4192						g_pReadCounter[wPhysicalIndex -
4193						DeviceInfo.wSpectraStartBlock];
4194					wLeastReadIndex = i;
4195				}
4196				wSpareBlockNum++;
4197			}
4198		}
4199
4200		if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
4201			wResult = GLOB_FTL_Garbage_Collection();
4202			if (PASS == wResult)
4203				continue;
4204			else
4205				break;
4206		} else {
4207			wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
4208			wReplacedNode = (u32)((~SPARE_BLOCK) &
4209					pbt[wLeastReadIndex]);
4210#if CMD_DMA
4211			pbt[wBlockNum] = wReplacedNode;
4212			pbt[wLeastReadIndex] = wTempNode;
4213			p_BTableChangesDelta =
4214				(struct BTableChangesDelta *)g_pBTDelta_Free;
4215			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4216
4217			p_BTableChangesDelta->ftl_cmd_cnt =
4218					ftl_cmd_cnt;
4219			p_BTableChangesDelta->BT_Index = wBlockNum;
4220			p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
4221			p_BTableChangesDelta->ValidFields = 0x0C;
4222
4223			p_BTableChangesDelta =
4224				(struct BTableChangesDelta *)g_pBTDelta_Free;
4225			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4226
4227			p_BTableChangesDelta->ftl_cmd_cnt =
4228					ftl_cmd_cnt;
4229			p_BTableChangesDelta->BT_Index = wLeastReadIndex;
4230			p_BTableChangesDelta->BT_Entry_Value =
4231					pbt[wLeastReadIndex];
4232			p_BTableChangesDelta->ValidFields = 0x0C;
4233
4234			wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
4235				dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
4236				LLD_CMD_FLAG_MODE_CDMA);
4237			if (wResult == FAIL)
4238				return wResult;
4239
4240			ftl_cmd_cnt++;
4241
4242			if (wResult != FAIL) {
4243				if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
4244					g_pTempBuf, pbt[wBlockNum], 0,
4245					DeviceInfo.wPagesPerBlock)) {
4246					nand_dbg_print(NAND_DBG_WARN,
4247						"NAND Program fail in "
4248						"%s, Line %d, Function: %s, "
4249						"new Bad Block %d "
4250						"generated!\n",
4251						__FILE__, __LINE__, __func__,
4252						(int)pbt[wBlockNum]);
4253					wResult = FAIL;
4254					MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
4255				}
4256				ftl_cmd_cnt++;
4257			}
4258#else
4259			wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
4260				dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
4261			if (wResult == FAIL)
4262				return wResult;
4263
4264			if (wResult != FAIL) {
4265				/* This is a bug. At this time, pbt[wBlockNum]
4266				is still the physical address of
4267				discard block, and should not be write.
4268				Have fixed it as below.
4269					-- Yunpeng 2008.12.19
4270				*/
4271				wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
4272					wReplacedNode, 0,
4273					DeviceInfo.wPagesPerBlock);
4274				if (wResult == FAIL) {
4275					nand_dbg_print(NAND_DBG_WARN,
4276						"NAND Program fail in "
4277						"%s, Line %d, Function: %s, "
4278						"new Bad Block %d "
4279						"generated!\n",
4280						__FILE__, __LINE__, __func__,
4281						(int)wReplacedNode);
4282					MARK_BLOCK_AS_BAD(wReplacedNode);
4283				} else {
4284					pbt[wBlockNum] = wReplacedNode;
4285					pbt[wLeastReadIndex] = wTempNode;
4286				}
4287			}
4288
4289			if ((wResult == PASS) && (g_cBlockTableStatus !=
4290				IN_PROGRESS_BLOCK_TABLE)) {
4291				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
4292				FTL_Write_IN_Progress_Block_Table_Page();
4293			}
4294#endif
4295		}
4296	} while (wResult != PASS)
4297	;
4298
4299#if CMD_DMA
4300	/* ... */
4301#endif
4302
4303	return wResult;
4304}
4305