/*
 * NAND Flash Controller Device Driver
 *
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
21
#include <linux/slab.h>
31
#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
32
#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
33
DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
35
#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
36
BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
38
#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
40
#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
41
BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
43
#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
46
/*
 * Report a failed boundary check: logs the offending value (chnl), the
 * limit it met or exceeded, extra context (no), and the source location
 * (filename:lineno) that detected it.
 * NOTE(review): this extraction is garbled -- stray numeric tokens and
 * missing lines (function braces, any abort call implied by the message)
 * are preserved/absent verbatim; confirm against the upstream source.
 */
void debug_boundary_lineno_error(int chnl, int limit, int no,
47
int lineno, char *filename)
50
printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
51
"at %s:%d. Other info:%d. Aborting...\n",
52
chnl, limit, filename, lineno, no);
54
/* static int globalmemsize; */
57
static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
58
static int FTL_Cache_Read(u64 dwPageAddr);
59
static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
61
static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
62
u8 cache_blk, u16 flag);
63
static int FTL_Cache_Write(void);
64
static void FTL_Calculate_LRU(void);
65
static u32 FTL_Get_Block_Index(u32 wBlockNum);
67
static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
68
u8 BT_Tag, u16 *Page);
69
static int FTL_Read_Block_Table(void);
70
static int FTL_Write_Block_Table(int wForce);
71
static int FTL_Write_Block_Table_Data(void);
72
static int FTL_Check_Block_Table(int wOldTable);
73
static int FTL_Static_Wear_Leveling(void);
74
static u32 FTL_Replace_Block_Table(void);
75
static int FTL_Write_IN_Progress_Block_Table_Page(void);
77
static u32 FTL_Get_Page_Num(u64 length);
78
static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
80
static u32 FTL_Replace_OneBlock(u32 wBlockNum,
82
static u32 FTL_Replace_LWBlock(u32 wBlockNum,
83
int *pGarbageCollect);
84
static u32 FTL_Replace_MWBlock(void);
85
static int FTL_Replace_Block(u64 blk_addr);
86
static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
88
struct device_info_tag DeviceInfo;
89
struct flash_cache_tag Cache;
90
static struct spectra_l2_cache_info cache_l2;
92
static u8 *cache_l2_page_buf;
93
static u8 *cache_l2_blk_buf;
99
static u16 g_wBlockTableOffset;
100
static u32 g_wBlockTableIndex;
101
static u8 g_cBlockTableStatus;
103
static u8 *g_pTempBuf;
104
static u8 *flag_check_blk_table;
105
static u8 *tmp_buf_search_bt_in_block;
106
static u8 *spare_buf_search_bt_in_block;
107
static u8 *spare_buf_bt_search_bt_in_block;
108
static u8 *tmp_buf1_read_blk_table;
109
static u8 *tmp_buf2_read_blk_table;
110
static u8 *flags_static_wear_leveling;
111
static u8 *tmp_buf_write_blk_table_data;
112
static u8 *tmp_buf_read_disturbance;
114
u8 *buf_read_page_main_spare;
115
u8 *buf_write_page_main_spare;
116
u8 *buf_read_page_spare;
117
u8 *buf_get_bad_block;
119
#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
120
struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
121
struct flash_cache_tag cache_start_copy;
124
int g_wNumFreeBlocks;
128
static u8 bt_flag = FIRST_BT_ID;
129
static u8 bt_block_changed;
131
static u16 cache_block_to_write;
132
static u8 last_erased = FIRST_BT_ID;
135
static u8 BT_GC_Called;
138
#define COPY_BACK_BUF_NUM 10
140
static u8 ftl_cmd_cnt; /* Init value is 0 */
143
u8 *g_pBTStartingCopy;
144
u8 *g_pWearCounterCopy;
145
u16 *g_pReadCounterCopy;
146
u8 *g_pBlockTableCopies;
147
u8 *g_pNextBlockTable;
148
static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
149
static int cp_back_buf_idx;
151
static u8 *g_temp_buf;
153
#pragma pack(push, 1)
155
struct BTableChangesDelta {
158
u16 g_wBlockTableOffset;
159
u32 g_wBlockTableIndex;
170
struct BTableChangesDelta *p_BTableChangesDelta;
174
#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
175
#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
177
#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
179
#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
181
#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
183
#if SUPPORT_LARGE_BLOCKNUM
184
#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
187
#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
190
#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
191
FTL_Get_WearCounter_Table_Mem_Size_Bytes
192
#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
193
FTL_Get_ReadCounter_Table_Mem_Size_Bytes
195
/*
 * Size, in bytes, of the block table image as stored on flash:
 * LBA->PBA table + one wear-counter byte per data block, plus one
 * u16 read counter per data block on MLC devices, plus 4 trailing
 * bytes (purpose not visible in this fragment -- likely a signature
 * or checksum; TODO confirm upstream).
 * NOTE(review): opening brace, byte_num declaration, else keyword and
 * return statement are missing from this garbled extraction.
 */
static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
199
if (DeviceInfo.MLCDevice) {
200
byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
201
DeviceInfo.wDataBlockNum * sizeof(u8) +
202
DeviceInfo.wDataBlockNum * sizeof(u16);
204
byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
205
DeviceInfo.wDataBlockNum * sizeof(u8);
208
/* 4 extra bytes appended to the flash image */
byte_num += 4 * sizeof(u8);
213
/*
 * Number of flash pages needed to hold the on-flash block table image
 * (byte size converted to pages via FTL_Get_Page_Num).
 */
static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
215
return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
218
/*
 * Serialize a window of the in-RAM block table into flashBuf:
 * first the LBA->PBA entries (packed 3 bytes/entry when
 * SUPPORT_LARGE_BLOCKNUM, else 2 bytes/entry, big-endian byte order),
 * then the per-block wear counters, then -- on MLC devices -- the u16
 * read counters.  sizeTxed is the number of bytes already transmitted
 * by earlier calls; the return value is the number of bytes placed in
 * flashBuf this call.
 * NOTE(review): garbled extraction -- the for-statement header around
 * line L250, #else/#endif lines and several braces are missing; code
 * kept verbatim, stray numeric tokens included.
 */
static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
221
u32 wBytesCopied, blk_tbl_size, wBytes;
222
u32 *pbt = (u32 *)g_pBlockTable;
224
blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
226
/* loop condition fragment: stay inside both caller window and table */
(wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
228
#if SUPPORT_LARGE_BLOCKNUM
229
/* 3-byte big-endian pack: offset%3==0 -> bits 23..16, ==1 -> 15..8, ==2 -> 7..0 */
flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
230
>> (((wBytes + sizeTxed) % 3) ?
231
((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
233
/* 2-byte big-endian pack: even offset -> high byte, odd -> low byte */
flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
234
>> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
238
/* rebase sizeTxed into the wear-counter region */
sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
239
blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
240
wBytesCopied = wBytes;
241
/* copy whichever is smaller: remaining window or remaining counters */
wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
242
(sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
243
memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
245
sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
247
if (DeviceInfo.MLCDevice) {
248
blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
249
wBytesCopied += wBytes;
250
for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
251
((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
252
/* u16 read counters, big-endian byte order */
flashBuf[wBytes + wBytesCopied] =
253
(g_pReadCounter[(wBytes + sizeTxed) / 2] >>
254
(((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
257
return wBytesCopied + wBytes;
260
/*
 * Inverse of FTL_Copy_Block_Table_To_Flash: de-serialize flashBuf back
 * into the in-RAM LBA->PBA table, wear counters, and (MLC only) read
 * counters, resuming at byte offset sizeTxed.  Returns the number of
 * bytes consumed from flashBuf.
 * NOTE(review): garbled extraction -- #else/#endif lines and several
 * closing braces are missing; stray numeric tokens preserved verbatim.
 */
static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
261
u32 sizeToTx, u32 sizeTxed)
263
u32 wBytesCopied, blk_tbl_size, wBytes;
264
u32 *pbt = (u32 *)g_pBlockTable;
266
blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
267
for (wBytes = 0; (wBytes < sizeToTx) &&
268
((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
269
#if SUPPORT_LARGE_BLOCKNUM
270
/* zero the entry when starting a new 3-byte group */
if (!((wBytes + sizeTxed) % 3))
271
pbt[(wBytes + sizeTxed) / 3] = 0;
272
pbt[(wBytes + sizeTxed) / 3] |=
273
(flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
274
((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
276
/* zero the entry when starting a new 2-byte group */
if (!((wBytes + sizeTxed) % 2))
277
pbt[(wBytes + sizeTxed) / 2] = 0;
278
pbt[(wBytes + sizeTxed) / 2] |=
279
(flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
284
sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
285
blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
286
wBytesCopied = wBytes;
287
wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
288
(sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
289
memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
290
sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
292
if (DeviceInfo.MLCDevice) {
293
wBytesCopied += wBytes;
294
blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
295
for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
296
((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
297
/*
 * NOTE(review): the pbt paths above zero on !(offset % N), but this
 * read-counter path zeros on (offset % 2) without negation, which
 * would clobber the already-OR'd high byte -- looks inverted; verify
 * against the upstream driver before relying on it.
 */
if (((wBytes + sizeTxed) % 2))
298
g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
299
g_pReadCounter[(wBytes + sizeTxed) / 2] |=
301
(((wBytes + sizeTxed) % 2) ? 0 : 8));
305
return wBytesCopied+wBytes;
308
/*
 * Stamp the block-table signature into buf: BTSIG_BYTES bytes starting
 * at BTSIG_OFFSET, each derived from the table tag plus an arithmetic
 * progression of BTSIG_DELTA, wrapped into [FIRST_BT_ID, LAST_BT_ID].
 * NOTE(review): braces, the declaration of i, and the return statement
 * are missing from this garbled extraction; code kept verbatim.
 */
static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
312
for (i = 0; i < BTSIG_BYTES; i++)
313
buf[BTSIG_OFFSET + i] =
314
((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
315
(1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
320
static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
322
static u8 tag[BTSIG_BYTES >> 1];
323
int i, j, k, tagi, tagtemp, status;
325
*tagarray = (u8 *)tag;
328
for (i = 0; i < (BTSIG_BYTES - 1); i++) {
329
for (j = i + 1; (j < BTSIG_BYTES) &&
330
(tagi < (BTSIG_BYTES >> 1)); j++) {
331
tagtemp = buf[BTSIG_OFFSET + j] -
332
buf[BTSIG_OFFSET + i];
333
if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
334
tagtemp = (buf[BTSIG_OFFSET + i] +
335
(1 + LAST_BT_ID - FIRST_BT_ID) -
337
(1 + LAST_BT_ID - FIRST_BT_ID);
339
for (k = 0; k < tagi; k++) {
340
if (tagtemp == tag[k])
344
if (status == FAIL) {
345
tag[tagi++] = tagtemp;
346
i = (j == (i + 1)) ? i + 1 : i;
347
j = (j == (i + 1)) ? i + 1 : i;
357
/*
 * Walk every block in the Spectra range and erase any block that is
 * marked SPARE (and not BAD); blocks whose erase fails are logged and
 * re-marked BAD in the block table.
 * NOTE(review): garbled extraction -- the declarations of j/blks/ret/
 * block, the read of block from pbt[j], the check of ret, and the
 * closing braces/return are missing; code kept verbatim.
 */
static int FTL_Execute_SPL_Recovery(void)
360
u32 *pbt = (u32 *)g_pBlockTable;
363
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
364
__FILE__, __LINE__, __func__);
366
blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
367
for (j = 0; j <= blks; j++) {
369
/* spare and not already bad -> candidate for erase */
if (((block & BAD_BLOCK) != BAD_BLOCK) &&
370
((block & SPARE_BLOCK) == SPARE_BLOCK)) {
371
ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
373
nand_dbg_print(NAND_DBG_WARN,
374
"NAND Program fail in %s, Line %d, "
375
"Function: %s, new Bad Block %d "
377
__FILE__, __LINE__, __func__,
378
(int)(block & ~BAD_BLOCK));
379
MARK_BLOCK_AS_BAD(pbt[j]);
387
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_FTL_IdentifyDevice
* Inputs:       pointer to identify data structure
* Outputs:      PASS / FAIL
* Description:  the identify data structure is filled in with
*               information for the block driver.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
394
/*
 * Fill the caller's identify structure with the device geometry cached
 * in DeviceInfo (block/page counts and sizes, ECC bytes per sector).
 * NOTE(review): braces and the return statement are missing from this
 * garbled extraction; code kept verbatim.  ("indentfy" typo in the
 * struct tag is upstream's, not introduced here.)
 */
int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
396
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
397
__FILE__, __LINE__, __func__);
399
dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
400
dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
401
dev_data->PageDataSize = DeviceInfo.wPageDataSize;
402
dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
403
dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
409
static int allocate_memory(void)
411
u32 block_table_size, page_size, block_size, mem_size;
418
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
419
__FILE__, __LINE__, __func__);
421
page_size = DeviceInfo.wPageSize;
422
block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
424
block_table_size = DeviceInfo.wDataBlockNum *
425
(sizeof(u32) + sizeof(u8) + sizeof(u16));
426
block_table_size += (DeviceInfo.wPageDataSize -
427
(block_table_size % DeviceInfo.wPageDataSize)) %
428
DeviceInfo.wPageDataSize;
430
/* Malloc memory for block tables */
431
g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
433
goto block_table_fail;
434
memset(g_pBlockTable, 0, block_table_size);
435
total_bytes += block_table_size;
437
g_pWearCounter = (u8 *)(g_pBlockTable +
438
DeviceInfo.wDataBlockNum * sizeof(u32));
440
if (DeviceInfo.MLCDevice)
441
g_pReadCounter = (u16 *)(g_pBlockTable +
442
DeviceInfo.wDataBlockNum *
443
(sizeof(u32) + sizeof(u8)));
445
/* Malloc memory and init for cache items */
446
for (i = 0; i < CACHE_ITEM_NUM; i++) {
447
Cache.array[i].address = NAND_CACHE_INIT_ADDR;
448
Cache.array[i].use_cnt = 0;
449
Cache.array[i].changed = CLEAR;
450
Cache.array[i].buf = kmalloc(Cache.cache_item_size,
452
if (!Cache.array[i].buf)
453
goto cache_item_fail;
454
memset(Cache.array[i].buf, 0, Cache.cache_item_size);
455
total_bytes += Cache.cache_item_size;
458
/* Malloc memory for IPF */
459
g_pIPF = kmalloc(page_size, GFP_ATOMIC);
462
memset(g_pIPF, 0, page_size);
463
total_bytes += page_size;
465
/* Malloc memory for data merging during Level2 Cache flush */
466
cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
467
if (!cache_l2_page_buf)
468
goto cache_l2_page_buf_fail;
469
memset(cache_l2_page_buf, 0xff, page_size);
470
total_bytes += page_size;
472
cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
473
if (!cache_l2_blk_buf)
474
goto cache_l2_blk_buf_fail;
475
memset(cache_l2_blk_buf, 0xff, block_size);
476
total_bytes += block_size;
478
/* Malloc memory for temp buffer */
479
g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
482
memset(g_pTempBuf, 0, Cache.cache_item_size);
483
total_bytes += Cache.cache_item_size;
485
/* Malloc memory for block table blocks */
486
mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
487
g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
490
memset(g_pBTBlocks, 0xff, mem_size);
491
total_bytes += mem_size;
493
/* Malloc memory for function FTL_Check_Block_Table */
494
flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
495
if (!flag_check_blk_table)
496
goto flag_check_blk_table_fail;
497
total_bytes += DeviceInfo.wDataBlockNum;
499
/* Malloc memory for function FTL_Search_Block_Table_IN_Block */
500
tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
501
if (!tmp_buf_search_bt_in_block)
502
goto tmp_buf_search_bt_in_block_fail;
503
memset(tmp_buf_search_bt_in_block, 0xff, page_size);
504
total_bytes += page_size;
506
mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
507
spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
508
if (!spare_buf_search_bt_in_block)
509
goto spare_buf_search_bt_in_block_fail;
510
memset(spare_buf_search_bt_in_block, 0xff, mem_size);
511
total_bytes += mem_size;
513
spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
514
if (!spare_buf_bt_search_bt_in_block)
515
goto spare_buf_bt_search_bt_in_block_fail;
516
memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
517
total_bytes += mem_size;
519
/* Malloc memory for function FTL_Read_Block_Table */
520
tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
521
if (!tmp_buf1_read_blk_table)
522
goto tmp_buf1_read_blk_table_fail;
523
memset(tmp_buf1_read_blk_table, 0xff, page_size);
524
total_bytes += page_size;
526
tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
527
if (!tmp_buf2_read_blk_table)
528
goto tmp_buf2_read_blk_table_fail;
529
memset(tmp_buf2_read_blk_table, 0xff, page_size);
530
total_bytes += page_size;
532
/* Malloc memory for function FTL_Static_Wear_Leveling */
533
flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
535
if (!flags_static_wear_leveling)
536
goto flags_static_wear_leveling_fail;
537
total_bytes += DeviceInfo.wDataBlockNum;
539
/* Malloc memory for function FTL_Write_Block_Table_Data */
540
if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
541
mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
542
2 * DeviceInfo.wPageSize;
544
mem_size = DeviceInfo.wPageSize;
545
tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
546
if (!tmp_buf_write_blk_table_data)
547
goto tmp_buf_write_blk_table_data_fail;
548
memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
549
total_bytes += mem_size;
551
/* Malloc memory for function FTL_Read_Disturbance */
552
tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
553
if (!tmp_buf_read_disturbance)
554
goto tmp_buf_read_disturbance_fail;
555
memset(tmp_buf_read_disturbance, 0xff, block_size);
556
total_bytes += block_size;
558
/* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
559
buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
560
if (!buf_read_page_main_spare)
561
goto buf_read_page_main_spare_fail;
562
total_bytes += DeviceInfo.wPageSize;
564
/* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
565
buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
566
if (!buf_write_page_main_spare)
567
goto buf_write_page_main_spare_fail;
568
total_bytes += DeviceInfo.wPageSize;
570
/* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
571
buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
572
if (!buf_read_page_spare)
573
goto buf_read_page_spare_fail;
574
memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
575
total_bytes += DeviceInfo.wPageSpareSize;
577
/* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
578
buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
579
if (!buf_get_bad_block)
580
goto buf_get_bad_block_fail;
581
memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
582
total_bytes += DeviceInfo.wPageSpareSize;
585
g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
588
memset(g_temp_buf, 0xff, block_size);
589
total_bytes += block_size;
591
/* Malloc memory for copy of block table used in CDMA mode */
592
g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
593
if (!g_pBTStartingCopy)
594
goto bt_starting_copy;
595
memset(g_pBTStartingCopy, 0, block_table_size);
596
total_bytes += block_table_size;
598
g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
599
DeviceInfo.wDataBlockNum * sizeof(u32));
601
if (DeviceInfo.MLCDevice)
602
g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
603
DeviceInfo.wDataBlockNum *
604
(sizeof(u32) + sizeof(u8)));
606
/* Malloc memory for block table copies */
607
mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
608
5 * DeviceInfo.wDataBlockNum * sizeof(u8);
609
if (DeviceInfo.MLCDevice)
610
mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
611
g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
612
if (!g_pBlockTableCopies)
613
goto blk_table_copies_fail;
614
memset(g_pBlockTableCopies, 0, mem_size);
615
total_bytes += mem_size;
616
g_pNextBlockTable = g_pBlockTableCopies;
618
/* Malloc memory for Block Table Delta */
619
mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
620
g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
623
memset(g_pBTDelta, 0, mem_size);
624
total_bytes += mem_size;
625
g_pBTDelta_Free = g_pBTDelta;
627
/* Malloc memory for Copy Back Buffers */
628
for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
629
cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
630
if (!cp_back_buf_copies[j])
631
goto cp_back_buf_copies_fail;
632
memset(cp_back_buf_copies[j], 0, block_size);
633
total_bytes += block_size;
637
/* Malloc memory for pending commands list */
638
mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
639
info.pcmds = kzalloc(mem_size, GFP_KERNEL);
641
goto pending_cmds_buf_fail;
642
total_bytes += mem_size;
644
/* Malloc memory for CDMA descripter table */
645
mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
646
info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
647
if (!info.cdma_desc_buf)
648
goto cdma_desc_buf_fail;
649
total_bytes += mem_size;
651
/* Malloc memory for Memcpy descripter table */
652
mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
653
info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
654
if (!info.memcp_desc_buf)
655
goto memcp_desc_buf_fail;
656
total_bytes += mem_size;
659
nand_dbg_print(NAND_DBG_WARN,
660
"Total memory allocated in FTL layer: %d\n", total_bytes);
666
kfree(info.cdma_desc_buf);
669
pending_cmds_buf_fail:
670
cp_back_buf_copies_fail:
673
kfree(cp_back_buf_copies[j]);
676
kfree(g_pBlockTableCopies);
677
blk_table_copies_fail:
678
kfree(g_pBTStartingCopy);
682
kfree(buf_get_bad_block);
685
buf_get_bad_block_fail:
686
kfree(buf_read_page_spare);
687
buf_read_page_spare_fail:
688
kfree(buf_write_page_main_spare);
689
buf_write_page_main_spare_fail:
690
kfree(buf_read_page_main_spare);
691
buf_read_page_main_spare_fail:
692
kfree(tmp_buf_read_disturbance);
693
tmp_buf_read_disturbance_fail:
694
kfree(tmp_buf_write_blk_table_data);
695
tmp_buf_write_blk_table_data_fail:
696
kfree(flags_static_wear_leveling);
697
flags_static_wear_leveling_fail:
698
kfree(tmp_buf2_read_blk_table);
699
tmp_buf2_read_blk_table_fail:
700
kfree(tmp_buf1_read_blk_table);
701
tmp_buf1_read_blk_table_fail:
702
kfree(spare_buf_bt_search_bt_in_block);
703
spare_buf_bt_search_bt_in_block_fail:
704
kfree(spare_buf_search_bt_in_block);
705
spare_buf_search_bt_in_block_fail:
706
kfree(tmp_buf_search_bt_in_block);
707
tmp_buf_search_bt_in_block_fail:
708
kfree(flag_check_blk_table);
709
flag_check_blk_table_fail:
714
kfree(cache_l2_blk_buf);
715
cache_l2_blk_buf_fail:
716
kfree(cache_l2_page_buf);
717
cache_l2_page_buf_fail:
723
kfree(Cache.array[i].buf);
724
kfree(g_pBlockTable);
726
printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
733
/*
 * Release every buffer obtained by allocate_memory(), in roughly the
 * reverse order of allocation.  kfree() on a NULL pointer is a no-op,
 * so partially-allocated state is safe here.
 * NOTE(review): braces, the declaration of i, #if CMD_DMA guards and
 * the return statement are missing from this garbled extraction; code
 * kept verbatim.
 */
static int free_memory(void)
738
/* CMD_DMA-era descriptor tables */
kfree(info.memcp_desc_buf);
739
kfree(info.cdma_desc_buf);
741
for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
742
kfree(cp_back_buf_copies[i]);
744
kfree(g_pBlockTableCopies);
745
kfree(g_pBTStartingCopy);
747
/* lld_nand.c scratch buffers */
kfree(buf_get_bad_block);
749
kfree(buf_read_page_spare);
750
kfree(buf_write_page_main_spare);
751
kfree(buf_read_page_main_spare);
752
kfree(tmp_buf_read_disturbance);
753
kfree(tmp_buf_write_blk_table_data);
754
kfree(flags_static_wear_leveling);
755
kfree(tmp_buf2_read_blk_table);
756
kfree(tmp_buf1_read_blk_table);
757
kfree(spare_buf_bt_search_bt_in_block);
758
kfree(spare_buf_search_bt_in_block);
759
kfree(tmp_buf_search_bt_in_block);
760
kfree(flag_check_blk_table);
764
/* per-item L1 cache buffers, then the block table itself */
for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
765
kfree(Cache.array[i].buf);
766
kfree(g_pBlockTable);
771
/*
 * Debug helper: walk the Level-2 cache list and print, for each node,
 * its logical block number and every valid pages_array entry
 * (MAX_U32_VALUE marks an unused slot).
 * NOTE(review): braces and the declarations of p/i/n are missing from
 * this garbled extraction; code kept verbatim.
 */
static void dump_cache_l2_table(void)
774
struct spectra_l2_cache_list *pnd;
778
list_for_each(p, &cache_l2.table.list) {
779
pnd = list_entry(p, struct spectra_l2_cache_list, list);
780
nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
782
for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
783
if (pnd->pages_array[i] != MAX_U32_VALUE)
784
nand_dbg_print(NAND_DBG_WARN, "    pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
791
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_FTL_Init
* Inputs:       none
* Outputs:      PASS=0 / FAIL=1
* Description:  allocates the memory for cache array,
*               important data structures
*               clears the cache array
*               reads the block table from flash into array
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
800
/*
 * Top-level FTL bring-up: size the L1 cache (one page of data per
 * item), allocate all working memory, snapshot the cache state for
 * CDMA-failure restore, load the block table from flash, and reset the
 * Level-2 cache bookkeeping.
 * NOTE(review): braces, the declaration of i, the failure returns
 * after the != PASS checks, #endif lines and the final return are
 * missing from this garbled extraction; code kept verbatim.
 */
int GLOB_FTL_Init(void)
804
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
805
__FILE__, __LINE__, __func__);
807
Cache.pages_per_item = 1;
808
Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
810
if (allocate_memory() != PASS)
814
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
815
/* snapshot cache so it can be restored after a failed CDMA chain */
memcpy((void *)&cache_start_copy, (void *)&Cache,
816
sizeof(struct flash_cache_tag));
817
memset((void *)&int_cache, -1,
818
sizeof(struct flash_cache_delta_list_tag) *
819
(MAX_CHANS + MAX_DESCS));
824
if (FTL_Read_Block_Table() != PASS)
827
/* Init the Level2 Cache data structure */
828
for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
829
cache_l2.blk_array[i] = MAX_U32_VALUE;
830
cache_l2.cur_blk_idx = 0;
831
cache_l2.cur_page_num = 0;
832
INIT_LIST_HEAD(&cache_l2.table.list);
833
cache_l2.table.logical_blk_num = MAX_U32_VALUE;
835
dump_cache_l2_table();
843
static void save_blk_table_changes(u16 idx)
846
u32 *pbt = (u32 *)g_pBTStartingCopy;
848
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
852
id = idx - MAX_CHANS;
853
if (int_cache[id].item != -1) {
854
cache_blks = int_cache[id].item;
855
cache_start_copy.array[cache_blks].address =
856
int_cache[id].cache.address;
857
cache_start_copy.array[cache_blks].changed =
858
int_cache[id].cache.changed;
862
ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
864
while (ftl_cmd <= PendingCMD[idx].Tag) {
865
if (p_BTableChangesDelta->ValidFields == 0x01) {
866
g_wBlockTableOffset =
867
p_BTableChangesDelta->g_wBlockTableOffset;
868
} else if (p_BTableChangesDelta->ValidFields == 0x0C) {
869
pbt[p_BTableChangesDelta->BT_Index] =
870
p_BTableChangesDelta->BT_Entry_Value;
871
debug_boundary_error(((
872
p_BTableChangesDelta->BT_Index)),
873
DeviceInfo.wDataBlockNum, 0);
874
} else if (p_BTableChangesDelta->ValidFields == 0x03) {
875
g_wBlockTableOffset =
876
p_BTableChangesDelta->g_wBlockTableOffset;
878
p_BTableChangesDelta->g_wBlockTableIndex;
879
} else if (p_BTableChangesDelta->ValidFields == 0x30) {
880
g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
881
p_BTableChangesDelta->WC_Entry_Value;
882
} else if ((DeviceInfo.MLCDevice) &&
883
(p_BTableChangesDelta->ValidFields == 0xC0)) {
884
g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
885
p_BTableChangesDelta->RC_Entry_Value;
886
nand_dbg_print(NAND_DBG_DEBUG,
887
"In event status setting read counter "
888
"GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
890
p_BTableChangesDelta->RC_Entry_Value,
891
(unsigned int)p_BTableChangesDelta->RC_Index);
893
nand_dbg_print(NAND_DBG_DEBUG,
894
"This should never occur \n");
896
p_BTableChangesDelta += 1;
897
ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
901
static void discard_cmds(u16 n)
903
u32 *pbt = (u32 *)g_pBTStartingCopy;
906
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
911
if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
912
(PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
913
for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
914
if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
915
MARK_BLK_AS_DISCARD(pbt[k]);
919
ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
920
while (ftl_cmd <= PendingCMD[n].Tag) {
921
p_BTableChangesDelta += 1;
922
ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
925
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
928
if (int_cache[id].item != -1) {
929
cache_blks = int_cache[id].item;
930
if (PendingCMD[n].CMD == MEMCOPY_CMD) {
931
if ((cache_start_copy.array[cache_blks].buf <=
932
PendingCMD[n].DataDestAddr) &&
933
((cache_start_copy.array[cache_blks].buf +
934
Cache.cache_item_size) >
935
PendingCMD[n].DataDestAddr)) {
936
cache_start_copy.array[cache_blks].address =
937
NAND_CACHE_INIT_ADDR;
938
cache_start_copy.array[cache_blks].use_cnt =
940
cache_start_copy.array[cache_blks].changed =
944
cache_start_copy.array[cache_blks].address =
945
int_cache[id].cache.address;
946
cache_start_copy.array[cache_blks].changed =
947
int_cache[id].cache.changed;
953
/*
 * Handle a command that completed with CMD_PASS: if no earlier command
 * in the chain has failed, commit its block-table delta entries.
 * NOTE(review): truncated extraction -- braces and the else branch
 * (presumably discarding the deltas once a failure occurred; confirm
 * upstream) are missing; code kept verbatim.
 */
static void process_cmd_pass(int *first_failed_cmd, u16 idx)
955
if (0 == *first_failed_cmd)
956
save_blk_table_changes(idx);
961
static void process_cmd_fail_abort(int *first_failed_cmd,
964
u32 *pbt = (u32 *)g_pBTStartingCopy;
967
int erase_fail, program_fail;
968
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
973
if (0 == *first_failed_cmd)
974
*first_failed_cmd = PendingCMD[idx].SBDCmdIndex;
976
nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occured "
977
"while executing %u Command %u accesing Block %u\n",
978
(unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
980
(unsigned int)PendingCMD[idx].Block);
982
ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
983
while (ftl_cmd <= PendingCMD[idx].Tag) {
984
p_BTableChangesDelta += 1;
985
ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
988
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
989
id = idx - MAX_CHANS;
991
if (int_cache[id].item != -1) {
992
cache_blks = int_cache[id].item;
993
if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
994
cache_start_copy.array[cache_blks].address =
995
int_cache[id].cache.address;
996
cache_start_copy.array[cache_blks].changed = SET;
997
} else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
998
cache_start_copy.array[cache_blks].address =
999
NAND_CACHE_INIT_ADDR;
1000
cache_start_copy.array[cache_blks].use_cnt = 0;
1001
cache_start_copy.array[cache_blks].changed =
1003
} else if (PendingCMD[idx].CMD == ERASE_CMD) {
1005
} else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
1011
erase_fail = (event == EVENT_ERASE_FAILURE) &&
1012
(PendingCMD[idx].CMD == ERASE_CMD);
1014
program_fail = (event == EVENT_PROGRAM_FAILURE) &&
1015
((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
1016
(PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));
1018
if (erase_fail || program_fail) {
1019
for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1020
if (PendingCMD[idx].Block ==
1021
(pbt[i] & (~BAD_BLOCK)))
1022
MARK_BLOCK_AS_BAD(pbt[i]);
1027
static void process_cmd(int *first_failed_cmd, u16 idx, int event)
1032
if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
1035
if (PendingCMD[idx].Status == CMD_PASS) {
1036
process_cmd_pass(first_failed_cmd, idx);
1037
} else if ((PendingCMD[idx].Status == CMD_FAIL) ||
1038
(PendingCMD[idx].Status == CMD_ABORT)) {
1039
process_cmd_fail_abort(first_failed_cmd, idx, event);
1040
} else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
1041
PendingCMD[idx].Tag) {
1042
nand_dbg_print(NAND_DBG_DEBUG,
1043
" Command no. %hu is not executed\n",
1044
(unsigned int)PendingCMD[idx].Tag);
1045
ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
1046
while (ftl_cmd <= PendingCMD[idx].Tag) {
1047
p_BTableChangesDelta += 1;
1048
ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
1054
/*
 * Non-CMD_DMA build stub of process_cmd(): event-status processing only
 * applies to CMD_DMA chains, so reaching this function is a logic error
 * and it merely logs a warning.
 * NOTE(review): braces are missing from this garbled extraction; code
 * kept verbatim.
 */
static void process_cmd(int *first_failed_cmd, u16 idx, int event)
1056
printk(KERN_ERR "temporary workaround function. "
1057
"Should not be called! \n");
1060
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_FTL_Event_Status
* Outputs:      Event Code
* Description:  It is called by SBD after hardware interrupt signalling
*               completion of commands chain
*               It does following things
*               get event status from LLD
*               analyze command chain status
*               determine last command executed
*               rebuild the block table in case of uncorrectable error
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1074
int GLOB_FTL_Event_Status(int *first_failed_cmd)
1076
int event_code = PASS;
1079
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1080
__FILE__, __LINE__, __func__);
1082
*first_failed_cmd = 0;
1084
event_code = GLOB_LLD_Event_Status();
1086
switch (event_code) {
1088
nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
1090
case EVENT_UNCORRECTABLE_DATA_ERROR:
1091
nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
1093
case EVENT_PROGRAM_FAILURE:
1094
case EVENT_ERASE_FAILURE:
1095
nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
1096
"Event code: 0x%x\n", event_code);
1097
p_BTableChangesDelta =
1098
(struct BTableChangesDelta *)g_pBTDelta;
1099
for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
1101
process_cmd(first_failed_cmd, i_P, event_code);
1102
memcpy(g_pBlockTable, g_pBTStartingCopy,
1103
DeviceInfo.wDataBlockNum * sizeof(u32));
1104
memcpy(g_pWearCounter, g_pWearCounterCopy,
1105
DeviceInfo.wDataBlockNum * sizeof(u8));
1106
if (DeviceInfo.MLCDevice)
1107
memcpy(g_pReadCounter, g_pReadCounterCopy,
1108
DeviceInfo.wDataBlockNum * sizeof(u16));
1110
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1111
memcpy((void *)&Cache, (void *)&cache_start_copy,
1112
sizeof(struct flash_cache_tag));
1113
memset((void *)&int_cache, -1,
1114
sizeof(struct flash_cache_delta_list_tag) *
1115
(MAX_DESCS + MAX_CHANS));
1119
nand_dbg_print(NAND_DBG_WARN,
1120
"Handling unexpected event code - 0x%x\n",
1126
memcpy(g_pBTStartingCopy, g_pBlockTable,
1127
DeviceInfo.wDataBlockNum * sizeof(u32));
1128
memcpy(g_pWearCounterCopy, g_pWearCounter,
1129
DeviceInfo.wDataBlockNum * sizeof(u8));
1130
if (DeviceInfo.MLCDevice)
1131
memcpy(g_pReadCounterCopy, g_pReadCounter,
1132
DeviceInfo.wDataBlockNum * sizeof(u16));
1134
g_pBTDelta_Free = g_pBTDelta;
1136
g_pNextBlockTable = g_pBlockTableCopies;
1137
cp_back_buf_idx = 0;
1139
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1140
memcpy((void *)&cache_start_copy, (void *)&Cache,
1141
sizeof(struct flash_cache_tag));
1142
memset((void *)&int_cache, -1,
1143
sizeof(struct flash_cache_delta_list_tag) *
1144
(MAX_DESCS + MAX_CHANS));
1150
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1151
* Function: glob_ftl_execute_cmds
1154
* Description: pass thru to LLD
1155
***************************************************************/
1156
u16 glob_ftl_execute_cmds(void)
1158
nand_dbg_print(NAND_DBG_TRACE,
1159
"glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
1160
(unsigned int)ftl_cmd_cnt);
1162
return glob_lld_execute_cmds();
1168
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1169
* Function: GLOB_FTL_Read Immediate
1170
* Inputs: pointer to data
1172
* Outputs: PASS / FAIL
1173
* Description: Reads one page of data into RAM directly from flash without
1174
* using or disturbing cache.It is assumed this function is called
1175
* with CMD-DMA disabled.
1176
*****************************************************************/
1177
int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
1183
u32 *pbt = (u32 *)g_pBlockTable;
1185
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1186
__FILE__, __LINE__, __func__);
1188
Block = BLK_FROM_ADDR(addr);
1189
Page = PAGE_FROM_ADDR(addr, Block);
1191
if (!IS_SPARE_BLOCK(Block))
1194
phy_blk = pbt[Block];
1195
wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
1197
if (DeviceInfo.MLCDevice) {
1198
g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
1199
if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
1200
>= MAX_READ_COUNTER)
1201
FTL_Read_Disturbance(phy_blk);
1202
if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
1203
g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1204
FTL_Write_IN_Progress_Block_Table_Page();
1212
#ifdef SUPPORT_BIG_ENDIAN
1213
/*********************************************************************
1214
* Function: FTL_Invert_Block_Table
1217
* Description: Re-format the block table in ram based on BIG_ENDIAN and
1218
* LARGE_BLOCKNUM if necessary
1219
**********************************************************************/
1220
static void FTL_Invert_Block_Table(void)
1223
u32 *pbt = (u32 *)g_pBlockTable;
1225
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1226
__FILE__, __LINE__, __func__);
1228
#ifdef SUPPORT_LARGE_BLOCKNUM
1229
for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1230
pbt[i] = INVERTUINT32(pbt[i]);
1231
g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
1234
for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1235
pbt[i] = INVERTUINT16(pbt[i]);
1236
g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
1242
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1243
* Function: GLOB_FTL_Flash_Init
1245
* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1246
* Description: The flash controller is initialized
1247
* The flash device is reset
1248
* Perform a flash READ ID command to confirm that a
1249
* valid device is attached and active.
1250
* The DeviceInfo structure gets filled in
1251
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1252
int GLOB_FTL_Flash_Init(void)
1256
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1257
__FILE__, __LINE__, __func__);
1261
status = GLOB_LLD_Flash_Init();
1266
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1268
* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
1269
* Description: The flash controller is released
1270
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1271
int GLOB_FTL_Flash_Release(void)
1273
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1274
__FILE__, __LINE__, __func__);
1276
return GLOB_LLD_Flash_Release();
1280
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1281
* Function: GLOB_FTL_Cache_Release
1284
* Description: release all allocated memory in GLOB_FTL_Init
1285
* (allocated in GLOB_FTL_Init)
1286
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1287
void GLOB_FTL_Cache_Release(void)
1289
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1290
__FILE__, __LINE__, __func__);
1295
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1296
* Function: FTL_Cache_If_Hit
1297
* Inputs: Page Address
1298
* Outputs: Block number/UNHIT BLOCK
1299
* Description: Determines if the addressed page is in cache
1300
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1301
static u16 FTL_Cache_If_Hit(u64 page_addr)
1307
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1308
__FILE__, __LINE__, __func__);
1310
item = UNHIT_CACHE_ITEM;
1311
for (i = 0; i < CACHE_ITEM_NUM; i++) {
1312
addr = Cache.array[i].address;
1313
if ((page_addr >= addr) &&
1314
(page_addr < (addr + Cache.cache_item_size))) {
1323
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1324
* Function: FTL_Calculate_LRU
1327
* Description: Calculate the least recently block in a cache and record its
1328
* index in LRU field.
1329
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1330
static void FTL_Calculate_LRU(void)
1332
u16 i, bCurrentLRU, bTempCount;
1334
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1335
__FILE__, __LINE__, __func__);
1338
bTempCount = MAX_WORD_VALUE;
1340
for (i = 0; i < CACHE_ITEM_NUM; i++) {
1341
if (Cache.array[i].use_cnt < bTempCount) {
1343
bTempCount = Cache.array[i].use_cnt;
1347
Cache.LRU = bCurrentLRU;
1350
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1351
* Function: FTL_Cache_Read_Page
1352
* Inputs: pointer to read buffer, logical address and cache item number
1354
* Description: Read the page from the cached block addressed by blocknumber
1355
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1356
static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
1360
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1361
__FILE__, __LINE__, __func__);
1363
start_addr = Cache.array[cache_item].buf;
1364
start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
1365
DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
1368
GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
1369
DeviceInfo.wPageDataSize, 0);
1372
memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
1375
if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
1376
Cache.array[cache_item].use_cnt++;
1379
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1380
* Function: FTL_Cache_Read_All
1381
* Inputs: pointer to read buffer,block address
1382
* Outputs: PASS=0 / FAIL =1
1383
* Description: It reads pages in cache
1384
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1385
static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
1392
u32 *pbt = (u32 *)g_pBlockTable;
1395
Block = BLK_FROM_ADDR(phy_addr);
1396
Page = PAGE_FROM_ADDR(phy_addr, Block);
1397
PageCount = Cache.pages_per_item;
1399
nand_dbg_print(NAND_DBG_DEBUG,
1400
"%s, Line %d, Function: %s, Block: 0x%x\n",
1401
__FILE__, __LINE__, __func__, Block);
1404
for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1405
if ((pbt[i] & (~BAD_BLOCK)) == Block) {
1407
if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
1408
IS_DISCARDED_BLOCK(i)) {
1409
/* Add by yunpeng -2008.12.3 */
1411
GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
1412
PageCount * DeviceInfo.wPageDataSize, 0);
1416
PageCount * DeviceInfo.wPageDataSize);
1420
continue; /* break ?? */
1425
if (0xffffffff == lba)
1426
printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");
1429
wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
1430
PageCount, LLD_CMD_FLAG_MODE_CDMA);
1431
if (DeviceInfo.MLCDevice) {
1432
g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
1433
nand_dbg_print(NAND_DBG_DEBUG,
1434
"Read Counter modified in ftl_cmd_cnt %u"
1435
" Block %u Counter%u\n",
1436
ftl_cmd_cnt, (unsigned int)Block,
1437
g_pReadCounter[Block -
1438
DeviceInfo.wSpectraStartBlock]);
1440
p_BTableChangesDelta =
1441
(struct BTableChangesDelta *)g_pBTDelta_Free;
1442
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
1443
p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
1444
p_BTableChangesDelta->RC_Index =
1445
Block - DeviceInfo.wSpectraStartBlock;
1446
p_BTableChangesDelta->RC_Entry_Value =
1447
g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
1448
p_BTableChangesDelta->ValidFields = 0xC0;
1452
if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
1454
FTL_Read_Disturbance(Block);
1455
if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
1456
g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1457
FTL_Write_IN_Progress_Block_Table_Page();
1463
wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
1464
if (wResult == FAIL)
1467
if (DeviceInfo.MLCDevice) {
1468
g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
1469
if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
1471
FTL_Read_Disturbance(Block);
1472
if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
1473
g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1474
FTL_Write_IN_Progress_Block_Table_Page();
1481
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1482
* Function: FTL_Cache_Write_All
1483
* Inputs: pointer to cache in sys memory
1484
* address of free block in flash
1485
* Outputs: PASS=0 / FAIL=1
1486
* Description: writes all the pages of the block in cache to flash
1488
* NOTE:need to make sure this works ok when cache is limited
1489
* to a partial block. This is where copy-back would be
1490
* activated. This would require knowing which pages in the
1491
* cached block are clean/dirty.Right now we only know if
1492
* the whole block is clean/dirty.
1493
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1494
static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
1501
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1502
__FILE__, __LINE__, __func__);
1504
nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
1505
"on %d\n", cache_block_to_write,
1506
(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
1508
Block = BLK_FROM_ADDR(blk_addr);
1509
Page = PAGE_FROM_ADDR(blk_addr, Block);
1510
PageCount = Cache.pages_per_item;
1513
if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
1514
Block, Page, PageCount)) {
1515
nand_dbg_print(NAND_DBG_WARN,
1516
"NAND Program fail in %s, Line %d, "
1517
"Function: %s, new Bad Block %d generated! "
1518
"Need Bad Block replacing.\n",
1519
__FILE__, __LINE__, __func__, Block);
1524
if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
1525
nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
1526
" Line %d, Function %s, new Bad Block %d generated!"
1527
"Need Bad Block replacing.\n",
1528
__FILE__, __LINE__, __func__, Block);
1535
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1536
* Function: FTL_Copy_Block
1537
* Inputs: source block address
1538
* Destination block address
1539
* Outputs: PASS=0 / FAIL=1
1540
* Description: used only for static wear leveling to move the block
1541
* containing static data to new blocks(more worn)
1542
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1543
/*
 * Copy one block's data from old_blk_addr to blk_addr, one cache item
 * (Cache.pages_per_item pages) at a time, via the temporary buffer.
 * Used by static wear leveling to move static data onto more-worn blocks.
 * Returns: PASS=0 on success, FAIL=1 if any read or write step fails.
 * NOTE(review): the failure branch (wResult = FAIL; break;) and the final
 * return were reconstructed from elided lines — confirm against the
 * original source.
 */
int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
{
	int i, r1, r2, wResult = PASS;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
		r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
					i * DeviceInfo.wPageDataSize);
		r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
					i * DeviceInfo.wPageDataSize);
		/* Abort on the first failed chunk; partial copy is reported */
		if ((ERR == r1) || (FAIL == r2)) {
			wResult = FAIL;
			break;
		}
	}

	return wResult;
}
1564
/* Search the block table to find out the least wear block and then return it */
1565
static u32 find_least_worn_blk_for_l2_cache(void)
1568
u32 *pbt = (u32 *)g_pBlockTable;
1569
u8 least_wear_cnt = MAX_BYTE_VALUE;
1570
u32 least_wear_blk_idx = MAX_U32_VALUE;
1573
for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1574
if (IS_SPARE_BLOCK(i)) {
1575
phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
1576
if (phy_idx > DeviceInfo.wSpectraEndBlock)
1577
printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
1578
"Too big phy block num (%d)\n", phy_idx);
1579
if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
1580
least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
1581
least_wear_blk_idx = i;
1586
nand_dbg_print(NAND_DBG_WARN,
1587
"find_least_worn_blk_for_l2_cache: "
1588
"find block %d with least worn counter (%d)\n",
1589
least_wear_blk_idx, least_wear_cnt);
1591
return least_wear_blk_idx;
1596
/* Get blocks for Level2 Cache */
1597
static int get_l2_cache_blks(void)
1601
u32 *pbt = (u32 *)g_pBlockTable;
1603
for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
1604
blk = find_least_worn_blk_for_l2_cache();
1605
if (blk >= DeviceInfo.wDataBlockNum) {
1606
nand_dbg_print(NAND_DBG_WARN,
1607
"find_least_worn_blk_for_l2_cache: "
1608
"No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
1611
/* Tag the free block as discard in block table */
1612
pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
1613
/* Add the free block to the L2 Cache block array */
1614
cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
1620
static int erase_l2_cache_blocks(void)
1623
u32 pblk, lblk = BAD_BLOCK;
1625
u32 *pbt = (u32 *)g_pBlockTable;
1627
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1628
__FILE__, __LINE__, __func__);
1630
for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
1631
pblk = cache_l2.blk_array[i];
1633
/* If the L2 cache block is invalid, then just skip it */
1634
if (MAX_U32_VALUE == pblk)
1637
BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
1639
addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
1640
if (PASS == GLOB_FTL_Block_Erase(addr)) {
1641
/* Get logical block number of the erased block */
1642
lblk = FTL_Get_Block_Index(pblk);
1643
BUG_ON(BAD_BLOCK == lblk);
1644
/* Tag it as free in the block table */
1645
pbt[lblk] &= (u32)(~DISCARD_BLOCK);
1646
pbt[lblk] |= (u32)(SPARE_BLOCK);
1648
MARK_BLOCK_AS_BAD(pbt[lblk]);
1657
* Merge the valid data page in the L2 cache blocks into NAND.
1659
static int flush_l2_cache(void)
1661
struct list_head *p;
1662
struct spectra_l2_cache_list *pnd, *tmp_pnd;
1663
u32 *pbt = (u32 *)g_pBlockTable;
1664
u32 phy_blk, l2_blk;
1669
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1670
__FILE__, __LINE__, __func__);
1672
if (list_empty(&cache_l2.table.list)) /* No data to flush */
1675
//dump_cache_l2_table();
1677
if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
1678
g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
1679
FTL_Write_IN_Progress_Block_Table_Page();
1682
list_for_each(p, &cache_l2.table.list) {
1683
pnd = list_entry(p, struct spectra_l2_cache_list, list);
1684
if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
1685
IS_BAD_BLOCK(pnd->logical_blk_num) ||
1686
IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
1687
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
1688
memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
1690
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
1691
phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
1692
ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
1693
phy_blk, 0, DeviceInfo.wPagesPerBlock);
1695
printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
1699
for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
1700
if (pnd->pages_array[i] != MAX_U32_VALUE) {
1701
l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
1702
l2_page = pnd->pages_array[i] & 0xffff;
1703
ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
1705
printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
1707
memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
1711
/* Find a free block and tag the original block as discarded */
1712
addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
1713
ret = FTL_Replace_Block(addr);
1715
printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
1718
/* Write back the updated data into NAND */
1719
phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
1720
if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
1721
nand_dbg_print(NAND_DBG_WARN,
1722
"Program NAND block %d fail in %s, Line %d\n",
1723
phy_blk, __FILE__, __LINE__);
1724
/* This may not be really a bad block. So just tag it as discarded. */
1725
/* Then it has a chance to be erased when garbage collection. */
1726
/* If it is really bad, then the erase will fail and it will be marked */
1727
/* as bad then. Otherwise it will be marked as free and can be used again */
1728
MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
1729
/* Find another free block and write it again */
1730
FTL_Replace_Block(addr);
1731
phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
1732
if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
1733
printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
1734
"Some data will be lost!\n", phy_blk);
1735
MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
1738
/* tag the new free block as used block */
1739
pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
1743
/* Destroy the L2 Cache table and free the memory of all nodes */
1744
list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
1745
list_del(&pnd->list);
1749
/* Erase discard L2 cache blocks */
1750
if (erase_l2_cache_blocks() != PASS)
1751
nand_dbg_print(NAND_DBG_WARN,
1752
" Erase L2 cache blocks error in %s, Line %d\n",
1753
__FILE__, __LINE__);
1755
/* Init the Level2 Cache data structure */
1756
for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
1757
cache_l2.blk_array[i] = MAX_U32_VALUE;
1758
cache_l2.cur_blk_idx = 0;
1759
cache_l2.cur_page_num = 0;
1760
INIT_LIST_HEAD(&cache_l2.table.list);
1761
cache_l2.table.logical_blk_num = MAX_U32_VALUE;
1767
* Write back a changed victim cache item to the Level2 Cache
1768
* and update the L2 Cache table to map the change.
1769
* If the L2 Cache is full, then start to do the L2 Cache flush.
1771
static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
1773
u32 logical_blk_num;
1774
u16 logical_page_num;
1775
struct list_head *p;
1776
struct spectra_l2_cache_list *pnd, *pnd_new;
1780
nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
1781
__FILE__, __LINE__, __func__);
1784
* If Level2 Cache table is empty, then it means either:
1785
* 1. This is the first time that the function called after FTL_init
1787
* 2. The Level2 Cache has just been flushed
1789
* So, 'steal' some free blocks from NAND for L2 Cache using
1790
* by just mask them as discard in the block table
1792
if (list_empty(&cache_l2.table.list)) {
1793
BUG_ON(cache_l2.cur_blk_idx != 0);
1794
BUG_ON(cache_l2.cur_page_num!= 0);
1795
BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
1796
if (FAIL == get_l2_cache_blks()) {
1797
GLOB_FTL_Garbage_Collection();
1798
if (FAIL == get_l2_cache_blks()) {
1799
printk(KERN_ALERT "Fail to get L2 cache blks!\n");
1805
logical_blk_num = BLK_FROM_ADDR(logical_addr);
1806
logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1807
BUG_ON(logical_blk_num == MAX_U32_VALUE);
1809
/* Write the cache item data into the current position of L2 Cache */
1815
if (FAIL == GLOB_LLD_Write_Page_Main(buf,
1816
cache_l2.blk_array[cache_l2.cur_blk_idx],
1817
cache_l2.cur_page_num, 1)) {
1818
nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
1819
"%s, Line %d, new Bad Block %d generated!\n",
1821
cache_l2.blk_array[cache_l2.cur_blk_idx]);
1823
/* TODO: tag the current block as bad and try again */
1830
* Update the L2 Cache table.
1832
* First seaching in the table to see whether the logical block
1833
* has been mapped. If not, then kmalloc a new node for the
1834
* logical block, fill data, and then insert it to the list.
1835
* Otherwise, just update the mapped node directly.
1838
list_for_each(p, &cache_l2.table.list) {
1839
pnd = list_entry(p, struct spectra_l2_cache_list, list);
1840
if (pnd->logical_blk_num == logical_blk_num) {
1841
pnd->pages_array[logical_page_num] =
1842
(cache_l2.cur_blk_idx << 16) |
1843
cache_l2.cur_page_num;
1848
if (!found) { /* Create new node for the logical block here */
1850
/* The logical pages to physical pages map array is
1851
* located at the end of struct spectra_l2_cache_list.
1853
node_size = sizeof(struct spectra_l2_cache_list) +
1854
sizeof(u32) * DeviceInfo.wPagesPerBlock;
1855
pnd_new = kmalloc(node_size, GFP_ATOMIC);
1857
printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
1858
__FILE__, __LINE__);
1860
* TODO: Need to flush all the L2 cache into NAND ASAP
1861
* since no memory available here
1864
pnd_new->logical_blk_num = logical_blk_num;
1865
for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
1866
pnd_new->pages_array[i] = MAX_U32_VALUE;
1867
pnd_new->pages_array[logical_page_num] =
1868
(cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
1869
list_add(&pnd_new->list, &cache_l2.table.list);
1872
/* Increasing the current position pointer of the L2 Cache */
1873
cache_l2.cur_page_num++;
1874
if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
1875
cache_l2.cur_blk_idx++;
1876
if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
1877
/* The L2 Cache is full. Need to flush it now */
1878
nand_dbg_print(NAND_DBG_WARN,
1879
"L2 Cache is full, will start to flush it\n");
1882
cache_l2.cur_page_num = 0;
1890
* Seach in the Level2 Cache table to find the cache item.
1891
* If find, read the data from the NAND page of L2 Cache,
1892
* Otherwise, return FAIL.
1894
static int search_l2_cache(u8 *buf, u64 logical_addr)
1896
u32 logical_blk_num;
1897
u16 logical_page_num;
1898
struct list_head *p;
1899
struct spectra_l2_cache_list *pnd;
1900
u32 tmp = MAX_U32_VALUE;
1905
logical_blk_num = BLK_FROM_ADDR(logical_addr);
1906
logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1908
list_for_each(p, &cache_l2.table.list) {
1909
pnd = list_entry(p, struct spectra_l2_cache_list, list);
1910
if (pnd->logical_blk_num == logical_blk_num) {
1911
tmp = pnd->pages_array[logical_page_num];
1916
if (tmp != MAX_U32_VALUE) { /* Found valid map */
1917
phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
1918
phy_page = tmp & 0xFFFF;
1922
ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
1929
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1930
* Function: FTL_Cache_Write_Page
1931
* Inputs: Pointer to buffer, page address, cache block number
1932
* Outputs: PASS=0 / FAIL=1
1933
* Description: It writes the data in Cache Block
1934
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1935
static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
1936
u8 cache_blk, u16 flag)
1941
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1942
__FILE__, __LINE__, __func__);
1944
addr = Cache.array[cache_blk].address;
1945
pDest = Cache.array[cache_blk].buf;
1947
pDest += (unsigned long)(page_addr - addr);
1948
Cache.array[cache_blk].changed = SET;
1950
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1951
int_cache[ftl_cmd_cnt].item = cache_blk;
1952
int_cache[ftl_cmd_cnt].cache.address =
1953
Cache.array[cache_blk].address;
1954
int_cache[ftl_cmd_cnt].cache.changed =
1955
Cache.array[cache_blk].changed;
1957
GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
1960
memcpy(pDest, pData, DeviceInfo.wPageDataSize);
1962
if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
1963
Cache.array[cache_blk].use_cnt++;
1966
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1967
* Function: FTL_Cache_Write
1969
* Outputs: PASS=0 / FAIL=1
1970
* Description: It writes least frequently used Cache block to flash if it
1972
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1973
static int FTL_Cache_Write(void)
1975
int i, bResult = PASS;
1976
u16 bNO, least_count = 0xFFFF;
1978
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1979
__FILE__, __LINE__, __func__);
1981
FTL_Calculate_LRU();
1984
nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
1985
"Least used cache block is %d\n", bNO);
1987
if (Cache.array[bNO].changed != SET)
1990
nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
1991
" Block %d containing logical block %d is dirty\n",
1993
(u32)(Cache.array[bNO].address >>
1994
DeviceInfo.nBitsInBlockDataSize));
1996
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1997
int_cache[ftl_cmd_cnt].item = bNO;
1998
int_cache[ftl_cmd_cnt].cache.address =
1999
Cache.array[bNO].address;
2000
int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
2003
bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
2004
Cache.array[bNO].address);
2006
Cache.array[bNO].changed = CLEAR;
2008
least_count = Cache.array[bNO].use_cnt;
2010
for (i = 0; i < CACHE_ITEM_NUM; i++) {
2013
if (Cache.array[i].use_cnt > 0)
2014
Cache.array[i].use_cnt -= least_count;
2020
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2021
* Function: FTL_Cache_Read
2022
* Inputs: Page address
2023
* Outputs: PASS=0 / FAIL=1
2024
* Description: It reads the block from device in Cache Block
2025
* Set the LRU count to 1
2026
* Mark the Cache Block as clean
2027
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2028
static int FTL_Cache_Read(u64 logical_addr)
2030
u64 item_addr, phy_addr;
2034
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2035
__FILE__, __LINE__, __func__);
2037
num = Cache.LRU; /* The LRU cache item will be overwritten */
2039
item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
2040
Cache.cache_item_size;
2041
Cache.array[num].address = item_addr;
2042
Cache.array[num].use_cnt = 1;
2043
Cache.array[num].changed = CLEAR;
2046
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
2047
int_cache[ftl_cmd_cnt].item = num;
2048
int_cache[ftl_cmd_cnt].cache.address =
2049
Cache.array[num].address;
2050
int_cache[ftl_cmd_cnt].cache.changed =
2051
Cache.array[num].changed;
2055
* Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
2056
* Otherwise, read it from NAND
2058
ret = search_l2_cache(Cache.array[num].buf, logical_addr);
2059
if (PASS == ret) /* Hit in L2 Cache */
2062
/* Compute the physical start address of NAND device according to */
2063
/* the logical start address of the cache item (LRU cache item) */
2064
phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
2065
GLOB_u64_Remainder(item_addr, 2);
2067
return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
2070
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2071
* Function: FTL_Check_Block_Table
2073
* Outputs: PASS=0 / FAIL=1
2074
* Description: It checks the correctness of each block table entry
2075
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2076
static int FTL_Check_Block_Table(int wOldTable)
2081
u32 *pbt = (u32 *)g_pBlockTable;
2082
u8 *pFlag = flag_check_blk_table;
2084
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2085
__FILE__, __LINE__, __func__);
2087
if (NULL != pFlag) {
2088
memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
2089
for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
2090
blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
2093
* 20081006/KBV - Changed to pFlag[i] reference
2094
* to avoid buffer overflow
2098
* 2008-10-20 Yunpeng Note: This change avoid
2099
* buffer overflow, but changed function of
2100
* the code, so it should be re-write later
2102
if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
2116
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2117
* Function: FTL_Write_Block_Table
2119
* Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
2121
* Description: It writes the block table
2122
* Block table always mapped to LBA 0 which inturn mapped
2123
* to any physical block
2124
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2125
static int FTL_Write_Block_Table(int wForce)
2127
u32 *pbt = (u32 *)g_pBlockTable;
2128
int wSuccess = PASS;
2129
u32 wTempBlockTableIndex;
2130
u16 bt_pages, new_bt_offset;
2131
u8 blockchangeoccured = 0;
2133
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2134
__FILE__, __LINE__, __func__);
2136
bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2138
if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
2141
if (PASS == wForce) {
2142
g_wBlockTableOffset =
2143
(u16)(DeviceInfo.wPagesPerBlock - bt_pages);
2145
p_BTableChangesDelta =
2146
(struct BTableChangesDelta *)g_pBTDelta_Free;
2147
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2149
p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
2150
p_BTableChangesDelta->g_wBlockTableOffset =
2151
g_wBlockTableOffset;
2152
p_BTableChangesDelta->ValidFields = 0x01;
2156
nand_dbg_print(NAND_DBG_DEBUG,
2157
"Inside FTL_Write_Block_Table: block %d Page:%d\n",
2158
g_wBlockTableIndex, g_wBlockTableOffset);
2161
new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
2162
if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
2163
(new_bt_offset > DeviceInfo.wPagesPerBlock) ||
2164
(FAIL == wSuccess)) {
2165
wTempBlockTableIndex = FTL_Replace_Block_Table();
2166
if (BAD_BLOCK == wTempBlockTableIndex)
2168
if (!blockchangeoccured) {
2169
bt_block_changed = 1;
2170
blockchangeoccured = 1;
2173
g_wBlockTableIndex = wTempBlockTableIndex;
2174
g_wBlockTableOffset = 0;
2175
pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
2177
p_BTableChangesDelta =
2178
(struct BTableChangesDelta *)g_pBTDelta_Free;
2179
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2181
p_BTableChangesDelta->ftl_cmd_cnt =
2183
p_BTableChangesDelta->g_wBlockTableOffset =
2184
g_wBlockTableOffset;
2185
p_BTableChangesDelta->g_wBlockTableIndex =
2187
p_BTableChangesDelta->ValidFields = 0x03;
2189
p_BTableChangesDelta =
2190
(struct BTableChangesDelta *)g_pBTDelta_Free;
2192
sizeof(struct BTableChangesDelta);
2194
p_BTableChangesDelta->ftl_cmd_cnt =
2196
p_BTableChangesDelta->BT_Index =
2198
p_BTableChangesDelta->BT_Entry_Value =
2199
pbt[BLOCK_TABLE_INDEX];
2200
p_BTableChangesDelta->ValidFields = 0x0C;
2204
wSuccess = FTL_Write_Block_Table_Data();
2205
if (FAIL == wSuccess)
2206
MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
2207
} while (FAIL == wSuccess);
2209
g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
2214
static int force_format_nand(void)
2218
/* Force erase the whole unprotected physical partiton of NAND */
2219
printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
2220
printk(KERN_ALERT "From phyical block %d to %d\n",
2221
DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
2222
for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
2223
if (GLOB_LLD_Erase_Block(i))
2224
printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
2226
printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
2232
/*
 * Format the device. The FTL-level format path is disabled; instead the
 * whole unprotected physical partition is force-erased.
 */
int GLOB_FTL_Flash_Format(void)
{
	/* return FTL_Format_Flash(1); */
	return force_format_nand();
}
2239
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2240
* Function: FTL_Search_Block_Table_IN_Block
2241
* Inputs: Block Number
2243
* Outputs: PASS / FAIL
2244
* Page contatining the block table
2245
* Description: It searches the block table in the block
2246
* passed as an argument.
2248
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2249
static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
2250
u8 BT_Tag, u16 *Page)
2257
u8 *tempbuf = tmp_buf_search_bt_in_block;
2258
u8 *pSpareBuf = spare_buf_search_bt_in_block;
2259
u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
2260
u8 bt_flag_last_page = 0xFF;
2261
u8 search_in_previous_pages = 0;
2264
nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2265
__FILE__, __LINE__, __func__);
2267
nand_dbg_print(NAND_DBG_DEBUG,
2268
"Searching block table in %u block\n",
2269
(unsigned int)BT_Block);
2271
bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2273
for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
2274
i += (bt_pages + 1)) {
2275
nand_dbg_print(NAND_DBG_DEBUG,
2276
"Searching last IPF: %d\n", i);
2277
Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
2280
if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
2281
if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
2284
search_in_previous_pages = 1;
2289
if (!search_in_previous_pages) {
2290
if (i != bt_pages) {
2291
i -= (bt_pages + 1);
2299
if (!search_in_previous_pages) {
2301
nand_dbg_print(NAND_DBG_DEBUG,
2302
"Reading the spare area of Block %u Page %u",
2303
(unsigned int)BT_Block, i);
2304
Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
2306
nand_dbg_print(NAND_DBG_DEBUG,
2307
"Reading the spare area of Block %u Page %u",
2308
(unsigned int)BT_Block, i + bt_pages - 1);
2309
Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2310
BT_Block, i + bt_pages - 1, 1);
2313
j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2315
for (; k < j; k++) {
2316
if (tagarray[k] == BT_Tag)
2322
bt_flag = tagarray[k];
2326
if (Result == PASS) {
2328
j = FTL_Extract_Block_Table_Tag(
2329
pSpareBufBTLastPage, &tagarray);
2331
for (; k < j; k++) {
2332
if (tagarray[k] == BT_Tag)
2338
bt_flag_last_page = tagarray[k];
2342
if (Result == PASS) {
2343
if (bt_flag == bt_flag_last_page) {
2344
nand_dbg_print(NAND_DBG_DEBUG,
2345
"Block table is found"
2346
" in page after IPF "
2352
g_cBlockTableStatus =
2353
CURRENT_BLOCK_TABLE;
2362
if (search_in_previous_pages)
2365
i = i - (bt_pages + 1);
2369
nand_dbg_print(NAND_DBG_DEBUG,
2370
"Reading the spare area of Block %d Page %d",
2373
Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2374
nand_dbg_print(NAND_DBG_DEBUG,
2375
"Reading the spare area of Block %u Page %u",
2376
(unsigned int)BT_Block, i + bt_pages - 1);
2378
Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2379
BT_Block, i + bt_pages - 1, 1);
2382
j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2384
for (; k < j; k++) {
2385
if (tagarray[k] == BT_Tag)
2391
bt_flag = tagarray[k];
2395
if (Result == PASS) {
2397
j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2400
for (; k < j; k++) {
2401
if (tagarray[k] == BT_Tag)
2407
bt_flag_last_page = tagarray[k];
2413
if (Result == PASS) {
2414
if (bt_flag == bt_flag_last_page) {
2415
nand_dbg_print(NAND_DBG_DEBUG,
2416
"Block table is found "
2417
"in page prior to IPF "
2418
"at block %u page %d\n",
2419
(unsigned int)BT_Block, i);
2422
g_cBlockTableStatus =
2423
IN_PROGRESS_BLOCK_TABLE;
2433
if (Result == FAIL) {
2434
if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
2436
*Page = i - (bt_pages + 1);
2438
if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
2442
if (Last_IPF == 0) {
2445
nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
2446
"Block %u Page %u", (unsigned int)BT_Block, i);
2448
Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2449
nand_dbg_print(NAND_DBG_DEBUG,
2450
"Reading the spare area of Block %u Page %u",
2451
(unsigned int)BT_Block, i + bt_pages - 1);
2452
Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2453
BT_Block, i + bt_pages - 1, 1);
2456
j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2458
for (; k < j; k++) {
2459
if (tagarray[k] == BT_Tag)
2465
bt_flag = tagarray[k];
2469
if (Result == PASS) {
2471
j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2474
for (; k < j; k++) {
2475
if (tagarray[k] == BT_Tag)
2481
bt_flag_last_page = tagarray[k];
2485
if (Result == PASS) {
2486
if (bt_flag == bt_flag_last_page) {
2487
nand_dbg_print(NAND_DBG_DEBUG,
2488
"Block table is found "
2489
"in page after IPF at "
2490
"block %u page %u\n",
2491
(unsigned int)BT_Block,
2495
g_cBlockTableStatus =
2496
CURRENT_BLOCK_TABLE;
2511
u8 *get_blk_table_start_addr(void)
2513
return g_pBlockTable;
2516
unsigned long get_blk_table_len(void)
2518
return DeviceInfo.wDataBlockNum * sizeof(u32);
2521
u8 *get_wear_leveling_table_start_addr(void)
2523
return g_pWearCounter;
2526
unsigned long get_wear_leveling_table_len(void)
2528
return DeviceInfo.wDataBlockNum * sizeof(u8);
2531
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2532
* Function: FTL_Read_Block_Table
2534
* Outputs: PASS / FAIL
2535
* Description: read the flash spare area and find a block containing the
2536
* most recent block table(having largest block_table_counter).
2537
* Find the last written Block table in this block.
2538
* Check the correctness of Block Table
2539
* If CDMA is enabled, this function is called in
2541
* We don't need to store changes in Block table in this
2542
* function as it is called only at initialization
2544
* Note: Currently this function is called at initialization
2545
* before any read/erase/write command issued to flash so,
2546
* there is no need to wait for CDMA list to complete as of now
2547
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2548
static int FTL_Read_Block_Table(void)
2552
u8 *tempBuf, *tagarray;
2555
u8 block_table_found = 0;
2561
int wBytesCopied = 0, tempvar;
2563
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2564
__FILE__, __LINE__, __func__);
2566
tempBuf = tmp_buf1_read_blk_table;
2567
bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2569
for (j = DeviceInfo.wSpectraStartBlock;
2570
j <= (int)DeviceInfo.wSpectraEndBlock;
2572
status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
2574
i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
2576
status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
2578
for (; k < i; k++) {
2579
if (tagarray[k] == tempBuf[3])
2589
nand_dbg_print(NAND_DBG_DEBUG,
2590
"Block table is contained in Block %d %d\n",
2591
(unsigned int)j, (unsigned int)k);
2593
if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
2594
g_pBTBlocks[k-FIRST_BT_ID] = j;
2595
block_table_found = 1;
2597
printk(KERN_ERR "FTL_Read_Block_Table -"
2598
"This should never happens. "
2599
"Two block table have same counter %u!\n", k);
2603
if (block_table_found) {
2604
if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
2605
g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
2607
while ((j > FIRST_BT_ID) &&
2608
(g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
2610
if (j == FIRST_BT_ID) {
2612
last_erased = LAST_BT_ID;
2614
last_erased = (u8)j + 1;
2615
while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
2616
g_pBTBlocks[j - FIRST_BT_ID]))
2621
while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
2623
last_erased = (u8)j;
2624
while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
2625
g_pBTBlocks[j - FIRST_BT_ID]))
2627
if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
2631
if (last_erased > j)
2632
j += (1 + LAST_BT_ID - FIRST_BT_ID);
2634
for (; (j >= last_erased) && (FAIL == wResult); j--) {
2635
i = (j - FIRST_BT_ID) %
2636
(1 + LAST_BT_ID - FIRST_BT_ID);
2638
FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
2639
i + FIRST_BT_ID, &Page);
2640
if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
2641
block_table_found = 0;
2643
while ((search_result == PASS) && (FAIL == wResult)) {
2644
nand_dbg_print(NAND_DBG_DEBUG,
2645
"FTL_Read_Block_Table:"
2646
"Block: %u Page: %u "
2647
"contains block table\n",
2648
(unsigned int)g_pBTBlocks[i],
2649
(unsigned int)Page);
2651
tempBuf = tmp_buf2_read_blk_table;
2653
for (k = 0; k < bt_pages; k++) {
2654
Block = g_pBTBlocks[i];
2658
GLOB_LLD_Read_Page_Main_Polling(
2659
tempBuf, Block, Page, PageCount);
2661
tempvar = k ? 0 : 4;
2664
FTL_Copy_Block_Table_From_Flash(
2666
DeviceInfo.wPageDataSize - tempvar,
2672
wResult = FTL_Check_Block_Table(FAIL);
2673
if (FAIL == wResult) {
2674
block_table_found = 0;
2675
if (Page > bt_pages)
2676
Page -= ((bt_pages<<1) + 1);
2678
search_result = FAIL;
2684
if (PASS == wResult) {
2685
if (!block_table_found)
2686
FTL_Execute_SPL_Recovery();
2688
if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
2689
g_wBlockTableOffset = (u16)Page + 1;
2691
g_wBlockTableOffset = (u16)Page - bt_pages;
2693
g_wBlockTableIndex = (u32)g_pBTBlocks[i];
2696
if (DeviceInfo.MLCDevice)
2697
memcpy(g_pBTStartingCopy, g_pBlockTable,
2698
DeviceInfo.wDataBlockNum * sizeof(u32)
2699
+ DeviceInfo.wDataBlockNum * sizeof(u8)
2700
+ DeviceInfo.wDataBlockNum * sizeof(u16));
2702
memcpy(g_pBTStartingCopy, g_pBlockTable,
2703
DeviceInfo.wDataBlockNum * sizeof(u32)
2704
+ DeviceInfo.wDataBlockNum * sizeof(u8));
2708
if (FAIL == wResult)
2709
printk(KERN_ERR "Yunpeng - "
2710
"Can not find valid spectra block table!\n");
2712
#if AUTO_FORMAT_FLASH
2713
if (FAIL == wResult) {
2714
nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
2715
wResult = FTL_Format_Flash(0);
2722
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2723
* Function: FTL_Get_Page_Num
2724
* Inputs: Size in bytes
2725
* Outputs: Size in pages
2726
* Description: It calculates the pages required for the length passed
2727
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2728
/*
 * Convert a byte length to a page count, rounding up so that any
 * partial trailing page still counts as a whole page.
 */
static u32 FTL_Get_Page_Num(u64 length)
{
	return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
		(GLOB_u64_Remainder(length, 1) > 0 ? 1 : 0));
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2735
* Function: FTL_Get_Physical_Block_Addr
2736
* Inputs: Block Address (byte format)
2737
* Outputs: Physical address of the block.
2738
* Description: It translates LBA to PBA by returning address stored
2739
* at the LBA location in the block table
2740
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2741
/*
 * Translate a logical byte address to the physical byte address of its
 * block, by looking up the LBA's entry in the block table and masking
 * off the status flag bits (BAD_BLOCK et al.) stored in the high bits.
 */
static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
{
	u32 *pbt;
	u64 physical_addr;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	pbt = (u32 *)g_pBlockTable;
	/* Mask out flag bits to recover the raw physical block number. */
	physical_addr = (u64) DeviceInfo.wBlockDataSize *
		(pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));

	return physical_addr;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2757
* Function: FTL_Get_Block_Index
2758
* Inputs: Physical Block no.
2759
* Outputs: Logical block no. /BAD_BLOCK
2760
* Description: It returns the logical block no. for the PBA passed
2761
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2762
/*
 * Reverse lookup: return the logical block index whose block-table entry
 * maps to physical block wBlockNum, or BAD_BLOCK if no entry matches.
 * Linear scan over the whole table — O(wDataBlockNum).
 */
static u32 FTL_Get_Block_Index(u32 wBlockNum)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
		if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
			return i;

	return BAD_BLOCK;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2778
* Function: GLOB_FTL_Wear_Leveling
2781
* Description: This is static wear leveling (done by explicit call)
2782
* do complete static wear leveling
2783
* do complete garbage collection
2784
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2785
int GLOB_FTL_Wear_Leveling(void)
2787
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2788
__FILE__, __LINE__, __func__);
2790
FTL_Static_Wear_Leveling();
2791
GLOB_FTL_Garbage_Collection();
2796
static void find_least_most_worn(u8 *chg,
2797
u32 *least_idx, u8 *least_cnt,
2798
u32 *most_idx, u8 *most_cnt)
2800
u32 *pbt = (u32 *)g_pBlockTable;
2805
for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
2806
if (IS_BAD_BLOCK(i) || PASS == chg[i])
2809
idx = (u32) ((~BAD_BLOCK) & pbt[i]);
2810
cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
2812
if (IS_SPARE_BLOCK(i)) {
2813
if (cnt > *most_cnt) {
2819
if (IS_DATA_BLOCK(i)) {
2820
if (cnt < *least_cnt) {
2826
if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
2827
debug_boundary_error(*most_idx,
2828
DeviceInfo.wDataBlockNum, 0);
2829
debug_boundary_error(*least_idx,
2830
DeviceInfo.wDataBlockNum, 0);
2836
static int move_blks_for_wear_leveling(u8 *chg,
2837
u32 *least_idx, u32 *rep_blk_num, int *result)
2839
u32 *pbt = (u32 *)g_pBlockTable;
2841
int j, ret_cp_blk, ret_erase;
2844
chg[*least_idx] = PASS;
2845
debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);
2847
rep_blk = FTL_Replace_MWBlock();
2848
if (rep_blk != BAD_BLOCK) {
2849
nand_dbg_print(NAND_DBG_DEBUG,
2850
"More than two spare blocks exist so do it\n");
2851
nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
2854
chg[rep_blk] = PASS;
2856
if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
2857
g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2858
FTL_Write_IN_Progress_Block_Table_Page();
2861
for (j = 0; j < RETRY_TIMES; j++) {
2862
ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
2863
DeviceInfo.wBlockDataSize,
2864
(u64)rep_blk * DeviceInfo.wBlockDataSize);
2865
if (FAIL == ret_cp_blk) {
2866
ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
2867
* DeviceInfo.wBlockDataSize);
2868
if (FAIL == ret_erase)
2869
MARK_BLOCK_AS_BAD(pbt[rep_blk]);
2871
nand_dbg_print(NAND_DBG_DEBUG,
2872
"FTL_Copy_Block == OK\n");
2877
if (j < RETRY_TIMES) {
2879
u32 old_idx = FTL_Get_Block_Index(*least_idx);
2880
u32 rep_idx = FTL_Get_Block_Index(rep_blk);
2881
tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
2882
pbt[old_idx] = (u32)((~SPARE_BLOCK) &
2886
p_BTableChangesDelta = (struct BTableChangesDelta *)
2888
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2889
p_BTableChangesDelta->ftl_cmd_cnt =
2891
p_BTableChangesDelta->BT_Index = old_idx;
2892
p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
2893
p_BTableChangesDelta->ValidFields = 0x0C;
2895
p_BTableChangesDelta = (struct BTableChangesDelta *)
2897
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2899
p_BTableChangesDelta->ftl_cmd_cnt =
2901
p_BTableChangesDelta->BT_Index = rep_idx;
2902
p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
2903
p_BTableChangesDelta->ValidFields = 0x0C;
2906
pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
2908
p_BTableChangesDelta = (struct BTableChangesDelta *)
2910
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2912
p_BTableChangesDelta->ftl_cmd_cnt =
2914
p_BTableChangesDelta->BT_Index =
2915
FTL_Get_Block_Index(rep_blk);
2916
p_BTableChangesDelta->BT_Entry_Value =
2917
pbt[FTL_Get_Block_Index(rep_blk)];
2918
p_BTableChangesDelta->ValidFields = 0x0C;
2924
if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
2927
printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
2934
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2935
* Function: FTL_Static_Wear_Leveling
2937
* Outputs: PASS=0 / FAIL=1
2938
* Description: This is static wear leveling (done by explicit call)
2939
* search for most&least used
2940
* if difference < GATE:
2941
* update the block table with exhange
2942
* mark block table in flash as IN_PROGRESS
2944
* the caller should handle GC clean up after calling this function
2945
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2946
int FTL_Static_Wear_Leveling(void)
2954
u32 replaced_blks = 0;
2955
u8 *chang_flag = flags_static_wear_leveling;
2957
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2958
__FILE__, __LINE__, __func__);
2963
memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
2964
while (go_on == PASS) {
2965
nand_dbg_print(NAND_DBG_DEBUG,
2966
"starting static wear leveling\n");
2968
least_worn_cnt = 0xFF;
2969
least_worn_idx = BLOCK_TABLE_INDEX;
2970
most_worn_idx = BLOCK_TABLE_INDEX;
2972
find_least_most_worn(chang_flag, &least_worn_idx,
2973
&least_worn_cnt, &most_worn_idx, &most_worn_cnt);
2975
nand_dbg_print(NAND_DBG_DEBUG,
2976
"Used and least worn is block %u, whos count is %u\n",
2977
(unsigned int)least_worn_idx,
2978
(unsigned int)least_worn_cnt);
2980
nand_dbg_print(NAND_DBG_DEBUG,
2981
"Free and most worn is block %u, whos count is %u\n",
2982
(unsigned int)most_worn_idx,
2983
(unsigned int)most_worn_cnt);
2985
if ((most_worn_cnt > least_worn_cnt) &&
2986
(most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
2987
go_on = move_blks_for_wear_leveling(chang_flag,
2988
&least_worn_idx, &replaced_blks, &result);
2997
static int do_garbage_collection(u32 discard_cnt)
2999
u32 *pbt = (u32 *)g_pBlockTable;
3001
u8 bt_block_erased = 0;
3002
int i, cnt, ret = FAIL;
3006
while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
3007
((ftl_cmd_cnt + 28) < 256)) {
3008
if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
3009
(pbt[i] & DISCARD_BLOCK)) {
3010
if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
3011
g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3012
FTL_Write_IN_Progress_Block_Table_Page();
3015
addr = FTL_Get_Physical_Block_Addr((u64)i *
3016
DeviceInfo.wBlockDataSize);
3017
pba = BLK_FROM_ADDR(addr);
3019
for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
3020
if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
3021
nand_dbg_print(NAND_DBG_DEBUG,
3022
"GC will erase BT block %u\n",
3026
bt_block_erased = 1;
3031
if (bt_block_erased) {
3032
bt_block_erased = 0;
3036
addr = FTL_Get_Physical_Block_Addr((u64)i *
3037
DeviceInfo.wBlockDataSize);
3039
if (PASS == GLOB_FTL_Block_Erase(addr)) {
3040
pbt[i] &= (u32)(~DISCARD_BLOCK);
3041
pbt[i] |= (u32)(SPARE_BLOCK);
3042
p_BTableChangesDelta =
3043
(struct BTableChangesDelta *)
3046
sizeof(struct BTableChangesDelta);
3047
p_BTableChangesDelta->ftl_cmd_cnt =
3049
p_BTableChangesDelta->BT_Index = i;
3050
p_BTableChangesDelta->BT_Entry_Value = pbt[i];
3051
p_BTableChangesDelta->ValidFields = 0x0C;
3055
MARK_BLOCK_AS_BAD(pbt[i]);
3066
static int do_garbage_collection(u32 discard_cnt)
3068
u32 *pbt = (u32 *)g_pBlockTable;
3070
u8 bt_block_erased = 0;
3071
int i, cnt, ret = FAIL;
3075
while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
3076
if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
3077
(pbt[i] & DISCARD_BLOCK)) {
3078
if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
3079
g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3080
FTL_Write_IN_Progress_Block_Table_Page();
3083
addr = FTL_Get_Physical_Block_Addr((u64)i *
3084
DeviceInfo.wBlockDataSize);
3085
pba = BLK_FROM_ADDR(addr);
3087
for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
3088
if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
3089
nand_dbg_print(NAND_DBG_DEBUG,
3090
"GC will erase BT block %d\n",
3094
bt_block_erased = 1;
3099
if (bt_block_erased) {
3100
bt_block_erased = 0;
3104
/* If the discard block is L2 cache block, then just skip it */
3105
for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
3106
if (cache_l2.blk_array[cnt] == pba) {
3107
nand_dbg_print(NAND_DBG_DEBUG,
3108
"GC will erase L2 cache blk %d\n",
3113
if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
3119
addr = FTL_Get_Physical_Block_Addr((u64)i *
3120
DeviceInfo.wBlockDataSize);
3122
if (PASS == GLOB_FTL_Block_Erase(addr)) {
3123
pbt[i] &= (u32)(~DISCARD_BLOCK);
3124
pbt[i] |= (u32)(SPARE_BLOCK);
3128
MARK_BLOCK_AS_BAD(pbt[i]);
3139
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3140
* Function: GLOB_FTL_Garbage_Collection
3142
* Outputs: PASS / FAIL (returns the number of un-erased blocks
3143
* Description: search the block table for all discarded blocks to erase
3144
* for each discarded block:
3145
* set the flash block to IN_PROGRESS
3147
* update the block table
3148
* write the block table to flash
3149
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3150
int GLOB_FTL_Garbage_Collection(void)
3155
u32 *pbt = (u32 *)g_pBlockTable;
3157
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3158
__FILE__, __LINE__, __func__);
3161
printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
3162
"has been re-entered! Exit.\n");
3168
GLOB_FTL_BT_Garbage_Collection();
3170
for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3171
if (IS_DISCARDED_BLOCK(i))
3175
if (wDiscard <= 0) {
3180
nand_dbg_print(NAND_DBG_DEBUG,
3181
"Found %d discarded blocks\n", wDiscard);
3183
FTL_Write_Block_Table(FAIL);
3185
wResult = do_garbage_collection(wDiscard);
3187
FTL_Write_Block_Table(FAIL);
3196
static int do_bt_garbage_collection(void)
3199
u32 *pbt = (u32 *)g_pBlockTable;
3200
u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
3204
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3205
__FILE__, __LINE__, __func__);
3212
for (i = last_erased; (i <= LAST_BT_ID) &&
3213
(g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3214
FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
3215
((ftl_cmd_cnt + 28)) < 256; i++) {
3216
pba = pBTBlocksNode[i - FIRST_BT_ID];
3217
lba = FTL_Get_Block_Index(pba);
3218
nand_dbg_print(NAND_DBG_DEBUG,
3219
"do_bt_garbage_collection: pba %d, lba %d\n",
3221
nand_dbg_print(NAND_DBG_DEBUG,
3222
"Block Table Entry: %d", pbt[lba]);
3224
if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3225
(pbt[lba] & DISCARD_BLOCK)) {
3226
nand_dbg_print(NAND_DBG_DEBUG,
3227
"do_bt_garbage_collection_cdma: "
3228
"Erasing Block tables present in block %d\n",
3230
addr = FTL_Get_Physical_Block_Addr((u64)lba *
3231
DeviceInfo.wBlockDataSize);
3232
if (PASS == GLOB_FTL_Block_Erase(addr)) {
3233
pbt[lba] &= (u32)(~DISCARD_BLOCK);
3234
pbt[lba] |= (u32)(SPARE_BLOCK);
3236
p_BTableChangesDelta =
3237
(struct BTableChangesDelta *)
3240
sizeof(struct BTableChangesDelta);
3242
p_BTableChangesDelta->ftl_cmd_cnt =
3244
p_BTableChangesDelta->BT_Index = lba;
3245
p_BTableChangesDelta->BT_Entry_Value =
3248
p_BTableChangesDelta->ValidFields = 0x0C;
3251
pBTBlocksNode[last_erased - FIRST_BT_ID] =
3253
nand_dbg_print(NAND_DBG_DEBUG,
3254
"resetting bt entry at index %d "
3256
pBTBlocksNode[i - FIRST_BT_ID]);
3257
if (last_erased == LAST_BT_ID)
3258
last_erased = FIRST_BT_ID;
3262
MARK_BLOCK_AS_BAD(pbt[lba]);
3273
static int do_bt_garbage_collection(void)
3276
u32 *pbt = (u32 *)g_pBlockTable;
3277
u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
3281
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3282
__FILE__, __LINE__, __func__);
3289
for (i = last_erased; (i <= LAST_BT_ID) &&
3290
(g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3291
FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
3292
pba = pBTBlocksNode[i - FIRST_BT_ID];
3293
lba = FTL_Get_Block_Index(pba);
3294
nand_dbg_print(NAND_DBG_DEBUG,
3295
"do_bt_garbage_collection_cdma: pba %d, lba %d\n",
3297
nand_dbg_print(NAND_DBG_DEBUG,
3298
"Block Table Entry: %d", pbt[lba]);
3300
if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3301
(pbt[lba] & DISCARD_BLOCK)) {
3302
nand_dbg_print(NAND_DBG_DEBUG,
3303
"do_bt_garbage_collection: "
3304
"Erasing Block tables present in block %d\n",
3306
addr = FTL_Get_Physical_Block_Addr((u64)lba *
3307
DeviceInfo.wBlockDataSize);
3308
if (PASS == GLOB_FTL_Block_Erase(addr)) {
3309
pbt[lba] &= (u32)(~DISCARD_BLOCK);
3310
pbt[lba] |= (u32)(SPARE_BLOCK);
3312
pBTBlocksNode[last_erased - FIRST_BT_ID] =
3314
nand_dbg_print(NAND_DBG_DEBUG,
3315
"resetting bt entry at index %d "
3317
pBTBlocksNode[i - FIRST_BT_ID]);
3318
if (last_erased == LAST_BT_ID)
3319
last_erased = FIRST_BT_ID;
3323
MARK_BLOCK_AS_BAD(pbt[lba]);
3335
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3336
* Function: GLOB_FTL_BT_Garbage_Collection
3338
* Outputs: PASS / FAIL (returns the number of un-erased blocks
3339
* Description: Erases discarded blocks containing Block table
3341
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3342
/*
 * Public wrapper: erase discarded blocks that previously held copies of
 * the block table. All work is delegated to do_bt_garbage_collection().
 */
int GLOB_FTL_BT_Garbage_Collection(void)
{
	return do_bt_garbage_collection();
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3348
* Function: FTL_Replace_OneBlock
3349
* Inputs: Block number 1
3351
* Outputs: Replaced Block Number
3352
* Description: Interchange block table entries at wBlockNum and wReplaceNum
3354
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3355
static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
3358
u32 replace_node = BAD_BLOCK;
3359
u32 *pbt = (u32 *)g_pBlockTable;
3361
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3362
__FILE__, __LINE__, __func__);
3364
if (rep_blk != BAD_BLOCK) {
3365
if (IS_BAD_BLOCK(blk))
3368
tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);
3370
replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
3371
pbt[blk] = replace_node;
3372
pbt[rep_blk] = tmp_blk;
3375
p_BTableChangesDelta =
3376
(struct BTableChangesDelta *)g_pBTDelta_Free;
3377
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3379
p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3380
p_BTableChangesDelta->BT_Index = blk;
3381
p_BTableChangesDelta->BT_Entry_Value = pbt[blk];
3383
p_BTableChangesDelta->ValidFields = 0x0C;
3385
p_BTableChangesDelta =
3386
(struct BTableChangesDelta *)g_pBTDelta_Free;
3387
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3389
p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3390
p_BTableChangesDelta->BT_Index = rep_blk;
3391
p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
3392
p_BTableChangesDelta->ValidFields = 0x0C;
3396
return replace_node;
3399
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3400
* Function: FTL_Write_Block_Table_Data
3401
* Inputs: Block table size in pages
3402
* Outputs: PASS=0 / FAIL=1
3403
* Description: Write block table data in flash
3404
* If first page and last page
3405
* Write data+BT flag
3408
* BT flag is a counter. Its value is incremented for block table
3409
* write in a new Block
3410
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3411
static int FTL_Write_Block_Table_Data(void)
3413
u64 dwBlockTableAddr, pTempAddr;
3415
u16 Page, PageCount;
3416
u8 *tempBuf = tmp_buf_write_blk_table_data;
3420
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3421
__FILE__, __LINE__, __func__);
3424
(u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
3425
(u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
3426
pTempAddr = dwBlockTableAddr;
3428
bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
3430
nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
3431
"page= %d BlockTableIndex= %d "
3432
"BlockTableOffset=%d\n", bt_pages,
3433
g_wBlockTableIndex, g_wBlockTableOffset);
3435
Block = BLK_FROM_ADDR(pTempAddr);
3436
Page = PAGE_FROM_ADDR(pTempAddr, Block);
3439
if (bt_block_changed) {
3440
if (bt_flag == LAST_BT_ID) {
3441
bt_flag = FIRST_BT_ID;
3442
g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
3443
} else if (bt_flag < LAST_BT_ID) {
3445
g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
3448
if ((bt_flag > (LAST_BT_ID-4)) &&
3449
g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
3451
bt_block_changed = 0;
3452
GLOB_FTL_BT_Garbage_Collection();
3455
bt_block_changed = 0;
3456
nand_dbg_print(NAND_DBG_DEBUG,
3457
"Block Table Counter is %u Block %u\n",
3458
bt_flag, (unsigned int)Block);
3461
memset(tempBuf, 0, 3);
3462
tempBuf[3] = bt_flag;
3463
wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
3464
DeviceInfo.wPageDataSize - 4, 0);
3465
memset(&tempBuf[wBytesCopied + 4], 0xff,
3466
DeviceInfo.wPageSize - (wBytesCopied + 4));
3467
FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
3471
memcpy(g_pNextBlockTable, tempBuf,
3472
DeviceInfo.wPageSize * sizeof(u8));
3473
nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
3474
"Block %u Page %u\n", (unsigned int)Block, Page);
3475
if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
3477
LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
3478
nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
3479
"%s, Line %d, Function: %s, "
3480
"new Bad Block %d generated!\n",
3481
__FILE__, __LINE__, __func__, Block);
3486
g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
3488
if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
3489
nand_dbg_print(NAND_DBG_WARN,
3490
"NAND Program fail in %s, Line %d, Function: %s, "
3491
"new Bad Block %d generated!\n",
3492
__FILE__, __LINE__, __func__, Block);
3498
PageCount = bt_pages - 1;
3499
if (PageCount > 1) {
3500
wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
3501
DeviceInfo.wPageDataSize * (PageCount - 1),
3505
memcpy(g_pNextBlockTable, tempBuf,
3506
(PageCount - 1) * DeviceInfo.wPageDataSize);
3507
if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
3508
g_pNextBlockTable, Block, Page + 1,
3510
nand_dbg_print(NAND_DBG_WARN,
3511
"NAND Program fail in %s, Line %d, "
3513
"new Bad Block %d generated!\n",
3514
__FILE__, __LINE__, __func__,
3520
g_pNextBlockTable += (PageCount - 1) *
3521
DeviceInfo.wPageDataSize * sizeof(u8);
3523
if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
3524
Block, Page + 1, PageCount - 1)) {
3525
nand_dbg_print(NAND_DBG_WARN,
3526
"NAND Program fail in %s, Line %d, "
3528
"new Bad Block %d generated!\n",
3529
__FILE__, __LINE__, __func__,
3536
wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
3537
DeviceInfo.wPageDataSize, wBytesCopied);
3538
memset(&tempBuf[wBytesCopied], 0xff,
3539
DeviceInfo.wPageSize-wBytesCopied);
3540
FTL_Insert_Block_Table_Signature(
3541
&tempBuf[DeviceInfo.wPageDataSize], bt_flag);
3543
memcpy(g_pNextBlockTable, tempBuf,
3544
DeviceInfo.wPageSize * sizeof(u8));
3545
nand_dbg_print(NAND_DBG_DEBUG,
3546
"Writing the last Page of Block Table "
3547
"Block %u Page %u\n",
3548
(unsigned int)Block, Page + bt_pages - 1);
3549
if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
3550
g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
3551
LLD_CMD_FLAG_MODE_CDMA |
3552
LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
3553
nand_dbg_print(NAND_DBG_WARN,
3554
"NAND Program fail in %s, Line %d, "
3555
"Function: %s, new Bad Block %d generated!\n",
3556
__FILE__, __LINE__, __func__, Block);
3561
if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
3562
Block, Page+bt_pages - 1, 1)) {
3563
nand_dbg_print(NAND_DBG_WARN,
3564
"NAND Program fail in %s, Line %d, "
3566
"new Bad Block %d generated!\n",
3567
__FILE__, __LINE__, __func__, Block);
3573
nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");
3579
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3580
* Function: FTL_Replace_Block_Table
3582
* Outputs: PASS=0 / FAIL=1
3583
* Description: Get a new block to write block table
3584
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3585
static u32 FTL_Replace_Block_Table(void)
3590
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3591
__FILE__, __LINE__, __func__);
3593
blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
3595
if ((BAD_BLOCK == blk) && (PASS == gc)) {
3596
GLOB_FTL_Garbage_Collection();
3597
blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
3599
if (BAD_BLOCK == blk)
3600
printk(KERN_ERR "%s, %s: There is no spare block. "
3601
"It should never happen\n",
3602
__FILE__, __func__);
3604
nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
3609
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3610
* Function: FTL_Replace_LWBlock
3611
* Inputs: Block number
3612
* Pointer to Garbage Collect flag
3614
* Description: Determine the least weared block by traversing
3616
* Set Garbage collection to be called if number of spare
3617
* block is less than Free Block Gate count
3618
* Change Block table entry to map least worn block for current
3620
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3621
static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
3624
u32 *pbt = (u32 *)g_pBlockTable;
3625
u8 wLeastWornCounter = 0xFF;
3626
u32 wLeastWornIndex = BAD_BLOCK;
3627
u32 wSpareBlockNum = 0;
3628
u32 wDiscardBlockNum = 0;
3630
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3631
__FILE__, __LINE__, __func__);
3633
if (IS_SPARE_BLOCK(wBlockNum)) {
3634
*pGarbageCollect = FAIL;
3635
pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
3637
p_BTableChangesDelta =
3638
(struct BTableChangesDelta *)g_pBTDelta_Free;
3639
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3640
p_BTableChangesDelta->ftl_cmd_cnt =
3642
p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
3643
p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
3644
p_BTableChangesDelta->ValidFields = 0x0C;
3646
return pbt[wBlockNum];
3649
for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3650
if (IS_DISCARDED_BLOCK(i))
3653
if (IS_SPARE_BLOCK(i)) {
3654
u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
3655
if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
3656
printk(KERN_ERR "FTL_Replace_LWBlock: "
3657
"This should never occur!\n");
3658
if (g_pWearCounter[wPhysicalIndex -
3659
DeviceInfo.wSpectraStartBlock] <
3660
wLeastWornCounter) {
3662
g_pWearCounter[wPhysicalIndex -
3663
DeviceInfo.wSpectraStartBlock];
3664
wLeastWornIndex = i;
3670
nand_dbg_print(NAND_DBG_WARN,
3671
"FTL_Replace_LWBlock: Least Worn Counter %d\n",
3672
(int)wLeastWornCounter);
3674
if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
3675
(wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
3676
*pGarbageCollect = PASS;
3678
*pGarbageCollect = FAIL;
3680
nand_dbg_print(NAND_DBG_DEBUG,
3681
"FTL_Replace_LWBlock: Discarded Blocks %u Spare"
3683
(unsigned int)wDiscardBlockNum,
3684
(unsigned int)wSpareBlockNum);
3686
return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
3689
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3690
* Function: FTL_Replace_MWBlock
3692
* Outputs: most worn spare block no./BAD_BLOCK
3693
* Description: It finds most worn spare block.
3694
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3695
static u32 FTL_Replace_MWBlock(void)
3698
u32 *pbt = (u32 *)g_pBlockTable;
3699
u8 wMostWornCounter = 0;
3700
u32 wMostWornIndex = BAD_BLOCK;
3701
u32 wSpareBlockNum = 0;
3703
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3704
__FILE__, __LINE__, __func__);
3706
for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3707
if (IS_SPARE_BLOCK(i)) {
3708
u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
3709
if (g_pWearCounter[wPhysicalIndex -
3710
DeviceInfo.wSpectraStartBlock] >
3713
g_pWearCounter[wPhysicalIndex -
3714
DeviceInfo.wSpectraStartBlock];
3715
wMostWornIndex = wPhysicalIndex;
3721
if (wSpareBlockNum <= 2)
3724
return wMostWornIndex;
3727
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3728
* Function: FTL_Replace_Block
3729
* Inputs: Block Address
3730
* Outputs: PASS=0 / FAIL=1
3731
* Description: If block specified by blk_addr parameter is not free,
3732
* replace it with the least worn block.
3733
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3734
static int FTL_Replace_Block(u64 blk_addr)
3736
u32 current_blk = BLK_FROM_ADDR(blk_addr);
3737
u32 *pbt = (u32 *)g_pBlockTable;
3739
int GarbageCollect = FAIL;
3741
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3742
__FILE__, __LINE__, __func__);
3744
if (IS_SPARE_BLOCK(current_blk)) {
3745
pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
3747
p_BTableChangesDelta =
3748
(struct BTableChangesDelta *)g_pBTDelta_Free;
3749
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3750
p_BTableChangesDelta->ftl_cmd_cnt =
3752
p_BTableChangesDelta->BT_Index = current_blk;
3753
p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
3754
p_BTableChangesDelta->ValidFields = 0x0C ;
3759
FTL_Replace_LWBlock(current_blk, &GarbageCollect);
3761
if (PASS == GarbageCollect)
3762
wResult = GLOB_FTL_Garbage_Collection();
3767
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3768
* Function: GLOB_FTL_Is_BadBlock
3769
* Inputs: block number to test
3770
* Outputs: PASS (block is BAD) / FAIL (block is not bad)
3771
* Description: test if this block number is flagged as bad
3772
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3773
int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
3775
u32 *pbt = (u32 *)g_pBlockTable;
3777
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3778
__FILE__, __LINE__, __func__);
3780
if (wBlockNum >= DeviceInfo.wSpectraStartBlock
3781
&& BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
3787
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3788
* Function: GLOB_FTL_Flush_Cache
3790
* Outputs: PASS=0 / FAIL=1
3791
* Description: flush all the cache blocks to flash
3792
* if a cache block is not dirty, don't do anything with it
3793
* else, write the block and update the block table
3794
* Note: This function should be called at shutdown/power down.
3795
* to write important data into device
3796
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3797
int GLOB_FTL_Flush_Cache(void)
3801
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3802
__FILE__, __LINE__, __func__);
3804
for (i = 0; i < CACHE_ITEM_NUM; i++) {
3805
if (SET == Cache.array[i].changed) {
3807
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
3808
int_cache[ftl_cmd_cnt].item = i;
3809
int_cache[ftl_cmd_cnt].cache.address =
3810
Cache.array[i].address;
3811
int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
3814
ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
3816
Cache.array[i].changed = CLEAR;
3818
printk(KERN_ALERT "Failed when write back to L2 cache!\n");
3819
/* TODO - How to handle this? */
3826
return FTL_Write_Block_Table(FAIL);
3829
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3830
* Function: GLOB_FTL_Page_Read
3831
* Inputs: pointer to data
3832
* logical address of data (u64 is LBA * Bytes/Page)
3833
* Outputs: PASS=0 / FAIL=1
3834
* Description: reads a page of data into RAM from the cache
3835
* if the data is not already in cache, read from flash to cache
3836
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3837
int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
3842
nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
3843
"page_addr: %llu\n", logical_addr);
3845
cache_item = FTL_Cache_If_Hit(logical_addr);
3847
if (UNHIT_CACHE_ITEM == cache_item) {
3848
nand_dbg_print(NAND_DBG_DEBUG,
3849
"GLOB_FTL_Page_Read: Cache not hit\n");
3850
res = FTL_Cache_Write();
3851
if (ERR == FTL_Cache_Read(logical_addr))
3853
cache_item = Cache.LRU;
3856
FTL_Cache_Read_Page(data, logical_addr, cache_item);
3861
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3862
* Function: GLOB_FTL_Page_Write
3863
* Inputs: pointer to data
3864
* address of data (ADDRESSTYPE is LBA * Bytes/Page)
3865
* Outputs: PASS=0 / FAIL=1
3866
* Description: writes a page of data from RAM to the cache
3867
* if the data is not already in cache, write back the
3868
* least recently used block and read the addressed block
3869
* from flash to cache
3870
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3871
int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
3874
u32 *pbt = (u32 *)g_pBlockTable;
3877
nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
3878
"dwPageAddr: %llu\n", dwPageAddr);
3880
cache_blk = FTL_Cache_If_Hit(dwPageAddr);
3882
if (UNHIT_CACHE_ITEM == cache_blk) {
3883
wResult = FTL_Cache_Write();
3884
if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
3885
wResult = FTL_Replace_Block(dwPageAddr);
3886
pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
3887
if (wResult == FAIL)
3890
if (ERR == FTL_Cache_Read(dwPageAddr))
3892
cache_blk = Cache.LRU;
3893
FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
3896
FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
3897
LLD_CMD_FLAG_ORDER_BEFORE_REST);
3899
FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
3906
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3907
* Function: GLOB_FTL_Block_Erase
3908
* Inputs: address of block to erase (now in byte format, should change to
3910
* Outputs: PASS=0 / FAIL=1
3911
* Description: erases the specified block
3912
* increments the erase count
3913
* If erase count reaches its upper limit,call function to
3914
* do the ajustment as per the relative erase count values
3915
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3916
int GLOB_FTL_Block_Erase(u64 blk_addr)
3921
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3922
__FILE__, __LINE__, __func__);
3924
BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
3926
if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
3927
printk(KERN_ERR "GLOB_FTL_Block_Erase: "
3928
"This should never occur\n");
3933
status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
3935
nand_dbg_print(NAND_DBG_WARN,
3936
"NAND Program fail in %s, Line %d, "
3937
"Function: %s, new Bad Block %d generated!\n",
3938
__FILE__, __LINE__, __func__, BlkIdx);
3940
status = GLOB_LLD_Erase_Block(BlkIdx);
3941
if (status == FAIL) {
3942
nand_dbg_print(NAND_DBG_WARN,
3943
"NAND Program fail in %s, Line %d, "
3944
"Function: %s, new Bad Block %d generated!\n",
3945
__FILE__, __LINE__, __func__, BlkIdx);
3950
if (DeviceInfo.MLCDevice) {
3951
g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
3952
if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
3953
g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3954
FTL_Write_IN_Progress_Block_Table_Page();
3958
g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
3961
p_BTableChangesDelta =
3962
(struct BTableChangesDelta *)g_pBTDelta_Free;
3963
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3964
p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
3965
p_BTableChangesDelta->WC_Index =
3966
BlkIdx - DeviceInfo.wSpectraStartBlock;
3967
p_BTableChangesDelta->WC_Entry_Value =
3968
g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
3969
p_BTableChangesDelta->ValidFields = 0x30;
3971
if (DeviceInfo.MLCDevice) {
3972
p_BTableChangesDelta =
3973
(struct BTableChangesDelta *)g_pBTDelta_Free;
3974
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3975
p_BTableChangesDelta->ftl_cmd_cnt =
3977
p_BTableChangesDelta->RC_Index =
3978
BlkIdx - DeviceInfo.wSpectraStartBlock;
3979
p_BTableChangesDelta->RC_Entry_Value =
3980
g_pReadCounter[BlkIdx -
3981
DeviceInfo.wSpectraStartBlock];
3982
p_BTableChangesDelta->ValidFields = 0xC0;
3988
if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
3989
FTL_Adjust_Relative_Erase_Count(BlkIdx);
3995
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3996
* Function: FTL_Adjust_Relative_Erase_Count
3997
* Inputs: index to block that was just incremented and is at the max
3998
* Outputs: PASS=0 / FAIL=1
3999
* Description: If any erase counts at MAX, adjusts erase count of every
4000
* block by substracting least worn
4001
* counter from counter value of every entry in wear table
4002
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4003
static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
4005
u8 wLeastWornCounter = MAX_BYTE_VALUE;
4008
u32 *pbt = (u32 *)g_pBlockTable;
4011
nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4012
__FILE__, __LINE__, __func__);
4014
for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
4015
if (IS_BAD_BLOCK(i))
4017
wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4019
if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
4020
printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
4021
"This should never occur\n");
4022
wWearCounter = g_pWearCounter[wWearIndex -
4023
DeviceInfo.wSpectraStartBlock];
4024
if (wWearCounter < wLeastWornCounter)
4025
wLeastWornCounter = wWearCounter;
4028
if (wLeastWornCounter == 0) {
4029
nand_dbg_print(NAND_DBG_WARN,
4030
"Adjusting Wear Levelling Counters: Special Case\n");
4031
g_pWearCounter[Index_of_MAX -
4032
DeviceInfo.wSpectraStartBlock]--;
4034
p_BTableChangesDelta =
4035
(struct BTableChangesDelta *)g_pBTDelta_Free;
4036
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4037
p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
4038
p_BTableChangesDelta->WC_Index =
4039
Index_of_MAX - DeviceInfo.wSpectraStartBlock;
4040
p_BTableChangesDelta->WC_Entry_Value =
4041
g_pWearCounter[Index_of_MAX -
4042
DeviceInfo.wSpectraStartBlock];
4043
p_BTableChangesDelta->ValidFields = 0x30;
4045
FTL_Static_Wear_Leveling();
4047
for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
4048
if (!IS_BAD_BLOCK(i)) {
4049
wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4050
g_pWearCounter[wWearIndex -
4051
DeviceInfo.wSpectraStartBlock] =
4054
DeviceInfo.wSpectraStartBlock] -
4057
p_BTableChangesDelta =
4058
(struct BTableChangesDelta *)g_pBTDelta_Free;
4060
sizeof(struct BTableChangesDelta);
4062
p_BTableChangesDelta->ftl_cmd_cnt =
4064
p_BTableChangesDelta->WC_Index = wWearIndex -
4065
DeviceInfo.wSpectraStartBlock;
4066
p_BTableChangesDelta->WC_Entry_Value =
4067
g_pWearCounter[wWearIndex -
4068
DeviceInfo.wSpectraStartBlock];
4069
p_BTableChangesDelta->ValidFields = 0x30;
4077
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4078
* Function: FTL_Write_IN_Progress_Block_Table_Page
4081
* Description: It writes in-progress flag page to the page next to
4083
***********************************************************************/
4084
static int FTL_Write_IN_Progress_Block_Table_Page(void)
4091
u32 *pbt = (u32 *)g_pBlockTable;
4092
u32 wTempBlockTableIndex;
4095
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
4096
__FILE__, __LINE__, __func__);
4098
bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
4100
dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
4102
nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
4103
"Block %d Page %d\n",
4104
g_wBlockTableIndex, dwIPFPageAddr);
4107
wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
4108
g_wBlockTableIndex, dwIPFPageAddr, 1,
4109
LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
4110
if (wResult == FAIL) {
4111
nand_dbg_print(NAND_DBG_WARN,
4112
"NAND Program fail in %s, Line %d, "
4113
"Function: %s, new Bad Block %d generated!\n",
4114
__FILE__, __LINE__, __func__,
4115
g_wBlockTableIndex);
4117
g_wBlockTableOffset = dwIPFPageAddr + 1;
4118
p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
4119
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4120
p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
4121
p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
4122
p_BTableChangesDelta->ValidFields = 0x01;
4125
wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
4126
g_wBlockTableIndex, dwIPFPageAddr, 1);
4127
if (wResult == FAIL) {
4128
nand_dbg_print(NAND_DBG_WARN,
4129
"NAND Program fail in %s, Line %d, "
4130
"Function: %s, new Bad Block %d generated!\n",
4131
__FILE__, __LINE__, __func__,
4132
(int)g_wBlockTableIndex);
4133
MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
4134
wTempBlockTableIndex = FTL_Replace_Block_Table();
4135
bt_block_changed = 1;
4136
if (BAD_BLOCK == wTempBlockTableIndex)
4138
g_wBlockTableIndex = wTempBlockTableIndex;
4139
g_wBlockTableOffset = 0;
4140
/* Block table tag is '00'. Means it's used one */
4141
pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
4144
g_wBlockTableOffset = dwIPFPageAddr + 1;
4149
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
4150
* Function: FTL_Read_Disturbance
4151
* Inputs: block address
4152
* Outputs: PASS=0 / FAIL=1
4153
* Description: used to handle read disturbance. Data in block that
4154
* reaches its read limit is moved to new block
4155
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
4156
int FTL_Read_Disturbance(u32 blk_addr)
4159
u32 *pbt = (u32 *) g_pBlockTable;
4160
u32 dwOldBlockAddr = blk_addr;
4163
u32 wLeastReadCounter = 0xFFFF;
4164
u32 wLeastReadIndex = BAD_BLOCK;
4165
u32 wSpareBlockNum = 0;
4170
nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
4171
__FILE__, __LINE__, __func__);
4174
g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
4176
if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
4177
printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
4178
"Maybe too many pending commands in your CDMA chain.\n");
4182
g_pTempBuf = tmp_buf_read_disturbance;
4185
wBlockNum = FTL_Get_Block_Index(blk_addr);
4188
/* This is a bug.Here 'i' should be logical block number
4189
* and start from 1 (0 is reserved for block table).
4190
* Have fixed it. - Yunpeng 2008. 12. 19
4192
for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
4193
if (IS_SPARE_BLOCK(i)) {
4194
u32 wPhysicalIndex =
4195
(u32)((~SPARE_BLOCK) & pbt[i]);
4196
if (g_pReadCounter[wPhysicalIndex -
4197
DeviceInfo.wSpectraStartBlock] <
4198
wLeastReadCounter) {
4200
g_pReadCounter[wPhysicalIndex -
4201
DeviceInfo.wSpectraStartBlock];
4202
wLeastReadIndex = i;
4208
if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
4209
wResult = GLOB_FTL_Garbage_Collection();
4210
if (PASS == wResult)
4215
wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
4216
wReplacedNode = (u32)((~SPARE_BLOCK) &
4217
pbt[wLeastReadIndex]);
4219
pbt[wBlockNum] = wReplacedNode;
4220
pbt[wLeastReadIndex] = wTempNode;
4221
p_BTableChangesDelta =
4222
(struct BTableChangesDelta *)g_pBTDelta_Free;
4223
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4225
p_BTableChangesDelta->ftl_cmd_cnt =
4227
p_BTableChangesDelta->BT_Index = wBlockNum;
4228
p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
4229
p_BTableChangesDelta->ValidFields = 0x0C;
4231
p_BTableChangesDelta =
4232
(struct BTableChangesDelta *)g_pBTDelta_Free;
4233
g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4235
p_BTableChangesDelta->ftl_cmd_cnt =
4237
p_BTableChangesDelta->BT_Index = wLeastReadIndex;
4238
p_BTableChangesDelta->BT_Entry_Value =
4239
pbt[wLeastReadIndex];
4240
p_BTableChangesDelta->ValidFields = 0x0C;
4242
wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
4243
dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
4244
LLD_CMD_FLAG_MODE_CDMA);
4245
if (wResult == FAIL)
4250
if (wResult != FAIL) {
4251
if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
4252
g_pTempBuf, pbt[wBlockNum], 0,
4253
DeviceInfo.wPagesPerBlock)) {
4254
nand_dbg_print(NAND_DBG_WARN,
4255
"NAND Program fail in "
4256
"%s, Line %d, Function: %s, "
4259
__FILE__, __LINE__, __func__,
4260
(int)pbt[wBlockNum]);
4262
MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
4267
wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
4268
dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
4269
if (wResult == FAIL)
4272
if (wResult != FAIL) {
4273
/* This is a bug. At this time, pbt[wBlockNum]
4274
is still the physical address of
4275
discard block, and should not be write.
4276
Have fixed it as below.
4277
-- Yunpeng 2008.12.19
4279
wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
4281
DeviceInfo.wPagesPerBlock);
4282
if (wResult == FAIL) {
4283
nand_dbg_print(NAND_DBG_WARN,
4284
"NAND Program fail in "
4285
"%s, Line %d, Function: %s, "
4288
__FILE__, __LINE__, __func__,
4289
(int)wReplacedNode);
4290
MARK_BLOCK_AS_BAD(wReplacedNode);
4292
pbt[wBlockNum] = wReplacedNode;
4293
pbt[wLeastReadIndex] = wTempNode;
4297
if ((wResult == PASS) && (g_cBlockTableStatus !=
4298
IN_PROGRESS_BLOCK_TABLE)) {
4299
g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
4300
FTL_Write_IN_Progress_Block_Table_Page();
4304
} while (wResult != PASS)