OpenSSD Cosmos+ Platform Firmware  0.0.2
The firmware of Cosmos+ OpenSSD Platform for TOSHIBA NAND flash module.
request_transform.c File Reference
#include "xil_printf.h"
#include <assert.h>
#include "nvme/nvme.h"
#include "nvme/host_lld.h"
#include "memory_map.h"
#include "ftl_config.h"
Include dependency graph for request_transform.c:

Go to the source code of this file.

Functions

void InitDependencyTable ()
 
void ReqTransNvmeToSlice (unsigned int cmdSlotTag, unsigned int startLba, unsigned int nlb, unsigned int cmdCode)
 Split NVMe command into slice requests. More...
 
void EvictDataBufEntry (unsigned int originReqSlotTag)
 Clear the specified data buffer entry and sync dirty data if needed. More...
 
void DataReadFromNand (unsigned int originReqSlotTag)
 Generate and dispatch a flash read request for the given slice request. More...
 
void ReqTransSliceToLowLevel ()
 Data Buffer Manager. Handle all the pending slice requests. More...
 
unsigned int CheckBufDep (unsigned int reqSlotTag)
 Check if this request has the buffer dependency problem. More...
 
unsigned int CheckRowAddrDep (unsigned int reqSlotTag, unsigned int checkRowAddrDepOpt)
 Check if this NAND request has the row address dependency problem. More...
 
unsigned int UpdateRowAddrDepTableForBufBlockedReq (unsigned int reqSlotTag)
 Update the dependency info and dispatch the request if possible. More...
 
void SelectLowLevelReqQ (unsigned int reqSlotTag)
 Dispatch given NVMe/NAND request to corresponding request queue. More...
 
void ReleaseBlockedByBufDepReq (unsigned int reqSlotTag)
 Pop the specified request from the buffer dependency queue. More...
 
void ReleaseBlockedByRowAddrDepReq (unsigned int chNo, unsigned int wayNo)
 Update the row address dependency of all the requests on the specified die. More...
 
void IssueNvmeDmaReq (unsigned int reqSlotTag)
 Allocate data buffer for the specified DMA request and inform the controller. More...
 
void CheckDoneNvmeDmaReq ()
 

Variables

P_ROW_ADDR_DEPENDENCY_TABLE rowAddrDependencyTablePtr
 

Function Documentation

◆ CheckBufDep()

unsigned int CheckBufDep ( unsigned int  reqSlotTag)

Check if this request has the buffer dependency problem.

Requests that share the same data buffer entry must be executed in correct order, and the execution order is identical to the order of the request entry index appended to the blocking request queue, so we can simply check if the previous request in the blocking request queue exists.

See also
UpdateDataBufEntryInfoBlockingReq() and DATA_BUF_ENTRY.
Parameters
reqSlotTag the request pool entry index of the request to be checked
Returns
unsigned int 1 for pass, 0 for blocked

Definition at line 407 of file request_transform.c.

408{
411 else
413}
P_REQ_POOL reqPoolPtr
#define REQ_SLOT_TAG_NONE
#define BUF_DEPENDENCY_REPORT_PASS
#define BUF_DEPENDENCY_REPORT_BLOCKED
SSD_REQ_FORMAT reqPool[AVAILABLE_OUNTSTANDING_REQ_COUNT]
unsigned int prevBlockingReq
Here is the caller graph for this function:

◆ CheckDoneNvmeDmaReq()

void CheckDoneNvmeDmaReq ( )

Definition at line 918 of file request_transform.c.

919{
920 unsigned int reqSlotTag, prevReq;
921 unsigned int rxDone, txDone;
922
923 reqSlotTag = nvmeDmaReqQ.tailReq;
924 rxDone = 0;
925 txDone = 0;
926
927 while (reqSlotTag != REQ_SLOT_TAG_NONE)
928 {
929 prevReq = reqPoolPtr->reqPool[reqSlotTag].prevReq;
930
931 if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_RxDMA)
932 {
933 if (!rxDone)
936
937 if (rxDone)
938 SelectiveGetFromNvmeDmaReqQ(reqSlotTag);
939 }
940 else
941 {
942 if (!txDone)
945
946 if (txDone)
947 SelectiveGetFromNvmeDmaReqQ(reqSlotTag);
948 }
949
950 reqSlotTag = prevReq;
951 }
952}
unsigned int check_auto_rx_dma_partial_done(unsigned int tailIndex, unsigned int tailAssistIndex)
Definition: host_lld.c:524
unsigned int check_auto_tx_dma_partial_done(unsigned int tailIndex, unsigned int tailAssistIndex)
Definition: host_lld.c:485
NVME_DMA_REQUEST_QUEUE nvmeDmaReqQ
void SelectiveGetFromNvmeDmaReqQ(unsigned int reqSlotTag)
Move the specified entry from the nvmeDmaReqQ to the freeReqQ.
#define REQ_CODE_RxDMA
unsigned int reqTail
unsigned int overFlowCnt
unsigned int reqCode
NVME_DMA_INFO nvmeDmaInfo
unsigned int prevReq
Here is the call graph for this function:
Here is the caller graph for this function:

◆ CheckRowAddrDep()

unsigned int CheckRowAddrDep ( unsigned int  reqSlotTag,
unsigned int  checkRowAddrDepOpt 
)

Check if this NAND request has the row address dependency problem.

First, the NAND request should already be assigned a VSA, and we need to translate the VSA into physical info.

Now we have the physical info of the target address of the specified request, but different types of requests have different dependency problems:

  • For a write request:

    In current implementation, pages in the same block will be allocated sequentially, and thus here we should block all the write requests whose target PBN is not the expected page of the current working block on the target die.

  • For an erase request:

    Before erasing a block, we must ensure that there is no pending read request that requires access to any of the pages within the target block.

    Warning
    What is the use of programmedPageCnt?
  • For a read request:

    Before performing a read operation on a block, we should ensure that the pending write and erase requests are already finished.

    Todo:
    Why the address info of the specified request must be VSA.
Warning
This function may update the count of corresponding block info, but won't add or remove the given request to or from the row address dependency queue, so use this function carefully.
See also
SyncReleaseEraseReq(), UpdateRowAddrDepTableForBufBlockedReq().
Parameters
reqSlotTag The request pool entry index of the request to be checked.
checkRowAddrDepOpt Increase or decrease the count of block info.
Returns
unsigned int The check result, 1 for pass, 0 for blocked.

Definition at line 454 of file request_transform.c.

455{
456 unsigned int dieNo, chNo, wayNo, blockNo, pageNo;
457
459 {
461 chNo = Vdie2PchTranslation(dieNo);
462 wayNo = Vdie2PwayTranslation(dieNo);
465 }
466 else
467 assert(!"[WARNING] Not supported reqOpt-nandAddress [WARNING]");
468
469 if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_READ)
470 {
471 if (checkRowAddrDepOpt == ROW_ADDR_DEPENDENCY_CHECK_OPT_SELECT)
472 {
473 // release the blocked erase request on the target block
474 if (rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedEraseReqFlag)
475 SyncReleaseEraseReq(chNo, wayNo, blockNo);
476
477 // already programed
478 if (pageNo < rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].permittedProgPage)
480
481 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedReadReqCnt++;
482 }
483 else if (checkRowAddrDepOpt == ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE)
484 {
485 if (pageNo < rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].permittedProgPage)
486 {
487 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedReadReqCnt--;
489 }
490 }
491 else
492 assert(!"[WARNING] Not supported checkRowAddrDepOpt [WARNING]");
493 }
494 else if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_WRITE)
495 {
496 if (pageNo == rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].permittedProgPage)
497 {
498 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].permittedProgPage++;
499
501 }
502 }
503 else if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_ERASE)
504 {
505 if (rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].permittedProgPage ==
507 if (rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedReadReqCnt == 0)
508 {
509 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].permittedProgPage = 0;
510 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedEraseReqFlag = 0;
511
513 }
514
515 if (checkRowAddrDepOpt == ROW_ADDR_DEPENDENCY_CHECK_OPT_SELECT)
516 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedEraseReqFlag = 1;
517 else if (checkRowAddrDepOpt == ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE)
518 {
519 // pass, go to return
520 }
521 else
522 assert(!"[WARNING] Not supported checkRowAddrDepOpt [WARNING]");
523 }
524 else
525 assert(!"[WARNING] Not supported reqCode [WARNING]");
526
528}
#define Vsa2VpageTranslation(virtualSliceAddr)
#define Vdie2PchTranslation(dieNo)
#define Vsa2VdieTranslation(virtualSliceAddr)
#define Vsa2VblockTranslation(virtualSliceAddr)
#define Vdie2PwayTranslation(dieNo)
#define REQ_CODE_WRITE
#define REQ_OPT_NAND_ADDR_VSA
#define REQ_CODE_ERASE
#define REQ_CODE_READ
void SyncReleaseEraseReq(unsigned int chNo, unsigned int wayNo, unsigned int blockNo)
Issuing requests until the specified block can be erased.
P_ROW_ADDR_DEPENDENCY_TABLE rowAddrDependencyTablePtr
#define ROW_ADDR_DEPENDENCY_CHECK_OPT_SELECT
#define ROW_ADDR_DEPENDENCY_REPORT_PASS
#define ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE
#define ROW_ADDR_DEPENDENCY_REPORT_BLOCKED
unsigned int programmedPageCnt
unsigned int virtualSliceAddr
unsigned int nandAddr
Type of address stored in the SSD_REQ_FORMAT::nandInfo.
unsigned int blockedReadReqCnt
unsigned int permittedProgPage
unsigned int blockedEraseReqFlag
ROW_ADDR_DEPENDENCY_ENTRY block[USER_CHANNELS][USER_WAYS][MAIN_BLOCKS_PER_DIE]
REQ_OPTION reqOpt
NAND_INFO nandInfo
Here is the call graph for this function:
Here is the caller graph for this function:

◆ DataReadFromNand()

void DataReadFromNand ( unsigned int  originReqSlotTag)

Generate and dispatch a flash read request for the given slice request.

Before issuing NVMe Tx request and migration, we must read the target page into target data buffer entry. To do this, we should create and issue a sub-request for flash read operation.

Warning
In the original implementation, nandInfo.virtualSliceAddr was assigned after calling the function UpdateDataBufEntryInfoBlockingReq().
See also
ReqTransSliceToLowLevel()
Parameters
originReqSlotTag the request pool entry index of the parent NVMe slice request.

Definition at line 262 of file request_transform.c.

263{
264 unsigned int reqSlotTag, virtualSliceAddr;
265
266 virtualSliceAddr = AddrTransRead(reqPoolPtr->reqPool[originReqSlotTag].logicalSliceAddr);
267
268 /*
269 * Since `ReqTransNvmeToSlice()` only set a part of options for `ReqTransNvmeToSlice`,
270 * we need to set more detailed configs before issuing NAND requests.
271 */
272 if (virtualSliceAddr != VSA_FAIL)
273 {
274 /*
275 * the request entry created by caller is only used for NVMe Tx/Rx, new request
276 * entry is needed for flash read request.
277 */
278 reqSlotTag = GetFromFreeReqQ();
279
282 reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag = reqPoolPtr->reqPool[originReqSlotTag].nvmeCmdSlotTag;
283 reqPoolPtr->reqPool[reqSlotTag].logicalSliceAddr = reqPoolPtr->reqPool[originReqSlotTag].logicalSliceAddr;
290
291 reqPoolPtr->reqPool[reqSlotTag].dataBufInfo.entry =
292 reqPoolPtr->reqPool[originReqSlotTag].dataBufInfo.entry;
294 reqPoolPtr->reqPool[reqSlotTag].nandInfo.virtualSliceAddr = virtualSliceAddr;
295
296 SelectLowLevelReqQ(reqSlotTag);
297 }
298}
unsigned int AddrTransRead(unsigned int logicalSliceAddr)
Get the virtual slice address of the given logical slice.
#define VSA_FAIL
void UpdateDataBufEntryInfoBlockingReq(unsigned int bufEntry, unsigned int reqSlotTag)
Append the request to the blocking queue of the specified data buffer entry.
Definition: data_buffer.c:268
unsigned int GetFromFreeReqQ()
Get a free request from the free request queue.
#define REQ_OPT_BLOCK_SPACE_MAIN
for the 1 bit flag REQ_OPTION::blockSpace.
#define REQ_OPT_NAND_ECC_WARNING_ON
#define REQ_OPT_ROW_ADDR_DEPENDENCY_CHECK
#define REQ_OPT_DATA_BUF_ENTRY
#define REQ_TYPE_NAND
#define REQ_OPT_NAND_ECC_ON
void SelectLowLevelReqQ(unsigned int reqSlotTag)
Dispatch given NVMe/NAND request to corresponding request queue.
unsigned int entry
unsigned int nandEccWarning
unsigned int dataBufFormat
Type of address stored in the SSD_REQ_FORMAT::dataBufInfo.
unsigned int blockSpace
unsigned int rowAddrDependencyCheck
unsigned int nandEcc
DATA_BUF_INFO dataBufInfo
unsigned int logicalSliceAddr
unsigned int reqType
unsigned int nvmeCmdSlotTag
Here is the call graph for this function:
Here is the caller graph for this function:

◆ EvictDataBufEntry()

void EvictDataBufEntry ( unsigned int  originReqSlotTag)

Clear the specified data buffer entry and sync dirty data if needed.

In current implementation, the write request from host will not write the data directly to the flash memory, but will cache the data in the data buffer and mark the entry as dirty entry instead. Therefore, once the data buffer becomes full, the fw should check whether the evicted entry is dirty and perform write request if needed before the entry being evicted.

Parameters
originReqSlotTag the request entry index of the data buffer entry to be evicted.

Definition at line 218 of file request_transform.c.

219{
220 unsigned int reqSlotTag, virtualSliceAddr, dataBufEntry;
221
222 dataBufEntry = reqPoolPtr->reqPool[originReqSlotTag].dataBufInfo.entry;
223 if (dataBufMapPtr->dataBuf[dataBufEntry].dirty == DATA_BUF_DIRTY)
224 {
225 reqSlotTag = GetFromFreeReqQ();
226 virtualSliceAddr = AddrTransWrite(dataBufMapPtr->dataBuf[dataBufEntry].logicalSliceAddr);
227
230 reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag = reqPoolPtr->reqPool[originReqSlotTag].nvmeCmdSlotTag;
238 reqPoolPtr->reqPool[reqSlotTag].dataBufInfo.entry = dataBufEntry;
239 UpdateDataBufEntryInfoBlockingReq(dataBufEntry, reqSlotTag);
240 reqPoolPtr->reqPool[reqSlotTag].nandInfo.virtualSliceAddr = virtualSliceAddr;
241
242 SelectLowLevelReqQ(reqSlotTag);
243
244 dataBufMapPtr->dataBuf[dataBufEntry].dirty = DATA_BUF_CLEAN;
245 }
246}
unsigned int AddrTransWrite(unsigned int logicalSliceAddr)
Assign a new virtual (physical) page to the specified logical page.
P_DATA_BUF_MAP dataBufMapPtr
Definition: data_buffer.c:50
#define DATA_BUF_DIRTY
Definition: data_buffer.h:59
#define DATA_BUF_CLEAN
Definition: data_buffer.h:60
unsigned int dirty
Definition: data_buffer.h:110
unsigned int logicalSliceAddr
Definition: data_buffer.h:104
DATA_BUF_ENTRY dataBuf[AVAILABLE_DATA_BUFFER_ENTRY_COUNT]
Definition: data_buffer.h:122
Here is the call graph for this function:
Here is the caller graph for this function:

◆ InitDependencyTable()

void InitDependencyTable ( )

Definition at line 58 of file request_transform.c.

59{
60 unsigned int blockNo, wayNo, chNo;
62
63 for (blockNo = 0; blockNo < MAIN_BLOCKS_PER_DIE; blockNo++)
64 {
65 for (wayNo = 0; wayNo < USER_WAYS; wayNo++)
66 {
67 for (chNo = 0; chNo < USER_CHANNELS; chNo++)
68 {
69 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].permittedProgPage = 0;
70 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedReadReqCnt = 0;
71 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedEraseReqFlag = 0;
72 }
73 }
74 }
75}
#define USER_CHANNELS
Definition: ftl_config.h:207
#define MAIN_BLOCKS_PER_DIE
Definition: ftl_config.h:162
#define USER_WAYS
Definition: ftl_config.h:208
#define ROW_ADDR_DEPENDENCY_TABLE_ADDR
Definition: memory_map.h:111
struct _ROW_ADDR_DEPENDENCY_TABLE * P_ROW_ADDR_DEPENDENCY_TABLE
Here is the caller graph for this function:

◆ IssueNvmeDmaReq()

void IssueNvmeDmaReq ( unsigned int  reqSlotTag)

Allocate data buffer for the specified DMA request and inform the controller.

This function is used for issuing a new DMA request, the DMA procedure can be split into 2 steps:

  1. Prepare a buffer based on the member dataBufFormat of the specified DMA request
  2. Inform NVMe controller

    For a DMA request, it might want to rx/tx data whose size is larger than 4K which is the NVMe block size, so the firmware needs to inform the NVMe controller for each NVMe block.

    The tail reg of the DMA queue will be updated during the set_auto_rx_dma() and set_auto_tx_dma(), so we need to update the nvmeDmaInfo.reqTail after issuing the DMA request.

Warning
For a DMA request, the buffer address generated by GenerateDataBufAddr() is chosen based on the REQ_OPT_DATA_BUF_ENTRY, however, since the size of a data entry is BYTES_PER_DATA_REGION_OF_SLICE (default 4), will the data buffer used by the DMA request overlap with other requests' data buffer if the numOfNvmeBlock of the DMA request is larger than NVME_BLOCKS_PER_PAGE?
Parameters
reqSlotTag the request pool index of the given request.

Definition at line 878 of file request_transform.c.

879{
880 unsigned int devAddr, dmaIndex, numOfNvmeBlock;
881
882 dmaIndex = reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.startIndex;
883 devAddr = GenerateDataBufAddr(reqSlotTag);
884 numOfNvmeBlock = 0;
885
886 if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_RxDMA)
887 {
888 while (numOfNvmeBlock < reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.numOfNvmeBlock)
889 {
890 set_auto_rx_dma(reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag, dmaIndex, devAddr,
892
893 numOfNvmeBlock++;
894 dmaIndex++;
895 devAddr += BYTES_PER_NVME_BLOCK;
896 }
899 }
900 else if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_TxDMA)
901 {
902 while (numOfNvmeBlock < reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.numOfNvmeBlock)
903 {
904 set_auto_tx_dma(reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag, dmaIndex, devAddr,
906
907 numOfNvmeBlock++;
908 dmaIndex++;
909 devAddr += BYTES_PER_NVME_BLOCK;
910 }
913 }
914 else
915 assert(!"[WARNING] Not supported reqCode [WARNING]");
916}
#define BYTES_PER_NVME_BLOCK
Definition: ftl_config.h:194
HOST_DMA_STATUS g_hostDmaStatus
Definition: host_lld.c:63
void set_auto_rx_dma(unsigned int cmdSlotTag, unsigned int cmd4KBOffset, unsigned int devAddr, unsigned int autoCompletion)
Definition: host_lld.c:419
void set_auto_tx_dma(unsigned int cmdSlotTag, unsigned int cmd4KBOffset, unsigned int devAddr, unsigned int autoCompletion)
Definition: host_lld.c:385
HOST_DMA_ASSIST_STATUS g_hostDmaAssistStatus
Definition: host_lld.c:64
#define REQ_CODE_TxDMA
unsigned int GenerateDataBufAddr(unsigned int reqSlotTag)
Get the corresponding data buffer entry address of the given request.
#define NVME_COMMAND_AUTO_COMPLETION_ON
unsigned int autoDmaRxOverFlowCnt
Definition: host_lld.h:340
unsigned int autoDmaTxOverFlowCnt
Definition: host_lld.h:339
unsigned char autoDmaRx
Definition: host_lld.h:288
unsigned char autoDmaTx
Definition: host_lld.h:289
HOST_DMA_FIFO_CNT_REG fifoTail
Definition: host_lld.h:330
unsigned int startIndex
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReleaseBlockedByBufDepReq()

void ReleaseBlockedByBufDepReq ( unsigned int  reqSlotTag)

Pop the specified request from the buffer dependency queue.

In the current implementation, this function is only called after the specified request entry is moved to the free request queue, which means that the previous request has released the data buffer entry it occupied. Therefore, we now need to update the relevant information about the data buffer dependency.

Warning
Only the NAND requests with VSA can use this function.
Since the struct DATA_BUF_ENTRY maintains only the tail of blocked requests, the specified request should be the head of blocked requests to ensure that the request order is not messed up.
See also
UpdateDataBufEntryInfoBlockingReq(), CheckBufDep(), SSD_REQ_FORMAT.
Parameters
reqSlotTag The request entry index of the given request

Definition at line 729 of file request_transform.c.

730{
731 unsigned int targetReqSlotTag, dieNo, chNo, wayNo, rowAddrDepCheckReport;
732
733 // split the blocking request queue into 2 parts at `reqSlotTag`
734 targetReqSlotTag = REQ_SLOT_TAG_NONE;
736 {
737 targetReqSlotTag = reqPoolPtr->reqPool[reqSlotTag].nextBlockingReq;
740 }
741
742 // reset blocking request queue if it is the last request blocked by the buffer dependency
744 {
746 reqSlotTag)
749 }
751 {
753 reqSlotTag)
756 }
757
758 /*
759 * the specified request is released, so if its next request is only blocked by data
760 * buffer, it can be released now.
761 */
762 if ((targetReqSlotTag != REQ_SLOT_TAG_NONE) &&
764 {
765 SelectiveGetFromBlockedByBufDepReqQ(targetReqSlotTag);
766
767 if (reqPoolPtr->reqPool[targetReqSlotTag].reqType == REQ_TYPE_NVME_DMA)
768 {
769 IssueNvmeDmaReq(targetReqSlotTag);
770 PutToNvmeDmaReqQ(targetReqSlotTag);
771 }
772 else if (reqPoolPtr->reqPool[targetReqSlotTag].reqType == REQ_TYPE_NAND)
773 {
774 if (reqPoolPtr->reqPool[targetReqSlotTag].reqOpt.nandAddr == REQ_OPT_NAND_ADDR_VSA)
775 {
777 chNo = Vdie2PchTranslation(dieNo);
778 wayNo = Vdie2PwayTranslation(dieNo);
779 }
780 else
781 assert(!"[WARNING] Not supported reqOpt-nandAddress [WARNING]");
782
783 // check the row address dependency if needed
784 if (reqPoolPtr->reqPool[targetReqSlotTag].reqOpt.rowAddrDependencyCheck ==
786 {
787 rowAddrDepCheckReport = CheckRowAddrDep(targetReqSlotTag, ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE);
788
789 if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_PASS)
790 PutToNandReqQ(targetReqSlotTag, chNo, wayNo);
791 else if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_BLOCKED)
792 PutToBlockedByRowAddrDepReqQ(targetReqSlotTag, chNo, wayNo);
793 else
794 assert(!"[WARNING] Not supported report [WARNING]");
795 }
796 else if (reqPoolPtr->reqPool[targetReqSlotTag].reqOpt.rowAddrDependencyCheck ==
798 PutToNandReqQ(targetReqSlotTag, chNo, wayNo);
799 else
800 assert(!"[WARNING] Not supported reqOpt [WARNING]");
801 }
802 }
803}
P_TEMPORARY_DATA_BUF_MAP tempDataBufMapPtr
Definition: data_buffer.c:53
void PutToNandReqQ(unsigned int reqSlotTag, unsigned chNo, unsigned wayNo)
Add the given request to nandReqQ of the specified die.
void PutToNvmeDmaReqQ(unsigned int reqSlotTag)
Add the given request to the NVMe DMA request queue and update its status.
void PutToBlockedByRowAddrDepReqQ(unsigned int reqSlotTag, unsigned int chNo, unsigned int wayNo)
Add the given request to blockedByRowAddrDepReqQ.
void SelectiveGetFromBlockedByBufDepReqQ(unsigned int reqSlotTag)
Remove the given request from the blockedByBufDepReqQ.
#define REQ_QUEUE_TYPE_BLOCKED_BY_BUF_DEP
#define REQ_TYPE_NVME_DMA
#define REQ_OPT_DATA_BUF_TEMP_ENTRY
#define REQ_OPT_ROW_ADDR_DEPENDENCY_NONE
void IssueNvmeDmaReq(unsigned int reqSlotTag)
Allocate data buffer for the specified DMA request and inform the controller.
unsigned int CheckRowAddrDep(unsigned int reqSlotTag, unsigned int checkRowAddrDepOpt)
Check if this NAND request has the row address dependency problem.
unsigned int blockingReqTail
Definition: data_buffer.h:107
unsigned int nextBlockingReq
unsigned int reqQueueType
unsigned int blockingReqTail
Definition: data_buffer.h:166
TEMPORARY_DATA_BUF_ENTRY tempDataBuf[AVAILABLE_TEMPORARY_DATA_BUFFER_ENTRY_COUNT]
Definition: data_buffer.h:179
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReleaseBlockedByRowAddrDepReq()

void ReleaseBlockedByRowAddrDepReq ( unsigned int  chNo,
unsigned int  wayNo 
)

Update the row address dependency of all the requests on the specified die.

Traverse the blockedByRowAddrDepReqQ of the specified die, and then recheck the row address dependency for all the requests on that die. When a request is found that it can pass the dependency check, it will be dispatched (move to the NAND request queue).

By updating the row address dependency info, some requests on the target die may be released.

See also
CheckRowAddrDep().
Parameters
chNo The channel number of the specified die.
wayNo The way number of the specified die.

Definition at line 820 of file request_transform.c.

821{
822 unsigned int reqSlotTag, nextReq, rowAddrDepCheckReport;
823
824 reqSlotTag = blockedByRowAddrDepReqQ[chNo][wayNo].headReq;
825
826 while (reqSlotTag != REQ_SLOT_TAG_NONE)
827 {
828 nextReq = reqPoolPtr->reqPool[reqSlotTag].nextReq;
829
831 {
832 rowAddrDepCheckReport = CheckRowAddrDep(reqSlotTag, ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE);
833
834 if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_PASS)
835 {
836 SelectiveGetFromBlockedByRowAddrDepReqQ(reqSlotTag, chNo, wayNo);
837 PutToNandReqQ(reqSlotTag, chNo, wayNo);
838 }
839 else if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_BLOCKED)
840 {
841 // pass, go to while loop
842 }
843 else
844 assert(!"[WARNING] Not supported report [WARNING]");
845 }
846 else
847 assert(!"[WARNING] Not supported reqOpt [WARNING]");
848
849 reqSlotTag = nextReq;
850 }
851}
void SelectiveGetFromBlockedByRowAddrDepReqQ(unsigned int reqSlotTag, unsigned int chNo, unsigned int wayNo)
Remove the given request from the blockedByRowAddrDepReqQ.
BLOCKED_BY_ROW_ADDR_DEPENDENCY_REQUEST_QUEUE blockedByRowAddrDepReqQ[USER_CHANNELS][USER_WAYS]
unsigned int nextReq
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReqTransNvmeToSlice()

void ReqTransNvmeToSlice ( unsigned int  cmdSlotTag,
unsigned int  startLba,
unsigned int  nlb,
unsigned int  cmdCode 
)

Split NVMe command into slice requests.

Note
The unit of the given startLba and nlb is NVMe block, not NAND block.

To get the starting LSA of this NVMe command, we need to divide the given startLba by NVME_BLOCKS_PER_SLICE which indicates that how many NVMe blocks can be merged into a slice request.

To get the number of NAND blocks needed by this NVMe command, we should first align the starting NVMe block address startLba to slice 0, then convert the ending NVMe block address (startLba % NVME_BLOCKS_PER_SLICE + requestedNvmeBlock) to LSA, then the result indicates the number of slice requests needed by this NVMe command.

Note
According to the NVMe spec, NLB is a 0's based value, so we should increase the requestedNvmeBlock by 1 to get the real number of NVMe blocks to be read/written by this NVMe command.

Now the address translation part is finished and we can start to split the NVMe command into slice requests. The splitting process can be separated into 3 steps:

  1. Fill the remaining NVMe blocks in first slice request (head)

    Since the startLba may not perfectly align to the first NVMe block of first slice command, we should access the trailing N NVMe blocks in the first slice request, where N is the number of misaligned NVMe blocks in the first slice requests.

  2. Generate slice requests for the aligned NVMe blocks (body)

    General case. The number of the NVMe blocks to be filled by these slice requests is exactly NVME_BLOCKS_PER_SLICE. So here just simply use a loop to generate same slice requests.

  3. Generate slice request for the remaining NVMe blocks (tail)

    Similar to the first step, but here we need to access the first K NVMe blocks in the last slice request, where K is the number of remaining NVMe blocks in this slice request.

    Todo:
    generalize the three steps
Parameters
cmdSlotTag
Todo:
//TODO
Parameters
startLba address of the first logical NVMe block to read/write.
nlb number of logical NVMe blocks to read/write.
cmdCode opcode of the given NVMe command.

Definition at line 123 of file request_transform.c.

124{
125 unsigned int reqSlotTag, requestedNvmeBlock, tempNumOfNvmeBlock, transCounter, tempLsa, loop, nvmeBlockOffset,
126 nvmeDmaStartIndex, reqCode;
127
128 requestedNvmeBlock = nlb + 1;
129 transCounter = 0;
130 nvmeDmaStartIndex = 0;
131 tempLsa = startLba / NVME_BLOCKS_PER_SLICE;
132 loop = ((startLba % NVME_BLOCKS_PER_SLICE) + requestedNvmeBlock) / NVME_BLOCKS_PER_SLICE;
133
134 // translate the opcode for NVMe command into that for slice requests.
135 if (cmdCode == IO_NVM_WRITE)
136 reqCode = REQ_CODE_WRITE;
137 else if (cmdCode == IO_NVM_READ)
138 reqCode = REQ_CODE_READ;
139 else
140 assert(!"[WARNING] Not supported command code [WARNING]");
141
142 // first transform
143 nvmeBlockOffset = (startLba % NVME_BLOCKS_PER_SLICE);
144 if (loop)
145 tempNumOfNvmeBlock = NVME_BLOCKS_PER_SLICE - nvmeBlockOffset;
146 else
147 tempNumOfNvmeBlock = requestedNvmeBlock;
148
149 reqSlotTag = GetFromFreeReqQ();
150
152 reqPoolPtr->reqPool[reqSlotTag].reqCode = reqCode;
153 reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag = cmdSlotTag;
154 reqPoolPtr->reqPool[reqSlotTag].logicalSliceAddr = tempLsa;
155 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.startIndex = nvmeDmaStartIndex;
156 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.nvmeBlockOffset = nvmeBlockOffset;
157 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.numOfNvmeBlock = tempNumOfNvmeBlock;
158
159 PutToSliceReqQ(reqSlotTag);
160
161 tempLsa++;
162 transCounter++;
163 nvmeDmaStartIndex += tempNumOfNvmeBlock;
164
165 // transform continue
166 while (transCounter < loop)
167 {
168 nvmeBlockOffset = 0;
169 tempNumOfNvmeBlock = NVME_BLOCKS_PER_SLICE;
170
171 reqSlotTag = GetFromFreeReqQ();
172
174 reqPoolPtr->reqPool[reqSlotTag].reqCode = reqCode;
175 reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag = cmdSlotTag;
176 reqPoolPtr->reqPool[reqSlotTag].logicalSliceAddr = tempLsa;
177 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.startIndex = nvmeDmaStartIndex;
178 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.nvmeBlockOffset = nvmeBlockOffset;
179 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.numOfNvmeBlock = tempNumOfNvmeBlock;
180
181 PutToSliceReqQ(reqSlotTag);
182
183 tempLsa++;
184 transCounter++;
185 nvmeDmaStartIndex += tempNumOfNvmeBlock;
186 }
187
188 // last transform
189 nvmeBlockOffset = 0;
190 tempNumOfNvmeBlock = (startLba + requestedNvmeBlock) % NVME_BLOCKS_PER_SLICE;
191 if ((tempNumOfNvmeBlock == 0) || (loop == 0))
192 return;
193
194 reqSlotTag = GetFromFreeReqQ();
195
197 reqPoolPtr->reqPool[reqSlotTag].reqCode = reqCode;
198 reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag = cmdSlotTag;
199 reqPoolPtr->reqPool[reqSlotTag].logicalSliceAddr = tempLsa;
200 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.startIndex = nvmeDmaStartIndex;
201 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.nvmeBlockOffset = nvmeBlockOffset;
202 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.numOfNvmeBlock = tempNumOfNvmeBlock;
203
204 PutToSliceReqQ(reqSlotTag);
205}
#define NVME_BLOCKS_PER_SLICE
Definition: ftl_config.h:217
#define IO_NVM_READ
Definition: nvme.h:85
#define IO_NVM_WRITE
Definition: nvme.h:84
void PutToSliceReqQ(unsigned int reqSlotTag)
Add the given request to the slice request queue.
#define REQ_TYPE_SLICE
unsigned int numOfNvmeBlock
unsigned int nvmeBlockOffset
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReqTransSliceToLowLevel()

void ReqTransSliceToLowLevel ( )

Data Buffer Manager. Handle all the pending slice requests.

This function will repeat the following steps until all the pending slice requests are consumed:

  1. Select a slice request from the slice request queue sliceReqQ.
  2. Allocate a data buffer entry for the request and generate flash requests if needed.

    Warning
    Why no need to modify logicalSliceAddr and generate flash request when buffer hit? data cache hit??
  3. Generate NVMe transfer/receive request for read/write request.

    Warning
    Why mark the data buffer dirty for write request?
  4. Dispatch the transfer/receive request by calling SelectLowLevelReqQ().
Note
This function is currently only called after handle_nvme_io_cmd() during the process of handling NVMe I/O commands in nvme_main.c.

Definition at line 323 of file request_transform.c.

324{
325 unsigned int reqSlotTag, dataBufEntry;
326
327 // consume all pending slice requests in slice request queue
329 {
330 // get the request pool entry index of the slice request
331 reqSlotTag = GetFromSliceReqQ();
332 if (reqSlotTag == REQ_SLOT_TAG_FAIL)
333 return;
334
335 /*
336 * In current implementation, the data buffer to be used is determined on the
337 * `logicalSliceAddr` of this request, so the data buffer may already be allocated
338 * before and so we can simply reuse that data buffer.
339 *
340 * If the data buffer not exists, we must allocate a data buffer entry by calling
341 * `AllocateDataBuf()` and initialize the newly created data buffer.
342 */
343 dataBufEntry = CheckDataBufHit(reqSlotTag);
344 if (dataBufEntry != DATA_BUF_FAIL)
345 {
346 // data buffer hit
347 reqPoolPtr->reqPool[reqSlotTag].dataBufInfo.entry = dataBufEntry;
348 }
349 else
350 {
351 // data buffer miss, allocate a new buffer entry
352 dataBufEntry = AllocateDataBuf();
353 reqPoolPtr->reqPool[reqSlotTag].dataBufInfo.entry = dataBufEntry;
354
355 // initialize the newly allocated data buffer entry for this request
356 EvictDataBufEntry(reqSlotTag);
357 dataBufMapPtr->dataBuf[dataBufEntry].logicalSliceAddr =
359 PutToDataBufHashList(dataBufEntry);
360
361 /*
362 * The allocated buffer will be used to store the data to be sent to host, or
363 * received from the host. So before transfering the data to host, we need to
364 * call the function `DataReadFromNand()` to read the desired data to buffer.
365 */
366 if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_READ)
367 DataReadFromNand(reqSlotTag);
368 else if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_WRITE)
369 // in case of not overwriting a whole page, read current page content for migration
371 // for read modify write
372 DataReadFromNand(reqSlotTag);
373 }
374
375 // generate NVMe request by replacing the slice request entry directly
376 if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_WRITE)
377 {
378 dataBufMapPtr->dataBuf[dataBufEntry].dirty = DATA_BUF_DIRTY;
380 }
381 else if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_READ)
383 else
384 assert(!"[WARNING] Not supported reqCode. [WARNING]");
385
388
389 UpdateDataBufEntryInfoBlockingReq(dataBufEntry, reqSlotTag);
390 SelectLowLevelReqQ(reqSlotTag);
391 }
392}
void PutToDataBufHashList(unsigned int bufEntry)
Insert the given data buffer entry into the hash table.
Definition: data_buffer.c:319
unsigned int CheckDataBufHit(unsigned int reqSlotTag)
Get the data buffer entry index of the given request.
Definition: data_buffer.c:127
unsigned int AllocateDataBuf()
Retrieve an LRU data buffer entry from the LRU list.
Definition: data_buffer.c:220
#define DATA_BUF_FAIL
Definition: data_buffer.h:58
SLICE_REQUEST_QUEUE sliceReqQ
unsigned int GetFromSliceReqQ()
Get a slice request from the slice request queue.
#define REQ_SLOT_TAG_FAIL
void EvictDataBufEntry(unsigned int originReqSlotTag)
Clear the specified data buffer entry and sync dirty data if needed.
void DataReadFromNand(unsigned int originReqSlotTag)
Generate and dispatch a flash read request for the given slice request.
unsigned int headReq
Definition: request_queue.h:60
Here is the call graph for this function:
Here is the caller graph for this function:

◆ SelectLowLevelReqQ()

void SelectLowLevelReqQ ( unsigned int  reqSlotTag)

Dispatch given NVMe/NAND request to corresponding request queue.

This function is in charge of issuing the given NVMe/NAND request. But before issuing the request, we should first make sure that this request is safe to be issued.

We first need to check whether this request is blocked by any other request that uses the same data buffer (check UpdateDataBufEntryInfoBlockingReq() for details).

  • If the request is not blocked by the blocking request queue, we can start issuing the request now, but NVMe/NAND request have different process:
  • For an NVMe DMA request (Tx from/to data buffer to/from host), we can just issue the request and wait for completion.
    • For a NAND request, we must do something before issuing the request:

      However, for NAND requests, since there may be some dependency problems between the requests (e.g., ERASE cannot be simply reordered before READ), we must check this kind of dependency problems (called "row address dependency" here) before dispatching the NAND requests by using the function CheckRowAddrDep().

      Once it was confirmed to have no row address dependency problem on this request, the request can then be dispatched; otherwise, the request should be blocked and inserted to the row address dependency queue.

      Note
      In the current implementation, the address format of the request to be checked must be VSA. So, for requests that use the physical address, the check option should be set to REQ_OPT_ROW_ADDR_DEPENDENCY_NONE so the check is skipped.
  • If the request is blocked by data buffer dependency

    The firmware will update the row address dependency info and, if the data buffer dependency has meanwhile been resolved, dispatch the request, by calling UpdateRowAddrDepTableForBufBlockedReq(), which performs a check similar to CheckRowAddrDep().

See also
CheckRowAddrDep(), UpdateDataBufEntryInfoBlockingReq(), NAND_INFO.
Parameters
reqSlotTagthe request pool index of the given request.

Definition at line 634 of file request_transform.c.

635{
636 unsigned int dieNo, chNo, wayNo, bufDepCheckReport, rowAddrDepCheckReport, rowAddrDepTableUpdateReport;
637
638 bufDepCheckReport = CheckBufDep(reqSlotTag);
639
640 if (bufDepCheckReport == BUF_DEPENDENCY_REPORT_PASS)
641 {
642 if (reqPoolPtr->reqPool[reqSlotTag].reqType == REQ_TYPE_NVME_DMA)
643 {
644 IssueNvmeDmaReq(reqSlotTag);
645 PutToNvmeDmaReqQ(reqSlotTag);
646 }
647 else if (reqPoolPtr->reqPool[reqSlotTag].reqType == REQ_TYPE_NAND)
648 {
649 // get physical organization info from VSA
651 {
653 chNo = Vdie2PchTranslation(dieNo);
654 wayNo = Vdie2PwayTranslation(dieNo);
655 }
656 // if the physical organization info is already specified, use it without translating
658 {
659 chNo = reqPoolPtr->reqPool[reqSlotTag].nandInfo.physicalCh;
660 wayNo = reqPoolPtr->reqPool[reqSlotTag].nandInfo.physicalWay;
661 }
662 else
663 assert(!"[WARNING] Not supported reqOpt-nandAddress [WARNING]");
664
665 // check row address dependency problem before dispatching
667 {
668 // NOTE: VSA translation in `CheckRowAddrDep()` could be skipped
669 rowAddrDepCheckReport = CheckRowAddrDep(reqSlotTag, ROW_ADDR_DEPENDENCY_CHECK_OPT_SELECT);
670
671 if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_PASS)
672 PutToNandReqQ(reqSlotTag, chNo, wayNo);
673 else if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_BLOCKED)
674 PutToBlockedByRowAddrDepReqQ(reqSlotTag, chNo, wayNo);
675 else
676 assert(!"[WARNING] Not supported report [WARNING]");
677 }
678 else if (reqPoolPtr->reqPool[reqSlotTag].reqOpt.rowAddrDependencyCheck ==
680 PutToNandReqQ(reqSlotTag, chNo, wayNo);
681 else
682 assert(!"[WARNING] Not supported reqOpt [WARNING]");
683 }
684 else
685 assert(!"[WARNING] Not supported reqType [WARNING]");
686 }
687 else if (bufDepCheckReport == BUF_DEPENDENCY_REPORT_BLOCKED)
688 {
689 if (reqPoolPtr->reqPool[reqSlotTag].reqType == REQ_TYPE_NAND)
691 {
692 // update row addr dep info and insert to `blockedByRowAddrDepReqQ` if needed
693 rowAddrDepTableUpdateReport = UpdateRowAddrDepTableForBufBlockedReq(reqSlotTag);
694
695 if (rowAddrDepTableUpdateReport == ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_DONE)
696 {
697 // no row addr dep problem, so put to blockedByBufDepReqQ
698 }
699 else if (rowAddrDepTableUpdateReport == ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_SYNC)
700 return;
701 else
702 assert(!"[WARNING] Not supported report [WARNING]");
703 }
704
705 PutToBlockedByBufDepReqQ(reqSlotTag);
706 }
707 else
708 assert(!"[WARNING] Not supported report [WARNING]");
709}
void PutToBlockedByBufDepReqQ(unsigned int reqSlotTag)
Add the given request to blockedByBufDepReqQ.
#define REQ_OPT_NAND_ADDR_PHY_ORG
unsigned int CheckBufDep(unsigned int reqSlotTag)
Check if this request has the buffer dependency problem.
unsigned int UpdateRowAddrDepTableForBufBlockedReq(unsigned int reqSlotTag)
Update the dependency info and dispatch the request if possible.
#define ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_SYNC
#define ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_DONE
unsigned int physicalWay
unsigned int physicalCh
Here is the call graph for this function:
Here is the caller graph for this function:

◆ UpdateRowAddrDepTableForBufBlockedReq()

unsigned int UpdateRowAddrDepTableForBufBlockedReq ( unsigned int  reqSlotTag)

Update the dependency info and dispatch the request if possible.

This function will update the data buffer and row address dependency info of the given request. If the given request is READ request and have no dependency problem, it will be dispatched in this function.

// FIXME: why only update buf dep for READ request? why ignore WRITE request?

Warning
Unlike CheckRowAddrDep(), this function may insert the given READ request into blockedByRowAddrDepReqQ and update relevant blocking info directly.
See also
CheckRowAddrDep(), SelectLowLevelReqQ().
Parameters
reqSlotTagRequest entry index of the request to be checked.
Returns
unsigned int The result of dependency check.

Definition at line 547 of file request_transform.c.

548{
549 unsigned int dieNo, chNo, wayNo, blockNo, pageNo, bufDepCheckReport;
550
552 {
554 chNo = Vdie2PchTranslation(dieNo);
555 wayNo = Vdie2PwayTranslation(dieNo);
558 }
559 else
560 assert(!"[WARNING] Not supported reqOpt-nandAddress [WARNING]");
561
562 if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_READ)
563 {
564 if (rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedEraseReqFlag)
565 {
566 // release the blocked erase request on the target block
567 SyncReleaseEraseReq(chNo, wayNo, blockNo);
568
569 // check if this request is still blocked by buffer dependency
570 bufDepCheckReport = CheckBufDep(reqSlotTag);
571 if (bufDepCheckReport == BUF_DEPENDENCY_REPORT_PASS)
572 {
573 // check row address dependency problem
574 if (pageNo < rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].permittedProgPage)
575 PutToNandReqQ(reqSlotTag, chNo, wayNo);
576 else
577 {
578 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedReadReqCnt++;
579 PutToBlockedByRowAddrDepReqQ(reqSlotTag, chNo, wayNo);
580 }
581
583 }
584 }
585 // still blocked by data buffer
586 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedReadReqCnt++;
587 }
588 else if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_ERASE)
589 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedEraseReqFlag = 1;
590
592}
Here is the call graph for this function:
Here is the caller graph for this function:

Variable Documentation

◆ rowAddrDependencyTablePtr

P_ROW_ADDR_DEPENDENCY_TABLE rowAddrDependencyTablePtr

Definition at line 56 of file request_transform.c.