OpenSSD Cosmos+ Platform Firmware  0.0.2
The firmware of the Cosmos+ OpenSSD Platform for TOSHIBA NAND flash modules.
request_transform.h File Reference
#include "ftl_config.h"
#include "nvme/nvme.h"
Include dependency graph for request_transform.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  _ROW_ADDR_DEPENDENCY_ENTRY
 The dependency info of this physical block. More...
 
struct  _ROW_ADDR_DEPENDENCY_TABLE
 The row address dependency table for all the user blocks. More...
 

Macros

#define NVME_COMMAND_AUTO_COMPLETION_OFF   0
 
#define NVME_COMMAND_AUTO_COMPLETION_ON   1
 
#define ROW_ADDR_DEPENDENCY_CHECK_OPT_SELECT   0
 
#define ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE   1
 
#define BUF_DEPENDENCY_REPORT_BLOCKED   0
 
#define BUF_DEPENDENCY_REPORT_PASS   1
 
#define ROW_ADDR_DEPENDENCY_REPORT_BLOCKED   0
 
#define ROW_ADDR_DEPENDENCY_REPORT_PASS   1
 
#define ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_DONE   0
 
#define ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_SYNC   1
 
#define ROW_ADDR_DEP_ENTRY(iCh, iWay, iBlk)   (&rowAddrDependencyTablePtr->block[(iCh)][(iWay)][(iBlk)])
 

Typedefs

typedef struct _ROW_ADDR_DEPENDENCY_ENTRY ROW_ADDR_DEPENDENCY_ENTRY
 The dependency info of this physical block. More...
 
typedef struct _ROW_ADDR_DEPENDENCY_ENTRY * P_ROW_ADDR_DEPENDENCY_ENTRY
 
typedef struct _ROW_ADDR_DEPENDENCY_TABLE ROW_ADDR_DEPENDENCY_TABLE
 The row address dependency table for all the user blocks. More...
 
typedef struct _ROW_ADDR_DEPENDENCY_TABLE * P_ROW_ADDR_DEPENDENCY_TABLE
 

Functions

void InitDependencyTable ()
 
void ReqTransNvmeToSlice (unsigned int cmdSlotTag, unsigned int startLba, unsigned int nlb, unsigned int cmdCode)
 Split NVMe command into slice requests. More...
 
void ReqTransSliceToLowLevel ()
 Data Buffer Manager. Handle all the pending slice requests. More...
 
void IssueNvmeDmaReq (unsigned int reqSlotTag)
 Allocate data buffer for the specified DMA request and inform the controller. More...
 
void CheckDoneNvmeDmaReq ()
 
void SelectLowLevelReqQ (unsigned int reqSlotTag)
 Dispatch given NVMe/NAND request to corresponding request queue. More...
 
void ReleaseBlockedByBufDepReq (unsigned int reqSlotTag)
 Pop the specified request from the buffer dependency queue. More...
 
void ReleaseBlockedByRowAddrDepReq (unsigned int chNo, unsigned int wayNo)
 Update the row address dependency of all the requests on the specified die. More...
 

Variables

P_ROW_ADDR_DEPENDENCY_TABLE rowAddrDependencyTablePtr
 

Macro Definition Documentation

◆ BUF_DEPENDENCY_REPORT_BLOCKED

#define BUF_DEPENDENCY_REPORT_BLOCKED   0

Definition at line 58 of file request_transform.h.

◆ BUF_DEPENDENCY_REPORT_PASS

#define BUF_DEPENDENCY_REPORT_PASS   1

Definition at line 59 of file request_transform.h.

◆ NVME_COMMAND_AUTO_COMPLETION_OFF

#define NVME_COMMAND_AUTO_COMPLETION_OFF   0

Definition at line 52 of file request_transform.h.

◆ NVME_COMMAND_AUTO_COMPLETION_ON

#define NVME_COMMAND_AUTO_COMPLETION_ON   1

Definition at line 53 of file request_transform.h.

◆ ROW_ADDR_DEP_ENTRY

#define ROW_ADDR_DEP_ENTRY (   iCh,
  iWay,
  iBlk 
)    (&rowAddrDependencyTablePtr->block[(iCh)][(iWay)][(iBlk)])

Definition at line 112 of file request_transform.h.

◆ ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE

#define ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE   1

Definition at line 56 of file request_transform.h.

◆ ROW_ADDR_DEPENDENCY_CHECK_OPT_SELECT

#define ROW_ADDR_DEPENDENCY_CHECK_OPT_SELECT   0

Definition at line 55 of file request_transform.h.

◆ ROW_ADDR_DEPENDENCY_REPORT_BLOCKED

#define ROW_ADDR_DEPENDENCY_REPORT_BLOCKED   0

Definition at line 61 of file request_transform.h.

◆ ROW_ADDR_DEPENDENCY_REPORT_PASS

#define ROW_ADDR_DEPENDENCY_REPORT_PASS   1

Definition at line 62 of file request_transform.h.

◆ ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_DONE

#define ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_DONE   0

Definition at line 64 of file request_transform.h.

◆ ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_SYNC

#define ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_SYNC   1

Definition at line 65 of file request_transform.h.

Typedef Documentation

◆ P_ROW_ADDR_DEPENDENCY_ENTRY

◆ P_ROW_ADDR_DEPENDENCY_TABLE

◆ ROW_ADDR_DEPENDENCY_ENTRY

The dependency info of this physical block.

To ensure that the integrity of the data will not be affected by the scheduler, the firmware should maintain some information for each flash block to avoid situations such as an erase request being reordered before the read requests.

Note
important for scheduling
See also
CheckRowAddrDep(), UpdateRowAddrDepTableForBufBlockedReq().

◆ ROW_ADDR_DEPENDENCY_TABLE

The row address dependency table for all the user blocks.

See also
ROW_ADDR_DEPENDENCY_ENTRY.

Function Documentation

◆ CheckDoneNvmeDmaReq()

void CheckDoneNvmeDmaReq ( )

Definition at line 918 of file request_transform.c.

919{
920 unsigned int reqSlotTag, prevReq;
921 unsigned int rxDone, txDone;
922
923 reqSlotTag = nvmeDmaReqQ.tailReq;
924 rxDone = 0;
925 txDone = 0;
926
927 while (reqSlotTag != REQ_SLOT_TAG_NONE)
928 {
929 prevReq = reqPoolPtr->reqPool[reqSlotTag].prevReq;
930
931 if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_RxDMA)
932 {
933 if (!rxDone)
936
937 if (rxDone)
938 SelectiveGetFromNvmeDmaReqQ(reqSlotTag);
939 }
940 else
941 {
942 if (!txDone)
945
946 if (txDone)
947 SelectiveGetFromNvmeDmaReqQ(reqSlotTag);
948 }
949
950 reqSlotTag = prevReq;
951 }
952}
unsigned int check_auto_rx_dma_partial_done(unsigned int tailIndex, unsigned int tailAssistIndex)
Definition: host_lld.c:524
unsigned int check_auto_tx_dma_partial_done(unsigned int tailIndex, unsigned int tailAssistIndex)
Definition: host_lld.c:485
NVME_DMA_REQUEST_QUEUE nvmeDmaReqQ
void SelectiveGetFromNvmeDmaReqQ(unsigned int reqSlotTag)
Move the specified entry from the nvmeDmaReqQ to the freeReqQ.
P_REQ_POOL reqPoolPtr
#define REQ_SLOT_TAG_NONE
#define REQ_CODE_RxDMA
unsigned int reqTail
unsigned int overFlowCnt
SSD_REQ_FORMAT reqPool[AVAILABLE_OUNTSTANDING_REQ_COUNT]
unsigned int reqCode
NVME_DMA_INFO nvmeDmaInfo
unsigned int prevReq
Here is the call graph for this function:
Here is the caller graph for this function:

◆ InitDependencyTable()

void InitDependencyTable ( )

Definition at line 58 of file request_transform.c.

59{
60 unsigned int blockNo, wayNo, chNo;
62
63 for (blockNo = 0; blockNo < MAIN_BLOCKS_PER_DIE; blockNo++)
64 {
65 for (wayNo = 0; wayNo < USER_WAYS; wayNo++)
66 {
67 for (chNo = 0; chNo < USER_CHANNELS; chNo++)
68 {
69 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].permittedProgPage = 0;
70 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedReadReqCnt = 0;
71 rowAddrDependencyTablePtr->block[chNo][wayNo][blockNo].blockedEraseReqFlag = 0;
72 }
73 }
74 }
75}
#define USER_CHANNELS
Definition: ftl_config.h:207
#define MAIN_BLOCKS_PER_DIE
Definition: ftl_config.h:162
#define USER_WAYS
Definition: ftl_config.h:208
#define ROW_ADDR_DEPENDENCY_TABLE_ADDR
Definition: memory_map.h:111
P_ROW_ADDR_DEPENDENCY_TABLE rowAddrDependencyTablePtr
struct _ROW_ADDR_DEPENDENCY_TABLE * P_ROW_ADDR_DEPENDENCY_TABLE
unsigned int blockedReadReqCnt
unsigned int permittedProgPage
unsigned int blockedEraseReqFlag
ROW_ADDR_DEPENDENCY_ENTRY block[USER_CHANNELS][USER_WAYS][MAIN_BLOCKS_PER_DIE]
Here is the caller graph for this function:

◆ IssueNvmeDmaReq()

void IssueNvmeDmaReq ( unsigned int  reqSlotTag)

Allocate data buffer for the specified DMA request and inform the controller.

This function is used for issuing a new DMA request, the DMA procedure can be split into 2 steps:

  1. Prepare a buffer based on the member dataBufFormat of the specified DMA request
  2. Inform NVMe controller

    For a DMA request, it might want to rx/tx data whose size is larger than 4K, which is the NVMe block size, so the firmware needs to inform the NVMe controller for each NVMe block.

    The tail reg of the DMA queue will be updated during the set_auto_rx_dma() and set_auto_tx_dma(), so we need to update the nvmeDmaInfo.reqTail after issuing the DMA request.

Warning
For a DMA request, the buffer address generated by GenerateDataBufAddr() is chosen based on the REQ_OPT_DATA_BUF_ENTRY, however, since the size of a data entry is BYTES_PER_DATA_REGION_OF_SLICE (default 4), will the data buffer used by the DMA request overlap with other requests' data buffer if the numOfNvmeBlock of the DMA request is larger than NVME_BLOCKS_PER_PAGE?
Parameters
reqSlotTagthe request pool index of the given request.

Definition at line 878 of file request_transform.c.

879{
880 unsigned int devAddr, dmaIndex, numOfNvmeBlock;
881
882 dmaIndex = reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.startIndex;
883 devAddr = GenerateDataBufAddr(reqSlotTag);
884 numOfNvmeBlock = 0;
885
886 if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_RxDMA)
887 {
888 while (numOfNvmeBlock < reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.numOfNvmeBlock)
889 {
890 set_auto_rx_dma(reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag, dmaIndex, devAddr,
892
893 numOfNvmeBlock++;
894 dmaIndex++;
895 devAddr += BYTES_PER_NVME_BLOCK;
896 }
899 }
900 else if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_TxDMA)
901 {
902 while (numOfNvmeBlock < reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.numOfNvmeBlock)
903 {
904 set_auto_tx_dma(reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag, dmaIndex, devAddr,
906
907 numOfNvmeBlock++;
908 dmaIndex++;
909 devAddr += BYTES_PER_NVME_BLOCK;
910 }
913 }
914 else
915 assert(!"[WARNING] Not supported reqCode [WARNING]");
916}
#define BYTES_PER_NVME_BLOCK
Definition: ftl_config.h:194
HOST_DMA_STATUS g_hostDmaStatus
Definition: host_lld.c:63
void set_auto_rx_dma(unsigned int cmdSlotTag, unsigned int cmd4KBOffset, unsigned int devAddr, unsigned int autoCompletion)
Definition: host_lld.c:419
void set_auto_tx_dma(unsigned int cmdSlotTag, unsigned int cmd4KBOffset, unsigned int devAddr, unsigned int autoCompletion)
Definition: host_lld.c:385
HOST_DMA_ASSIST_STATUS g_hostDmaAssistStatus
Definition: host_lld.c:64
#define REQ_CODE_TxDMA
unsigned int GenerateDataBufAddr(unsigned int reqSlotTag)
Get the corresponding data buffer entry address of the given request.
#define NVME_COMMAND_AUTO_COMPLETION_ON
unsigned int autoDmaRxOverFlowCnt
Definition: host_lld.h:340
unsigned int autoDmaTxOverFlowCnt
Definition: host_lld.h:339
unsigned char autoDmaRx
Definition: host_lld.h:288
unsigned char autoDmaTx
Definition: host_lld.h:289
HOST_DMA_FIFO_CNT_REG fifoTail
Definition: host_lld.h:330
unsigned int startIndex
unsigned int nvmeCmdSlotTag
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReleaseBlockedByBufDepReq()

void ReleaseBlockedByBufDepReq ( unsigned int  reqSlotTag)

Pop the specified request from the buffer dependency queue.

In the current implementation, this function is only called after the specified request entry is moved to the free request queue, which means that the previous request has released the data buffer entry it occupied. Therefore, we now need to update the relevant information about the data buffer dependency.

Warning
Only the NAND requests with VSA can use this function.
Since the struct DATA_BUF_ENTRY maintains only the tail of blocked requests, the specified request should be the head of blocked requests to ensure that the request order is not messed up.
See also
UpdateDataBufEntryInfoBlockingReq(), CheckBufDep(), SSD_REQ_FORMAT.
Parameters
reqSlotTagThe request entry index of the given request

Definition at line 729 of file request_transform.c.

730{
731 unsigned int targetReqSlotTag, dieNo, chNo, wayNo, rowAddrDepCheckReport;
732
733 // split the blocking request queue into 2 parts at `reqSlotTag`
734 targetReqSlotTag = REQ_SLOT_TAG_NONE;
736 {
737 targetReqSlotTag = reqPoolPtr->reqPool[reqSlotTag].nextBlockingReq;
740 }
741
742 // reset blocking request queue if it is the last request blocked by the buffer dependency
744 {
746 reqSlotTag)
749 }
751 {
753 reqSlotTag)
756 }
757
758 /*
759 * the specified request is released, so if its next request is only blocked by data
760 * buffer, it can be released now.
761 */
762 if ((targetReqSlotTag != REQ_SLOT_TAG_NONE) &&
764 {
765 SelectiveGetFromBlockedByBufDepReqQ(targetReqSlotTag);
766
767 if (reqPoolPtr->reqPool[targetReqSlotTag].reqType == REQ_TYPE_NVME_DMA)
768 {
769 IssueNvmeDmaReq(targetReqSlotTag);
770 PutToNvmeDmaReqQ(targetReqSlotTag);
771 }
772 else if (reqPoolPtr->reqPool[targetReqSlotTag].reqType == REQ_TYPE_NAND)
773 {
774 if (reqPoolPtr->reqPool[targetReqSlotTag].reqOpt.nandAddr == REQ_OPT_NAND_ADDR_VSA)
775 {
777 chNo = Vdie2PchTranslation(dieNo);
778 wayNo = Vdie2PwayTranslation(dieNo);
779 }
780 else
781 assert(!"[WARNING] Not supported reqOpt-nandAddress [WARNING]");
782
783 // check the row address dependency if needed
784 if (reqPoolPtr->reqPool[targetReqSlotTag].reqOpt.rowAddrDependencyCheck ==
786 {
787 rowAddrDepCheckReport = CheckRowAddrDep(targetReqSlotTag, ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE);
788
789 if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_PASS)
790 PutToNandReqQ(targetReqSlotTag, chNo, wayNo);
791 else if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_BLOCKED)
792 PutToBlockedByRowAddrDepReqQ(targetReqSlotTag, chNo, wayNo);
793 else
794 assert(!"[WARNING] Not supported report [WARNING]");
795 }
796 else if (reqPoolPtr->reqPool[targetReqSlotTag].reqOpt.rowAddrDependencyCheck ==
798 PutToNandReqQ(targetReqSlotTag, chNo, wayNo);
799 else
800 assert(!"[WARNING] Not supported reqOpt [WARNING]");
801 }
802 }
803}
#define Vdie2PchTranslation(dieNo)
#define Vsa2VdieTranslation(virtualSliceAddr)
#define Vdie2PwayTranslation(dieNo)
P_TEMPORARY_DATA_BUF_MAP tempDataBufMapPtr
Definition: data_buffer.c:53
P_DATA_BUF_MAP dataBufMapPtr
Definition: data_buffer.c:50
void PutToNandReqQ(unsigned int reqSlotTag, unsigned chNo, unsigned wayNo)
Add the given request to nandReqQ of the specified die.
void PutToNvmeDmaReqQ(unsigned int reqSlotTag)
Add the given request to the NVMe DMA request queue and update its status.
void PutToBlockedByRowAddrDepReqQ(unsigned int reqSlotTag, unsigned int chNo, unsigned int wayNo)
Add the given request to blockedByRowAddrDepReqQ.
void SelectiveGetFromBlockedByBufDepReqQ(unsigned int reqSlotTag)
Remove the given request from the blockedByBufDepReqQ.
#define REQ_QUEUE_TYPE_BLOCKED_BY_BUF_DEP
#define REQ_TYPE_NVME_DMA
#define REQ_OPT_DATA_BUF_TEMP_ENTRY
#define REQ_OPT_ROW_ADDR_DEPENDENCY_CHECK
#define REQ_OPT_NAND_ADDR_VSA
#define REQ_OPT_DATA_BUF_ENTRY
#define REQ_TYPE_NAND
#define REQ_OPT_ROW_ADDR_DEPENDENCY_NONE
void IssueNvmeDmaReq(unsigned int reqSlotTag)
Allocate data buffer for the specified DMA request and inform the controller.
unsigned int CheckRowAddrDep(unsigned int reqSlotTag, unsigned int checkRowAddrDepOpt)
Check if this NAND request has the row address dependency problem.
#define ROW_ADDR_DEPENDENCY_REPORT_PASS
#define ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE
#define ROW_ADDR_DEPENDENCY_REPORT_BLOCKED
unsigned int blockingReqTail
Definition: data_buffer.h:107
unsigned int entry
DATA_BUF_ENTRY dataBuf[AVAILABLE_DATA_BUFFER_ENTRY_COUNT]
Definition: data_buffer.h:122
unsigned int virtualSliceAddr
unsigned int dataBufFormat
Type of address stored in the SSD_REQ_FORMAT::dataBufInfo.
unsigned int nandAddr
Type of address stored in the SSD_REQ_FORMAT::nandInfo.
unsigned int rowAddrDependencyCheck
REQ_OPTION reqOpt
DATA_BUF_INFO dataBufInfo
NAND_INFO nandInfo
unsigned int reqType
unsigned int nextBlockingReq
unsigned int prevBlockingReq
unsigned int reqQueueType
unsigned int blockingReqTail
Definition: data_buffer.h:166
TEMPORARY_DATA_BUF_ENTRY tempDataBuf[AVAILABLE_TEMPORARY_DATA_BUFFER_ENTRY_COUNT]
Definition: data_buffer.h:179
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReleaseBlockedByRowAddrDepReq()

void ReleaseBlockedByRowAddrDepReq ( unsigned int  chNo,
unsigned int  wayNo 
)

Update the row address dependency of all the requests on the specified die.

Traverse the blockedByRowAddrDepReqQ of the specified die, and then recheck the row address dependency for all the requests on that die. When a request is found that it can pass the dependency check, it will be dispatched (move to the NAND request queue).

By updating the row address dependency info, some requests on the target die may be released.

See also
CheckRowAddrDep().
Parameters
chNoThe channel number of the specified die.
wayNoThe way number of the specified die.

Definition at line 820 of file request_transform.c.

821{
822 unsigned int reqSlotTag, nextReq, rowAddrDepCheckReport;
823
824 reqSlotTag = blockedByRowAddrDepReqQ[chNo][wayNo].headReq;
825
826 while (reqSlotTag != REQ_SLOT_TAG_NONE)
827 {
828 nextReq = reqPoolPtr->reqPool[reqSlotTag].nextReq;
829
831 {
832 rowAddrDepCheckReport = CheckRowAddrDep(reqSlotTag, ROW_ADDR_DEPENDENCY_CHECK_OPT_RELEASE);
833
834 if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_PASS)
835 {
836 SelectiveGetFromBlockedByRowAddrDepReqQ(reqSlotTag, chNo, wayNo);
837 PutToNandReqQ(reqSlotTag, chNo, wayNo);
838 }
839 else if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_BLOCKED)
840 {
841 // pass, go to while loop
842 }
843 else
844 assert(!"[WARNING] Not supported report [WARNING]");
845 }
846 else
847 assert(!"[WARNING] Not supported reqOpt [WARNING]");
848
849 reqSlotTag = nextReq;
850 }
851}
void SelectiveGetFromBlockedByRowAddrDepReqQ(unsigned int reqSlotTag, unsigned int chNo, unsigned int wayNo)
Remove the given request from the blockedByRowAddrDepReqQ.
BLOCKED_BY_ROW_ADDR_DEPENDENCY_REQUEST_QUEUE blockedByRowAddrDepReqQ[USER_CHANNELS][USER_WAYS]
unsigned int nextReq
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReqTransNvmeToSlice()

void ReqTransNvmeToSlice ( unsigned int  cmdSlotTag,
unsigned int  startLba,
unsigned int  nlb,
unsigned int  cmdCode 
)

Split NVMe command into slice requests.

Note
The unit of the given startLba and nlb is NVMe block, not NAND block.

To get the starting LSA of this NVMe command, we need to divide the given startLba by NVME_BLOCKS_PER_SLICE which indicates that how many NVMe blocks can be merged into a slice request.

To get the number of NAND blocks needed by this NVMe command, we should first align the starting NVMe block address startLba to slice 0, then convert the ending NVMe block address (startLba % NVME_BLOCKS_PER_SLICE + requestedNvmeBlock) to LSA, then the result indicates the number of slice requests needed by this NVMe command.

Note
According to the NVMe spec, NLB is a 0-based value, so we should increase the requestedNvmeBlock by 1 to get the real number of NVMe blocks to be read/written by this NVMe command.

Now the address translation part is finished and we can start to split the NVMe command into slice requests. The splitting process can be separated into 3 steps:

  1. Fill the remaining NVMe blocks in first slice request (head)

    Since the startLba may not perfectly align to the first NVMe block of the first slice request, we should access the trailing N NVMe blocks in the first slice request, where N is the number of misaligned NVMe blocks in the first slice request.

  2. Generate slice requests for the aligned NVMe blocks (body)

    General case. The number of the NVMe blocks to be filled by these slice requests is exactly NVME_BLOCKS_PER_SLICE. So here just simply use a loop to generate same slice requests.

  3. Generate slice request for the remaining NVMe blocks (tail)

    Similar to the first step, but here we need to access the first K NVMe blocks in the last slice request, where K is the number of remaining NVMe blocks in this slice request.

    Todo:
    generalize the three steps
Parameters
cmdSlotTag
Todo:
//TODO
Parameters
startLbaaddress of the first logical NVMe block to read/write.
nlbnumber of logical NVMe blocks to read/write.
cmdCodeopcode of the given NVMe command.

Definition at line 123 of file request_transform.c.

124{
125 unsigned int reqSlotTag, requestedNvmeBlock, tempNumOfNvmeBlock, transCounter, tempLsa, loop, nvmeBlockOffset,
126 nvmeDmaStartIndex, reqCode;
127
128 requestedNvmeBlock = nlb + 1;
129 transCounter = 0;
130 nvmeDmaStartIndex = 0;
131 tempLsa = startLba / NVME_BLOCKS_PER_SLICE;
132 loop = ((startLba % NVME_BLOCKS_PER_SLICE) + requestedNvmeBlock) / NVME_BLOCKS_PER_SLICE;
133
134 // translate the opcode for NVMe command into that for slice requests.
135 if (cmdCode == IO_NVM_WRITE)
136 reqCode = REQ_CODE_WRITE;
137 else if (cmdCode == IO_NVM_READ)
138 reqCode = REQ_CODE_READ;
139 else
140 assert(!"[WARNING] Not supported command code [WARNING]");
141
142 // first transform
143 nvmeBlockOffset = (startLba % NVME_BLOCKS_PER_SLICE);
144 if (loop)
145 tempNumOfNvmeBlock = NVME_BLOCKS_PER_SLICE - nvmeBlockOffset;
146 else
147 tempNumOfNvmeBlock = requestedNvmeBlock;
148
149 reqSlotTag = GetFromFreeReqQ();
150
152 reqPoolPtr->reqPool[reqSlotTag].reqCode = reqCode;
153 reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag = cmdSlotTag;
154 reqPoolPtr->reqPool[reqSlotTag].logicalSliceAddr = tempLsa;
155 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.startIndex = nvmeDmaStartIndex;
156 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.nvmeBlockOffset = nvmeBlockOffset;
157 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.numOfNvmeBlock = tempNumOfNvmeBlock;
158
159 PutToSliceReqQ(reqSlotTag);
160
161 tempLsa++;
162 transCounter++;
163 nvmeDmaStartIndex += tempNumOfNvmeBlock;
164
165 // transform continue
166 while (transCounter < loop)
167 {
168 nvmeBlockOffset = 0;
169 tempNumOfNvmeBlock = NVME_BLOCKS_PER_SLICE;
170
171 reqSlotTag = GetFromFreeReqQ();
172
174 reqPoolPtr->reqPool[reqSlotTag].reqCode = reqCode;
175 reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag = cmdSlotTag;
176 reqPoolPtr->reqPool[reqSlotTag].logicalSliceAddr = tempLsa;
177 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.startIndex = nvmeDmaStartIndex;
178 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.nvmeBlockOffset = nvmeBlockOffset;
179 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.numOfNvmeBlock = tempNumOfNvmeBlock;
180
181 PutToSliceReqQ(reqSlotTag);
182
183 tempLsa++;
184 transCounter++;
185 nvmeDmaStartIndex += tempNumOfNvmeBlock;
186 }
187
188 // last transform
189 nvmeBlockOffset = 0;
190 tempNumOfNvmeBlock = (startLba + requestedNvmeBlock) % NVME_BLOCKS_PER_SLICE;
191 if ((tempNumOfNvmeBlock == 0) || (loop == 0))
192 return;
193
194 reqSlotTag = GetFromFreeReqQ();
195
197 reqPoolPtr->reqPool[reqSlotTag].reqCode = reqCode;
198 reqPoolPtr->reqPool[reqSlotTag].nvmeCmdSlotTag = cmdSlotTag;
199 reqPoolPtr->reqPool[reqSlotTag].logicalSliceAddr = tempLsa;
200 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.startIndex = nvmeDmaStartIndex;
201 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.nvmeBlockOffset = nvmeBlockOffset;
202 reqPoolPtr->reqPool[reqSlotTag].nvmeDmaInfo.numOfNvmeBlock = tempNumOfNvmeBlock;
203
204 PutToSliceReqQ(reqSlotTag);
205}
#define NVME_BLOCKS_PER_SLICE
Definition: ftl_config.h:217
#define IO_NVM_READ
Definition: nvme.h:85
#define IO_NVM_WRITE
Definition: nvme.h:84
unsigned int GetFromFreeReqQ()
Get a free request from the free request queue.
void PutToSliceReqQ(unsigned int reqSlotTag)
Add the given request to the slice request queue.
#define REQ_CODE_WRITE
#define REQ_CODE_READ
#define REQ_TYPE_SLICE
unsigned int numOfNvmeBlock
unsigned int nvmeBlockOffset
unsigned int logicalSliceAddr
Here is the call graph for this function:
Here is the caller graph for this function:

◆ ReqTransSliceToLowLevel()

void ReqTransSliceToLowLevel ( )

Data Buffer Manager. Handle all the pending slice requests.

This function will repeat the following steps until all the pending slice requests are consumed:

  1. Select a slice request from the slice request queue sliceReqQ.
  2. Allocate a data buffer entry for the request and generate flash requests if needed.

    Warning
    Why no need to modify logicalSliceAddr and generate flash request when buffer hit? data cache hit??
  3. Generate NVMe transfer/receive request for read/write request.

    Warning
    Why mark the data buffer dirty for write request?
  4. Dispatch the transfer/receive request by calling SelectLowLevelReqQ().
Note
This function is currently only called after handle_nvme_io_cmd() during the process of handling NVMe I/O commands in nvme_main.c.

Definition at line 323 of file request_transform.c.

324{
325 unsigned int reqSlotTag, dataBufEntry;
326
327 // consume all pending slice requests in slice request queue
329 {
330 // get the request pool entry index of the slice request
331 reqSlotTag = GetFromSliceReqQ();
332 if (reqSlotTag == REQ_SLOT_TAG_FAIL)
333 return;
334
335 /*
336 * In current implementation, the data buffer to be used is determined on the
337 * `logicalSliceAddr` of this request, so the data buffer may already be allocated
338 * before and so we can simply reuse that data buffer.
339 *
340 * If the data buffer not exists, we must allocate a data buffer entry by calling
341 * `AllocateDataBuf()` and initialize the newly created data buffer.
342 */
343 dataBufEntry = CheckDataBufHit(reqSlotTag);
344 if (dataBufEntry != DATA_BUF_FAIL)
345 {
346 // data buffer hit
347 reqPoolPtr->reqPool[reqSlotTag].dataBufInfo.entry = dataBufEntry;
348 }
349 else
350 {
351 // data buffer miss, allocate a new buffer entry
352 dataBufEntry = AllocateDataBuf();
353 reqPoolPtr->reqPool[reqSlotTag].dataBufInfo.entry = dataBufEntry;
354
355 // initialize the newly allocated data buffer entry for this request
356 EvictDataBufEntry(reqSlotTag);
357 dataBufMapPtr->dataBuf[dataBufEntry].logicalSliceAddr =
359 PutToDataBufHashList(dataBufEntry);
360
361 /*
362 * The allocated buffer will be used to store the data to be sent to host, or
363 * received from the host. So before transfering the data to host, we need to
364 * call the function `DataReadFromNand()` to read the desired data to buffer.
365 */
366 if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_READ)
367 DataReadFromNand(reqSlotTag);
368 else if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_WRITE)
369 // in case of not overwriting a whole page, read current page content for migration
371 // for read modify write
372 DataReadFromNand(reqSlotTag);
373 }
374
375 // generate NVMe request by replacing the slice request entry directly
376 if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_WRITE)
377 {
378 dataBufMapPtr->dataBuf[dataBufEntry].dirty = DATA_BUF_DIRTY;
380 }
381 else if (reqPoolPtr->reqPool[reqSlotTag].reqCode == REQ_CODE_READ)
383 else
384 assert(!"[WARNING] Not supported reqCode. [WARNING]");
385
388
389 UpdateDataBufEntryInfoBlockingReq(dataBufEntry, reqSlotTag);
390 SelectLowLevelReqQ(reqSlotTag);
391 }
392}
void PutToDataBufHashList(unsigned int bufEntry)
Insert the given data buffer entry into the hash table.
Definition: data_buffer.c:319
void UpdateDataBufEntryInfoBlockingReq(unsigned int bufEntry, unsigned int reqSlotTag)
Append the request to the blocking queue of the specified data buffer entry.
Definition: data_buffer.c:268
unsigned int CheckDataBufHit(unsigned int reqSlotTag)
Get the data buffer entry index of the given request.
Definition: data_buffer.c:127
unsigned int AllocateDataBuf()
Retrieve a LRU data buffer entry from the LRU list.
Definition: data_buffer.c:220
#define DATA_BUF_FAIL
Definition: data_buffer.h:58
#define DATA_BUF_DIRTY
Definition: data_buffer.h:59
SLICE_REQUEST_QUEUE sliceReqQ
unsigned int GetFromSliceReqQ()
Get a slice request from the slice request queue.
#define REQ_SLOT_TAG_FAIL
void EvictDataBufEntry(unsigned int originReqSlotTag)
Clear the specified data buffer entry and sync dirty data if needed.
void SelectLowLevelReqQ(unsigned int reqSlotTag)
Dispatch given NVMe/NAND request to corresponding request queue.
void DataReadFromNand(unsigned int originReqSlotTag)
Generate and dispatch a flash read request for the given slice request.
unsigned int dirty
Definition: data_buffer.h:110
unsigned int logicalSliceAddr
Definition: data_buffer.h:104
unsigned int headReq
Definition: request_queue.h:60
Here is the call graph for this function:
Here is the caller graph for this function:

◆ SelectLowLevelReqQ()

void SelectLowLevelReqQ ( unsigned int  reqSlotTag)

Dispatch given NVMe/NAND request to corresponding request queue.

This function is in charge of issuing the given NVMe/NAND request. But before issuing the request, we should first make sure that this request is safe to be issued.

We first need to check whether this request is blocked by any other request that uses the same data buffer (check UpdateDataBufEntryInfoBlockingReq() for details).

  • If the request is not blocked by the blocking request queue, we can start issuing the request now, but NVMe/NAND request have different process:
    • For a NVNe DMA request (Tx from/to data buffer to/from host), we can just issue the request and wait for completion.
    • For a NAND request, we must do something before issuing the request:

      However, for NAND requests, since there may be some dependency problems between the requests (e.g., ERASE cannot be simply reordered before READ), we must check this kind of dependency problems (called "row address dependency" here) before dispatching the NAND requests by using the function CheckRowAddrDep().

      Once it was confirmed to have no row address dependency problem on this request, the request can then be dispatched; otherwise, the request should be blocked and inserted to the row address dependency queue.

      Note
      In the current implementation, the address format of the request to be checked must be VSA. So, for requests using the physical address, the check option should be set to REQ_OPT_ROW_ADDR_DEPENDENCY_NONE.
  • If the request is blocked by data buffer dependency

    The fw will try to recheck the data buffer dependency problem and release the request if possible, by calling UpdateRowAddrDepTableForBufBlockedReq(), which is similar to CheckRowAddrDep().

See also
CheckRowAddrDep(), UpdateDataBufEntryInfoBlockingReq(), NAND_INFO.
Parameters
reqSlotTagthe request pool index of the given request.

Definition at line 634 of file request_transform.c.

635{
636 unsigned int dieNo, chNo, wayNo, bufDepCheckReport, rowAddrDepCheckReport, rowAddrDepTableUpdateReport;
637
638 bufDepCheckReport = CheckBufDep(reqSlotTag);
639
640 if (bufDepCheckReport == BUF_DEPENDENCY_REPORT_PASS)
641 {
642 if (reqPoolPtr->reqPool[reqSlotTag].reqType == REQ_TYPE_NVME_DMA)
643 {
644 IssueNvmeDmaReq(reqSlotTag);
645 PutToNvmeDmaReqQ(reqSlotTag);
646 }
647 else if (reqPoolPtr->reqPool[reqSlotTag].reqType == REQ_TYPE_NAND)
648 {
649 // get physical organization info from VSA
651 {
653 chNo = Vdie2PchTranslation(dieNo);
654 wayNo = Vdie2PwayTranslation(dieNo);
655 }
656 // if the physical organization info is already specified, use it without translating
658 {
659 chNo = reqPoolPtr->reqPool[reqSlotTag].nandInfo.physicalCh;
660 wayNo = reqPoolPtr->reqPool[reqSlotTag].nandInfo.physicalWay;
661 }
662 else
663 assert(!"[WARNING] Not supported reqOpt-nandAddress [WARNING]");
664
665 // check row address dependency problem before dispatching
667 {
668 // NOTE: VSA translation in `CheckRowAddrDep()` could be skipped
669 rowAddrDepCheckReport = CheckRowAddrDep(reqSlotTag, ROW_ADDR_DEPENDENCY_CHECK_OPT_SELECT);
670
671 if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_PASS)
672 PutToNandReqQ(reqSlotTag, chNo, wayNo);
673 else if (rowAddrDepCheckReport == ROW_ADDR_DEPENDENCY_REPORT_BLOCKED)
674 PutToBlockedByRowAddrDepReqQ(reqSlotTag, chNo, wayNo);
675 else
676 assert(!"[WARNING] Not supported report [WARNING]");
677 }
678 else if (reqPoolPtr->reqPool[reqSlotTag].reqOpt.rowAddrDependencyCheck ==
680 PutToNandReqQ(reqSlotTag, chNo, wayNo);
681 else
682 assert(!"[WARNING] Not supported reqOpt [WARNING]");
683 }
684 else
685 assert(!"[WARNING] Not supported reqType [WARNING]");
686 }
687 else if (bufDepCheckReport == BUF_DEPENDENCY_REPORT_BLOCKED)
688 {
689 if (reqPoolPtr->reqPool[reqSlotTag].reqType == REQ_TYPE_NAND)
691 {
692 // update row addr dep info and insert to `blockedByRowAddrDepReqQ` if needed
693 rowAddrDepTableUpdateReport = UpdateRowAddrDepTableForBufBlockedReq(reqSlotTag);
694
695 if (rowAddrDepTableUpdateReport == ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_DONE)
696 {
697 // no row addr dep problem, so put to blockedByBufDepReqQ
698 }
699 else if (rowAddrDepTableUpdateReport == ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_SYNC)
700 return;
701 else
702 assert(!"[WARNING] Not supported report [WARNING]");
703 }
704
705 PutToBlockedByBufDepReqQ(reqSlotTag);
706 }
707 else
708 assert(!"[WARNING] Not supported report [WARNING]");
709}
void PutToBlockedByBufDepReqQ(unsigned int reqSlotTag)
Add the given request to blockedByBufDepReqQ.
#define REQ_OPT_NAND_ADDR_PHY_ORG
unsigned int CheckBufDep(unsigned int reqSlotTag)
Check if this request has the buffer dependency problem.
unsigned int UpdateRowAddrDepTableForBufBlockedReq(unsigned int reqSlotTag)
Update the dependency info and dispatch the request if possible.
#define BUF_DEPENDENCY_REPORT_PASS
#define ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_SYNC
#define ROW_ADDR_DEPENDENCY_CHECK_OPT_SELECT
#define BUF_DEPENDENCY_REPORT_BLOCKED
#define ROW_ADDR_DEPENDENCY_TABLE_UPDATE_REPORT_DONE
unsigned int physicalWay
unsigned int physicalCh
Here is the call graph for this function:
Here is the caller graph for this function:

Variable Documentation

◆ rowAddrDependencyTablePtr

P_ROW_ADDR_DEPENDENCY_TABLE rowAddrDependencyTablePtr
extern

Definition at line 56 of file request_transform.c.