--- a/src/lte/model/rr-ff-mac-scheduler.cc Tue Nov 20 18:18:01 2012 +0100
+++ b/src/lte/model/rr-ff-mac-scheduler.cc Thu Nov 22 12:05:46 2012 +0100
@@ -19,17 +19,19 @@
*/
#ifdef __FreeBSD__
-#define log2(x) (log(x) / M_LN2)
+#define log2(x) (log (x) / M_LN2)
#endif /* __FreeBSD__ */
#include <ns3/log.h>
#include <ns3/pointer.h>
+#include <set>
#include <ns3/lte-amc.h>
#include <ns3/rr-ff-mac-scheduler.h>
#include <ns3/simulator.h>
#include <ns3/lte-common.h>
#include <ns3/lte-vendor-specific-parameters.h>
+#include <ns3/boolean.h>
NS_LOG_COMPONENT_DEFINE ("RrFfMacScheduler");
@@ -234,6 +236,12 @@
RrFfMacScheduler::DoDispose ()
{
NS_LOG_FUNCTION (this);
+ m_dlHarqProcessesDciBuffer.clear ();
+ m_dlHarqProcessesRlcPduListBuffer.clear ();
+ m_dlInfoListBuffered.clear ();
+ m_ulHarqCurrentProcessId.clear ();
+ m_ulHarqProcessesStatus.clear ();
+ m_ulHarqProcessesDciBuffer.clear ();
delete m_cschedSapProvider;
delete m_schedSapProvider;
}
@@ -249,7 +257,12 @@
UintegerValue (1000),
MakeUintegerAccessor (&RrFfMacScheduler::m_cqiTimersThreshold),
MakeUintegerChecker<uint32_t> ())
- ;
+ .AddAttribute ("HarqEnabled",
+ "Activate/Deactivate the HARQ [by default is active].",
+ BooleanValue (true),
+ MakeBooleanAccessor (&RrFfMacScheduler::m_harqOn),
+ MakeBooleanChecker ())
+ ;
return tid;
}
@@ -296,9 +309,29 @@
{
NS_LOG_FUNCTION (this << " RNTI " << params.m_rnti << " txMode " << (uint16_t)params.m_transmissionMode);
std::map <uint16_t,uint8_t>::iterator it = m_uesTxMode.find (params.m_rnti);
- if (it==m_uesTxMode.end ())
+ if (it == m_uesTxMode.end ())
{
m_uesTxMode.insert (std::pair <uint16_t, double> (params.m_rnti, params.m_transmissionMode));
+ // generate HARQ buffers
+ m_dlHarqCurrentProcessId.insert (std::pair <uint16_t,uint8_t > (params.m_rnti, 0));
+ DlHarqProcessesStatus_t dlHarqPrcStatus;
+ dlHarqPrcStatus.resize (8,0);
+ m_dlHarqProcessesStatus.insert (std::pair <uint16_t, DlHarqProcessesStatus_t> (params.m_rnti, dlHarqPrcStatus));
+ DlHarqProcessesDciBuffer_t dlHarqdci;
+ dlHarqdci.resize (8);
+ m_dlHarqProcessesDciBuffer.insert (std::pair <uint16_t, DlHarqProcessesDciBuffer_t> (params.m_rnti, dlHarqdci));
+ DlHarqRlcPduListBuffer_t dlHarqRlcPdu;
+ dlHarqRlcPdu.resize (2);
+ dlHarqRlcPdu.at (0).resize (8);
+ dlHarqRlcPdu.at (1).resize (8);
+ m_dlHarqProcessesRlcPduListBuffer.insert (std::pair <uint16_t, DlHarqRlcPduListBuffer_t> (params.m_rnti, dlHarqRlcPdu));
+ m_ulHarqCurrentProcessId.insert (std::pair <uint16_t,uint8_t > (params.m_rnti, 0));
+ UlHarqProcessesStatus_t ulHarqPrcStatus;
+ ulHarqPrcStatus.resize (8,0);
+ m_ulHarqProcessesStatus.insert (std::pair <uint16_t, UlHarqProcessesStatus_t> (params.m_rnti, ulHarqPrcStatus));
+ UlHarqProcessesDciBuffer_t ulHarqdci;
+ ulHarqdci.resize (8);
+ m_ulHarqProcessesDciBuffer.insert (std::pair <uint16_t, UlHarqProcessesDciBuffer_t> (params.m_rnti, ulHarqdci));
}
else
{
@@ -398,7 +431,81 @@
bool
RrFfMacScheduler::SortRlcBufferReq (FfMacSchedSapProvider::SchedDlRlcBufferReqParameters i,FfMacSchedSapProvider::SchedDlRlcBufferReqParameters j)
{
- return (i.m_rnti<j.m_rnti);
+ return (i.m_rnti < j.m_rnti);
+}
+
+
+uint8_t
+RrFfMacScheduler::HarqProcessAvailability (uint16_t rnti)
+{
+ NS_LOG_FUNCTION (this << rnti);
+
+ std::map <uint16_t, uint8_t>::iterator it = m_dlHarqCurrentProcessId.find (rnti);
+ if (it == m_dlHarqCurrentProcessId.end ())
+ {
+ NS_FATAL_ERROR ("No Process Id found for this RNTI " << rnti);
+ }
+ std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find (rnti);
+ if (itStat == m_dlHarqProcessesStatus.end ())
+ {
+ NS_FATAL_ERROR ("No Process Id Status found for this RNTI " << rnti);
+ }
+ uint8_t i = (*it).second;
+ do
+ {
+ i = (i + 1) % HARQ_PROC_NUM;
+ }
+ while ( ((*itStat).second.at (i) != 0)&&(i != (*it).second));
+ if ((*itStat).second.at (i) == 0)
+ {
+ return (true); // a free HARQ process exists for this RNTI
+ }
+ else
+ {
+ return (false); // no free HARQ process available
+ }
+}
+
+
+
+uint8_t
+RrFfMacScheduler::UpdateHarqProcessId (uint16_t rnti)
+{
+ NS_LOG_FUNCTION (this << rnti);
+
+
+ if (m_harqOn == false)
+ {
+ return (0);
+ }
+
+ std::map <uint16_t, uint8_t>::iterator it = m_dlHarqCurrentProcessId.find (rnti);
+ if (it == m_dlHarqCurrentProcessId.end ())
+ {
+ NS_FATAL_ERROR ("No Process Id found for this RNTI " << rnti);
+ }
+ std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find (rnti);
+ if (itStat == m_dlHarqProcessesStatus.end ())
+ {
+ NS_FATAL_ERROR ("No Process Id Status found for this RNTI " << rnti);
+ }
+ uint8_t i = (*it).second;
+ do
+ {
+ i = (i + 1) % HARQ_PROC_NUM;
+ }
+ while ( ((*itStat).second.at (i) != 0)&&(i != (*it).second));
+ if ((*itStat).second.at (i) == 0)
+ {
+ (*it).second = i;
+ (*itStat).second.at (i) = 1;
+ }
+ else
+ {
+ return (9); // return a not valid harq proc id
+ }
+
+ return ((*it).second);
}
@@ -407,8 +514,267 @@
{
NS_LOG_FUNCTION (this << " DL Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf));
// API generated by RLC for triggering the scheduling of a DL subframe
-
+
RefreshDlCqiMaps ();
+ int rbgSize = GetRbgSize (m_cschedCellConfig.m_dlBandwidth);
+ int rbgNum = m_cschedCellConfig.m_dlBandwidth / rbgSize;
+ FfMacSchedSapUser::SchedDlConfigIndParameters ret;
+
+ // Generate RBGs map
+ std::vector <bool> rbgMap;
+ uint16_t rbgAllocatedNum = 0;
+ std::set <uint16_t> rntiAllocated;
+ rbgMap.resize (m_cschedCellConfig.m_dlBandwidth / rbgSize, false);
+
+ // Process DL HARQ feedback
+ // retrieve past HARQ retx buffered
+ if (m_dlInfoListBuffered.size () > 0)
+ {
+ if (params.m_dlInfoList.size () > 0)
+ {
+ NS_LOG_INFO (this << " Received DL-HARQ feedback");
+ m_dlInfoListBuffered.insert (m_dlInfoListBuffered.end (), params.m_dlInfoList.begin (), params.m_dlInfoList.end ());
+ }
+ }
+ else
+ {
+ if (params.m_dlInfoList.size () > 0)
+ {
+ m_dlInfoListBuffered = params.m_dlInfoList;
+ }
+ }
+ if (m_harqOn == false)
+ {
+ // Ignore HARQ feedback
+ m_dlInfoListBuffered.clear ();
+ }
+ std::vector <struct DlInfoListElement_s> dlInfoListUntxed;
+ for (uint8_t i = 0; i < m_dlInfoListBuffered.size (); i++)
+ {
+ std::set <uint16_t>::iterator itRnti = rntiAllocated.find (m_dlInfoListBuffered.at (i).m_rnti);
+ if (itRnti != rntiAllocated.end ())
+ {
+ // RNTI already allocated for retx
+ continue;
+ }
+ uint8_t nLayers = m_dlInfoListBuffered.at (i).m_harqStatus.size ();
+ std::vector <bool> retx;
+ NS_LOG_INFO (this << " Processing DLHARQ feedback");
+ if (nLayers == 1)
+ {
+ retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlInfoListElement_s::NACK);
+ retx.push_back (false);
+ }
+ else
+ {
+ retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlInfoListElement_s::NACK);
+ retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (1) == DlInfoListElement_s::NACK);
+ }
+ if (retx.at (0) || retx.at (1))
+ {
+ // retrieve HARQ process information
+ uint16_t rnti = m_dlInfoListBuffered.at (i).m_rnti;
+ uint8_t harqId = m_dlInfoListBuffered.at (i).m_harqProcessId;
+ NS_LOG_INFO (this << " HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId);
+ std::map <uint16_t, DlHarqProcessesDciBuffer_t>::iterator itHarq = m_dlHarqProcessesDciBuffer.find (rnti);
+ if (itHarq == m_dlHarqProcessesDciBuffer.end ())
+ {
+ NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << rnti);
+ }
+
+ DlDciListElement_s dci = (*itHarq).second.at (harqId);
+ int rv = 0;
+ if (dci.m_rv.size () == 1)
+ {
+ rv = dci.m_rv.at (0);
+ }
+ else
+ {
+ rv = (dci.m_rv.at (0) > dci.m_rv.at (1) ? dci.m_rv.at (0) : dci.m_rv.at (1));
+ }
+
+ if (rv == 3)
+ {
+ // maximum number of retx reached -> drop process
+ NS_LOG_INFO ("Max number of retransmissions reached -> drop process");
+ std::map <uint16_t, DlHarqProcessesStatus_t>::iterator it = m_dlHarqProcessesStatus.find (rnti);
+ if (it == m_dlHarqProcessesStatus.end ())
+ {
+ NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << m_dlInfoListBuffered.at (i).m_rnti);
+ }
+ (*it).second.at (harqId) = 0;
+ std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (rnti);
+ if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
+ {
+ NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti);
+ }
+ for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++)
+ {
+ (*itRlcPdu).second.at (k).at (harqId).clear ();
+ }
+ continue;
+ }
+ // check the feasibility of retransmitting on the same RBGs
+ // translate the DCI to Spectrum framework
+ std::vector <int> dciRbg;
+ uint32_t mask = 0x1;
+ NS_LOG_INFO ("Original RBGs " << dci.m_rbBitmap << " rnti " << dci.m_rnti);
+ for (int j = 0; j < 32; j++)
+ {
+ if (((dci.m_rbBitmap & mask) >> j) == 1)
+ {
+ dciRbg.push_back (j);
+ NS_LOG_INFO ("\t" << j);
+ }
+ mask = (mask << 1);
+ }
+ bool free = true;
+ for (uint8_t j = 0; j < dciRbg.size (); j++)
+ {
+ if (rbgMap.at (dciRbg.at (j)) == true)
+ {
+ free = false;
+ break;
+ }
+ }
+ if (free)
+ {
+ // use the same RBGs for the retx
+ // reserve RBGs
+ for (uint8_t j = 0; j < dciRbg.size (); j++)
+ {
+ rbgMap.at (dciRbg.at (j)) = true;
+ NS_LOG_INFO ("RBG " << dciRbg.at (j) << " assigned");
+ rbgAllocatedNum++;
+ }
+
+ NS_LOG_INFO (this << " Send retx in the same RBGs");
+ }
+ else
+ {
+ // find RBGs for sending HARQ retx
+ uint8_t j = 0;
+ uint8_t rbgId = (dciRbg.at (dciRbg.size () - 1) + 1) % rbgNum;
+ uint8_t startRbg = dciRbg.at (dciRbg.size () - 1);
+ std::vector <bool> rbgMapCopy = rbgMap;
+ while ((j < dciRbg.size ())&&(startRbg != rbgId))
+ {
+ if (rbgMapCopy.at (rbgId) == false)
+ {
+ rbgMapCopy.at (rbgId) = true;
+ dciRbg.at (j) = rbgId;
+ j++;
+ }
+ rbgId++;
+ }
+ if (j == dciRbg.size ())
+ {
+ // find new RBGs -> update DCI map
+ uint32_t rbgMask = 0;
+ for (uint16_t k = 0; k < dciRbg.size (); k++)
+ {
+ rbgMask = rbgMask + (0x1 << dciRbg.at (k));
+ NS_LOG_INFO (this << " New allocated RBG " << dciRbg.at (k));
+ rbgAllocatedNum++;
+ }
+ dci.m_rbBitmap = rbgMask;
+ rbgMap = rbgMapCopy;
+ }
+ else
+ {
+ // HARQ retx cannot be performed on this TTI -> store it
+ dlInfoListUntxed.push_back (params.m_dlInfoList.at (i));
+ NS_LOG_INFO (this << " No resource for this retx -> buffer it");
+ }
+ }
+ // retrieve RLC PDU list for retx TBsize and update DCI
+ BuildDataListElement_s newEl;
+ std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (rnti);
+ if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
+ {
+ NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << rnti);
+ }
+ for (uint8_t j = 0; j < nLayers; j++)
+ {
+ if (retx.at (j))
+ {
+ if (j >= dci.m_ndi.size ())
+ {
+ // for avoiding errors in MIMO transient phases
+ dci.m_ndi.push_back (0);
+ dci.m_rv.push_back (0);
+ dci.m_mcs.push_back (0);
+ dci.m_tbsSize.push_back (0);
+ NS_LOG_INFO (this << " layer " << (uint16_t)j << " no txed (MIMO transition)");
+
+ }
+ else
+ {
+ dci.m_ndi.at (j) = 0;
+ dci.m_rv.at (j)++;
+ (*itHarq).second.at (harqId).m_rv.at (j)++;
+ NS_LOG_INFO (this << " layer " << (uint16_t)j << " RV " << (uint16_t)dci.m_rv.at (j));
+ }
+ }
+ else
+ {
+ // empty TB of layer j
+ dci.m_ndi.at (j) = 0;
+ dci.m_rv.at (j) = 0;
+ dci.m_mcs.at (j) = 0;
+ dci.m_tbsSize.at (j) = 0;
+ NS_LOG_INFO (this << " layer " << (uint16_t)j << " no retx");
+ }
+ }
+
+ for (uint16_t k = 0; k < (*itRlcPdu).second.at (0).at (dci.m_harqProcess).size (); k++)
+ {
+ std::vector <struct RlcPduListElement_s> rlcPduListPerLc;
+ for (uint8_t j = 0; j < nLayers; j++)
+ {
+ if (retx.at (j))
+ {
+ if (j < dci.m_ndi.size ())
+ {
+ rlcPduListPerLc.push_back ((*itRlcPdu).second.at (j).at (dci.m_harqProcess).at (k));
+ }
+ }
+ }
+
+ if (rlcPduListPerLc.size () > 0)
+ {
+ newEl.m_rlcPduList.push_back (rlcPduListPerLc);
+ }
+ }
+ newEl.m_rnti = rnti;
+ newEl.m_dci = dci;
+ (*itHarq).second.at (harqId).m_rv = dci.m_rv;
+ ret.m_buildDataList.push_back (newEl);
+ rntiAllocated.insert (rnti);
+ }
+ else
+ {
+ // update HARQ process status
+ NS_LOG_INFO (this << " HARQ ACK UE " << m_dlInfoListBuffered.at (i).m_rnti);
+ std::map <uint16_t, DlHarqProcessesStatus_t>::iterator it = m_dlHarqProcessesStatus.find (m_dlInfoListBuffered.at (i).m_rnti);
+ if (it == m_dlHarqProcessesStatus.end ())
+ {
+ NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << m_dlInfoListBuffered.at (i).m_rnti);
+ }
+ (*it).second.at (m_dlInfoListBuffered.at (i).m_harqProcessId) = 0;
+ std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (m_dlInfoListBuffered.at (i).m_rnti);
+ if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
+ {
+ NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti);
+ }
+ for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++)
+ {
+ (*itRlcPdu).second.at (k).at (m_dlInfoListBuffered.at (i).m_harqProcessId).clear ();
+ }
+ }
+ }
+ m_dlInfoListBuffered.clear ();
+ m_dlInfoListBuffered = dlInfoListUntxed;
// Get the actual active flows (queue!=0)
std::list<FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator it;
@@ -421,9 +787,13 @@
{
NS_LOG_LOGIC (this << " User " << (*it).m_rnti << " LC " << (uint16_t)(*it).m_logicalChannelIdentity);
// remove old entries of this UE-LC
- if ( ((*it).m_rlcTransmissionQueueSize > 0)
- || ((*it).m_rlcRetransmissionQueueSize > 0)
- || ((*it).m_rlcStatusPduSize > 0) )
+ std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).m_rnti);
+ if ( (((*it).m_rlcTransmissionQueueSize > 0)
+ || ((*it).m_rlcRetransmissionQueueSize > 0)
+ || ((*it).m_rlcStatusPduSize > 0))
+ && (itRnti == rntiAllocated.end ()) // UE must not be allocated for HARQ retx
+ && (HarqProcessAvailability ((*it).m_rnti)) ) // UE needs HARQ proc free
+
{
std::map <uint16_t,uint8_t>::iterator itCqi = m_p10CqiRxed.find ((*it).m_rnti);
uint8_t cqi = 0;
@@ -449,30 +819,31 @@
lcActivesPerRnti.insert (std::pair<uint16_t, uint8_t > ((*it).m_rnti, 1));
nTbs++;
}
-
+
}
}
}
-
+
if (nflows == 0)
{
+ if (ret.m_buildDataList.size () > 0)
+ {
+ m_schedSapUser->SchedDlConfigInd (ret);
+ }
return;
}
// Divide the resource equally among the active users according to
// Resource allocation type 0 (see sec 7.1.6.1 of 36.213)
- int rbgSize = GetRbgSize (m_cschedCellConfig.m_dlBandwidth);
- int rbgNum = m_cschedCellConfig.m_dlBandwidth / rbgSize;
- int rbgPerTb = rbgNum / nTbs;
+
+ int rbgPerTb = (rbgNum - rbgAllocatedNum) / nTbs;
if (rbgPerTb == 0)
{
rbgPerTb = 1; // at least 1 rbg per TB (till available resource)
}
int rbgAllocated = 0;
- FfMacSchedSapUser::SchedDlConfigIndParameters ret;
- // round robin assignment to all UE-LC registered starting from the subsequent of the one
- // served last scheduling trigger
- //NS_LOG_DEBUG (this << " next to be served " << m_nextRntiDl << " nflows " << nflows);
+ // round robin assignment to all UEs registered starting from the subsequent of the one
+ // served last scheduling trigger event
if (m_nextRntiDl != 0)
{
for (it = m_rlcBufferReq.begin (); it != m_rlcBufferReq.end (); it++)
@@ -497,9 +868,10 @@
do
{
itLcRnti = lcActivesPerRnti.find ((*it).m_rnti);
- if (itLcRnti == lcActivesPerRnti.end ())
+ std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).m_rnti);
+ if ((itLcRnti == lcActivesPerRnti.end ())||(itRnti != rntiAllocated.end ()))
{
- // skip this entry
+ // skip this entry (no active queue or yet allocated for HARQ)
it++;
if (it == m_rlcBufferReq.end ())
{
@@ -509,7 +881,7 @@
continue;
}
itTxMode = m_uesTxMode.find ((*it).m_rnti);
- if (itTxMode == m_uesTxMode.end())
+ if (itTxMode == m_uesTxMode.end ())
{
NS_FATAL_ERROR ("No Transmission Mode info on user " << (*it).m_rnti);
}
@@ -521,10 +893,11 @@
// create the DlDciListElement_s
DlDciListElement_s newDci;
newDci.m_rnti = (*it).m_rnti;
+ newDci.m_harqProcess = UpdateHarqProcessId ((*it).m_rnti);
newDci.m_resAlloc = 0;
newDci.m_rbBitmap = 0;
std::map <uint16_t,uint8_t>::iterator itCqi = m_p10CqiRxed.find (newEl.m_rnti);
- for (uint8_t i = 0; i < nLayer; i++)
+ for (uint8_t i = 0; i < nLayer; i++)
{
if (itCqi == m_p10CqiRxed.end ())
{
@@ -535,14 +908,9 @@
newDci.m_mcs.push_back ( m_amc->GetMcsFromCqi ((*itCqi).second) );
}
}
- // group the LCs of this RNTI
- std::vector <struct RlcPduListElement_s> newRlcPduLe;
-// int totRbg = lcNum * rbgPerFlow;
-// totRbg = rbgNum / nTbs;
int tbSize = (m_amc->GetTbSizeFromMcs (newDci.m_mcs.at (0), rbgPerTb * rbgSize) / 8);
- NS_LOG_DEBUG (this << " DL - Allocate user " << newEl.m_rnti << " LCs " << (uint16_t)(*itLcRnti).second << " bytes " << tbSize << " PRBs " << rbgAllocated * rbgSize << "..." << (rbgAllocated* rbgSize) + (rbgPerTb * rbgSize) - 1 << " mcs " << (uint16_t) newDci.m_mcs.at (0) << " layers " << nLayer);
uint16_t rlcPduSize = tbSize / lcNum;
- while (lcNum>0)
+ while (lcNum > 0)
{
if ( ((*it).m_rlcTransmissionQueueSize > 0)
|| ((*it).m_rlcRetransmissionQueueSize > 0)
@@ -553,10 +921,22 @@
{
RlcPduListElement_s newRlcEl;
newRlcEl.m_logicalChannelIdentity = (*it).m_logicalChannelIdentity;
- // NS_LOG_DEBUG (this << "LCID " << (uint32_t) newRlcEl.m_logicalChannelIdentity << " size " << rlcPduSize << " ID " << (*it).m_rnti << " layer " << (uint16_t)j);
+ NS_LOG_INFO (this << "LCID " << (uint32_t) newRlcEl.m_logicalChannelIdentity << " size " << rlcPduSize << " ID " << (*it).m_rnti << " layer " << (uint16_t)j);
newRlcEl.m_size = rlcPduSize;
UpdateDlRlcBufferInfo ((*it).m_rnti, newRlcEl.m_logicalChannelIdentity, rlcPduSize);
newRlcPduLe.push_back (newRlcEl);
+
+ if (m_harqOn == true)
+ {
+ // store RLC PDU list for HARQ
+ std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find ((*it).m_rnti);
+ if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
+ {
+ NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << (*it).m_rnti);
+ }
+ (*itRlcPdu).second.at (j).at (newDci.m_harqProcess).push_back (newRlcEl);
+ }
+
}
newEl.m_rlcPduList.push_back (newRlcPduLe);
lcNum--;
@@ -569,9 +949,18 @@
}
}
uint32_t rbgMask = 0;
- for (int i = 0; i < rbgPerTb; i++)
+ uint16_t i = 0;
+ NS_LOG_INFO (this << " DL - Allocate user " << newEl.m_rnti << " LCs " << (uint16_t)(*itLcRnti).second << " bytes " << tbSize << " mcs " << (uint16_t) newDci.m_mcs.at (0) << " harqId " << (uint16_t)newDci.m_harqProcess << " layers " << nLayer);
+ NS_LOG_DEBUG ("RBG:");
+ while (i < rbgPerTb)
{
- rbgMask = rbgMask + (0x1 << rbgAllocated);
+ if (rbgMap.at (rbgAllocated) == false)
+ {
+ rbgMask = rbgMask + (0x1 << rbgAllocated);
+ NS_LOG_DEBUG ("\t " << rbgAllocated);
+ i++;
+ rbgMap.at (rbgAllocated) = true;
+ }
rbgAllocated++;
}
newDci.m_rbBitmap = rbgMask; // (32 bit bitmap see 7.1.6 of 36.213)
@@ -579,20 +968,25 @@
for (int i = 0; i < nLayer; i++)
{
newDci.m_tbsSize.push_back (tbSize);
- newDci.m_ndi.push_back (1); // TBD (new data indicator)
- newDci.m_rv.push_back (0); // TBD (redundancy version)
+ newDci.m_ndi.push_back (1);
+ newDci.m_rv.push_back (0);
}
newEl.m_dci = newDci;
+ if (m_harqOn == true)
+ {
+ // store DCI for HARQ
+ std::map <uint16_t, DlHarqProcessesDciBuffer_t>::iterator itDci = m_dlHarqProcessesDciBuffer.find (newEl.m_rnti);
+ if (itDci == m_dlHarqProcessesDciBuffer.end ())
+ {
+ NS_FATAL_ERROR ("Unable to find RNTI entry in DCI HARQ buffer for RNTI " << (*it).m_rnti);
+ }
+ (*itDci).second.at (newDci.m_harqProcess) = newDci;
+ }
// ...more parameters -> ignored in this version
-
-
-
- newEl.m_rlcPduList.push_back (newRlcPduLe);
ret.m_buildDataList.push_back (newEl);
if (rbgAllocated == rbgNum)
{
- //NS_LOG_DEBUG (this << " FULL " << (*it).m_rnti);
m_nextRntiDl = (*it).m_rnti; // store last RNTI served
break; // no more RGB to be allocated
}
@@ -646,7 +1040,7 @@
}
else if ( params.m_cqiList.at (i).m_cqiType == CqiListElement_s::A30 )
{
- // subband CQI reporting high layer configured
+ // subband CQI reporting high layer configured
// Not used by RR Scheduler
}
else
@@ -665,14 +1059,99 @@
RefreshUlCqiMaps ();
-
- std::map <uint16_t,uint32_t>::iterator it;
+
+ // Generate RBs map
+ FfMacSchedSapUser::SchedUlConfigIndParameters ret;
+ std::vector <bool> rbMap;
+ uint16_t rbAllocatedNum = 0;
+ std::set <uint16_t> rntiAllocated;
+ std::vector <uint16_t> rbgAllocationMap;
+ rbgAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
+
+ rbMap.resize (m_cschedCellConfig.m_ulBandwidth, false);
+
+ if (m_harqOn == true)
+ {
+ // Process UL HARQ feedback
+ // update UL HARQ proc id
+ std::map <uint16_t, uint8_t>::iterator itProcId;
+ for (itProcId = m_ulHarqCurrentProcessId.begin (); itProcId != m_ulHarqCurrentProcessId.end (); itProcId++)
+ {
+ (*itProcId).second = ((*itProcId).second + 1) % HARQ_PROC_NUM;
+ }
+ for (uint8_t i = 0; i < params.m_ulInfoList.size (); i++)
+ {
+ if (params.m_ulInfoList.at (i).m_receptionStatus == UlInfoListElement_s::NotOk)
+ {
+ // retx correspondent block: retrieve the UL-DCI
+ uint16_t rnti = params.m_ulInfoList.at (i).m_rnti;
+ itProcId = m_ulHarqCurrentProcessId.find (rnti);
+ if (itProcId == m_ulHarqCurrentProcessId.end ())
+ {
+ NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << rnti);
+ }
+ uint8_t harqId = (uint8_t)((*itProcId).second - HARQ_PERIOD) % HARQ_PROC_NUM;
+ NS_LOG_INFO (this << " UL-HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId);
+ std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itHarq = m_ulHarqProcessesDciBuffer.find (rnti);
+ if (itHarq == m_ulHarqProcessesDciBuffer.end ())
+ {
+ NS_FATAL_ERROR ("No info find in UL-HARQ buffer for UE " << rnti);
+ }
+ UlDciListElement_s dci = (*itHarq).second.at (harqId);
+ std::map <uint16_t, UlHarqProcessesStatus_t>::iterator itStat = m_ulHarqProcessesStatus.find (rnti);
+ if (itStat == m_ulHarqProcessesStatus.end ())
+ {
+ NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << rnti);
+ }
+ if ((*itStat).second.at (harqId) > 3)
+ {
+ NS_LOG_INFO ("Max number of retransmissions reached (UL)-> drop process");
+ continue;
+ }
+ bool free = true;
+ for (int j = dci.m_rbStart; j < dci.m_rbStart + dci.m_rbLen; j++)
+ {
+ if (rbMap.at (j) == true)
+ {
+ free = false;
+ NS_LOG_INFO (this << " BUSY " << j);
+ }
+ }
+ if (free)
+ {
+ // retx on the same RBs
+ for (int j = dci.m_rbStart; j < dci.m_rbStart + dci.m_rbLen; j++)
+ {
+ rbMap.at (j) = true;
+ rbgAllocationMap.at (j) = dci.m_rnti;
+ NS_LOG_INFO ("\tRB " << j);
+ rbAllocatedNum++;
+ }
+ NS_LOG_INFO (this << " Send retx in the same RBGs " << (uint16_t)dci.m_rbStart << " to " << dci.m_rbStart + dci.m_rbLen << " RV " << (*itStat).second.at (harqId) + 1);
+ }
+ else
+ {
+ NS_FATAL_ERROR ("Cannot allocate retx for UE " << rnti);
+ }
+ dci.m_ndi = 0;
+ // Update HARQ buffers with new HarqId
+ (*itStat).second.at ((*itProcId).second) = (*itStat).second.at (harqId) + 1;
+ (*itStat).second.at (harqId) = 0;
+ (*itHarq).second.at ((*itProcId).second) = dci;
+ ret.m_dciList.push_back (dci);
+ rntiAllocated.insert (dci.m_rnti);
+ }
+ }
+ }
+
+ std::map <uint16_t,uint32_t>::iterator it;
int nflows = 0;
for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++)
{
- // remove old entries of this UE-LC
- if ((*it).second > 0)
+ std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first);
+ // select UEs with queues not empty and not yet allocated for HARQ
+ if (((*it).second > 0)&&(itRnti == rntiAllocated.end ()))
{
nflows++;
}
@@ -680,20 +1159,18 @@
if (nflows == 0)
{
- return ; // no flows to be scheduled
+ return; // no flows to be scheduled
}
- // Divide the resource equally among the active users starting from the subsequent one served last scheduling trigger
- int rbPerFlow = m_cschedCellConfig.m_ulBandwidth / nflows;
+ // Divide the remaining resources equally among the active users starting from the subsequent one served last scheduling trigger
+ uint16_t rbPerFlow = (m_cschedCellConfig.m_ulBandwidth) / (nflows + rntiAllocated.size ());
if (rbPerFlow == 0)
{
rbPerFlow = 1; // at least 1 rbg per flow (till available resource)
}
- int rbAllocated = 0;
+ uint16_t rbAllocated = 0;
- FfMacSchedSapUser::SchedUlConfigIndParameters ret;
- std::vector <uint16_t> rbgAllocationMap;
if (m_nextRntiUl != 0)
{
for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++)
@@ -713,25 +1190,82 @@
it = m_ceBsrRxed.begin ();
m_nextRntiUl = (*it).first;
}
+ NS_LOG_INFO (this << " RB per Flow " << rbPerFlow);
do
{
- if (rbAllocated + rbPerFlow > m_cschedCellConfig.m_ulBandwidth)
+ std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first);
+ if ((itRnti != rntiAllocated.end ())||((*it).second == 0))
+ {
+ // UE already allocated for UL-HARQ -> skip it
+ it++;
+ if (it == m_ceBsrRxed.end ())
+ {
+ // restart from the first
+ it = m_ceBsrRxed.begin ();
+ }
+ continue;
+ }
+ if (rbAllocated + rbPerFlow - 1 > m_cschedCellConfig.m_ulBandwidth)
{
// limit to physical resources last resource assignment
rbPerFlow = m_cschedCellConfig.m_ulBandwidth - rbAllocated;
}
-
+
UlDciListElement_s uldci;
uldci.m_rnti = (*it).first;
- uldci.m_rbStart = rbAllocated;
uldci.m_rbLen = rbPerFlow;
+ bool allocated = false;
+ while ((!allocated)&&((rbAllocated + rbPerFlow - 1) < m_cschedCellConfig.m_ulBandwidth))
+ {
+ // check availability
+ bool free = true;
+ for (uint16_t j = rbAllocated; j < rbAllocated + rbPerFlow; j++)
+ {
+ if (rbMap.at (j) == true)
+ {
+ free = false;
+ break;
+ }
+ }
+ if (free)
+ {
+ uldci.m_rbStart = rbAllocated;
+
+ for (uint16_t j = rbAllocated; j < rbAllocated + rbPerFlow; j++)
+ {
+ rbMap.at (j) = true;
+ // store info on allocation for managing ul-cqi interpretation
+ rbgAllocationMap.at (j) = (*it).first;
+ NS_LOG_DEBUG ("\t " << j);
+ }
+ rbAllocated += rbPerFlow;
+ allocated = true;
+ break;
+ }
+ rbAllocated++;
+ if (rbAllocated + rbPerFlow - 1 > m_cschedCellConfig.m_ulBandwidth)
+ {
+ // limit to physical resources last resource assignment
+ rbPerFlow = m_cschedCellConfig.m_ulBandwidth - rbAllocated;
+ }
+ }
+ if (!allocated)
+ {
+ // unable to allocate new resource: finish scheduling
+ if (ret.m_dciList.size () > 0)
+ {
+ m_schedSapUser->SchedUlConfigInd (ret);
+ }
+ m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> > (params.m_sfnSf, rbgAllocationMap));
+ return;
+ }
std::map <uint16_t, std::vector <double> >::iterator itCqi = m_ueCqi.find ((*it).first);
int cqi = 0;
if (itCqi == m_ueCqi.end ())
{
// no cqi info about this UE
uldci.m_mcs = 0; // MCS 0 -> UL-AMC TBD
- NS_LOG_DEBUG (this << " UE does not have ULCQI " << (*it).first );
+ NS_LOG_INFO (this << " UE does not have ULCQI " << (*it).first );
}
else
{
@@ -762,19 +1296,9 @@
continue; // CQI == 0 means "out of range" (see table 7.2.3-1 of 36.213)
}
uldci.m_mcs = m_amc->GetMcsFromCqi (cqi);
-// NS_LOG_DEBUG (this << " UE " << (*it).first << " minsinr " << minSinr << " -> mcs " << (uint16_t)uldci.m_mcs);
+ }
+ uldci.m_tbSize = (m_amc->GetTbSizeFromMcs (uldci.m_mcs, rbPerFlow) / 8); // MCS 0 -> UL-AMC TBD
- }
-
- rbAllocated += rbPerFlow;
- // store info on allocation for managing ul-cqi interpretation
- for (int i = 0; i < rbPerFlow; i++)
- {
- rbgAllocationMap.push_back ((*it).first);
- }
-
- uldci.m_tbSize = (m_amc->GetTbSizeFromMcs (uldci.m_mcs, rbPerFlow) / 8); // MCS 0 -> UL-AMC TBD
- NS_LOG_DEBUG (this << " UL - UE " << (*it).first << " startPRB " << (uint32_t)uldci.m_rbStart << " nPRB " << (uint32_t)uldci.m_rbLen << " CQI " << cqi << " MCS " << (uint32_t)uldci.m_mcs << " TBsize " << uldci.m_tbSize);
UpdateUlRlcBufferInfo (uldci.m_rnti, uldci.m_tbSize);
uldci.m_ndi = 1;
uldci.m_cceIndex = 0;
@@ -789,6 +1313,26 @@
uldci.m_freqHopping = 0;
uldci.m_pdcchPowerOffset = 0; // not used
ret.m_dciList.push_back (uldci);
+ // store DCI for HARQ_PERIOD
+ uint8_t harqId = 0;
+ if (m_harqOn == true)
+ {
+ std::map <uint16_t, uint8_t>::iterator itProcId;
+ itProcId = m_ulHarqCurrentProcessId.find (uldci.m_rnti);
+ if (itProcId == m_ulHarqCurrentProcessId.end ())
+ {
+ NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << uldci.m_rnti);
+ }
+ harqId = (*itProcId).second;
+ std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itDci = m_ulHarqProcessesDciBuffer.find (uldci.m_rnti);
+ if (itDci == m_ulHarqProcessesDciBuffer.end ())
+ {
+ NS_FATAL_ERROR ("Unable to find RNTI entry in UL DCI HARQ buffer for RNTI " << uldci.m_rnti);
+ }
+ (*itDci).second.at (harqId) = uldci;
+ }
+ NS_LOG_INFO (this << " UL Allocation - UE " << (*it).first << " startPRB " << (uint32_t)uldci.m_rbStart << " nPRB " << (uint32_t)uldci.m_rbLen << " CQI " << cqi << " MCS " << (uint32_t)uldci.m_mcs << " TBsize " << uldci.m_tbSize << " harqId " << (uint16_t)harqId);
+
it++;
if (it == m_ceBsrRxed.end ())
{
@@ -835,7 +1379,7 @@
{
if ( params.m_macCeList.at (i).m_macCeType == MacCeListElement_s::BSR )
{
- // buffer status report
+ // buffer status report
// note that this scheduler does not differentiate the
// allocation according to which LCGs have more/less bytes
// to send.
@@ -871,159 +1415,156 @@
RrFfMacScheduler::DoSchedUlCqiInfoReq (const struct FfMacSchedSapProvider::SchedUlCqiInfoReqParameters& params)
{
NS_LOG_FUNCTION (this);
- NS_LOG_DEBUG (this << " RX SFNID " << params.m_sfnSf);
-// NS_LOG_DEBUG (this << " Actual sfn " << frameNo << " sbfn " << subframeNo << " sfnSf " << sfnSf);
+
switch (m_ulCqiFilter)
{
- case FfMacScheduler::SRS_UL_CQI:
- {
- // filter all the CQIs that are not SRS based
- if (params.m_ulCqi.m_type!=UlCqi_s::SRS)
- {
- return;
- }
- }
+ case FfMacScheduler::SRS_UL_CQI:
+ {
+ // filter all the CQIs that are not SRS based
+ if (params.m_ulCqi.m_type != UlCqi_s::SRS)
+ {
+ return;
+ }
+ }
break;
- case FfMacScheduler::PUSCH_UL_CQI:
- {
- // filter all the CQIs that are not SRS based
- if (params.m_ulCqi.m_type!=UlCqi_s::PUSCH)
- {
- return;
- }
- }
- case FfMacScheduler::ALL_UL_CQI:
- break;
-
- default:
- NS_FATAL_ERROR ("Unknown UL CQI type");
+ case FfMacScheduler::PUSCH_UL_CQI:
+ {
+ // filter all the CQIs that are not PUSCH based
+ if (params.m_ulCqi.m_type != UlCqi_s::PUSCH)
+ {
+ return;
+ }
+ }
+ case FfMacScheduler::ALL_UL_CQI:
+ break;
+
+ default:
+ NS_FATAL_ERROR ("Unknown UL CQI type");
}
- switch (params.m_ulCqi.m_type)
+ switch (params.m_ulCqi.m_type)
{
- case UlCqi_s::PUSCH:
- {
- std::map <uint16_t, std::vector <uint16_t> >::iterator itMap;
- std::map <uint16_t, std::vector <double> >::iterator itCqi;
- itMap = m_allocationMaps.find (params.m_sfnSf);
- if (itMap == m_allocationMaps.end ())
- {
- NS_LOG_DEBUG (this << " Does not find info on allocation, size : " << m_allocationMaps.size ());
- return;
- }
- for (uint32_t i = 0; i < (*itMap).second.size (); i++)
- {
- // convert from fixed point notation Sxxxxxxxxxxx.xxx to double
- // NS_LOG_INFO (this << " i " << i << " size " << params.m_ulCqi.m_sinr.size () << " mapSIze " << (*itMap).second.size ());
- double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (i));
- //NS_LOG_DEBUG (this << " UE " << (*itMap).second.at (i) << " SINRfp " << params.m_ulCqi.m_sinr.at (i) << " sinrdb " << sinr);
- itCqi = m_ueCqi.find ((*itMap).second.at (i));
- if (itCqi == m_ueCqi.end ())
- {
- // create a new entry
- std::vector <double> newCqi;
- for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
- {
- if (i == j)
- {
- newCqi.push_back (sinr);
- }
- else
- {
- // initialize with NO_SINR value.
- newCqi.push_back (30.0);
- }
-
- }
- m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > ((*itMap).second.at (i), newCqi));
- // generate correspondent timer
- m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > ((*itMap).second.at (i), m_cqiTimersThreshold));
- }
- else
- {
- // update the value
- (*itCqi).second.at (i) = sinr;
- // update correspondent timer
- std::map <uint16_t, uint32_t>::iterator itTimers;
- itTimers = m_ueCqiTimers.find ((*itMap).second.at (i));
- (*itTimers).second = m_cqiTimersThreshold;
-
- }
-
- }
- // remove obsolete info on allocation
- m_allocationMaps.erase (itMap);
- }
+ case UlCqi_s::PUSCH:
+ {
+ std::map <uint16_t, std::vector <uint16_t> >::iterator itMap;
+ std::map <uint16_t, std::vector <double> >::iterator itCqi;
+ itMap = m_allocationMaps.find (params.m_sfnSf);
+ if (itMap == m_allocationMaps.end ())
+ {
+ NS_LOG_INFO (this << " Does not find info on allocation, size : " << m_allocationMaps.size ());
+ return;
+ }
+ for (uint32_t i = 0; i < (*itMap).second.size (); i++)
+ {
+ // convert from fixed point notation Sxxxxxxxxxxx.xxx to double
+ double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (i));
+ itCqi = m_ueCqi.find ((*itMap).second.at (i));
+ if (itCqi == m_ueCqi.end ())
+ {
+ // create a new entry
+ std::vector <double> newCqi;
+ for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
+ {
+ if (i == j)
+ {
+ newCqi.push_back (sinr);
+ }
+ else
+ {
+ // initialize with NO_SINR value.
+ newCqi.push_back (30.0);
+ }
+
+ }
+ m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > ((*itMap).second.at (i), newCqi));
+ // generate correspondent timer
+ m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > ((*itMap).second.at (i), m_cqiTimersThreshold));
+ }
+ else
+ {
+ // update the value
+ (*itCqi).second.at (i) = sinr;
+ // update correspondent timer
+ std::map <uint16_t, uint32_t>::iterator itTimers;
+ itTimers = m_ueCqiTimers.find ((*itMap).second.at (i));
+ (*itTimers).second = m_cqiTimersThreshold;
+
+ }
+
+ }
+ // remove obsolete info on allocation
+ m_allocationMaps.erase (itMap);
+ }
break;
- case UlCqi_s::SRS:
- {
- // get the RNTI from vendor specific parameters
- uint16_t rnti;
- NS_ASSERT (params.m_vendorSpecificList.size () > 0);
- for (uint16_t i = 0; i < params.m_vendorSpecificList.size (); i++)
- {
- if (params.m_vendorSpecificList.at (i).m_type == SRS_CQI_RNTI_VSP)
- {
- Ptr<SrsCqiRntiVsp> vsp = DynamicCast<SrsCqiRntiVsp> (params.m_vendorSpecificList.at (i).m_value);
- rnti = vsp->GetRnti ();
- }
- }
- std::map <uint16_t, std::vector <double> >::iterator itCqi;
- itCqi = m_ueCqi.find (rnti);
- if (itCqi == m_ueCqi.end ())
- {
- // create a new entry
- std::vector <double> newCqi;
- for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
- {
- double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
- newCqi.push_back (sinr);
- NS_LOG_DEBUG (this << " RNTI " << rnti << " new SRS-CQI for RB " << j << " value " << sinr);
-
- }
- m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > (rnti, newCqi));
- // generate correspondent timer
- m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > (rnti, m_cqiTimersThreshold));
- }
- else
- {
- // update the values
- for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
- {
- double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
- (*itCqi).second.at (j) = sinr;
- NS_LOG_DEBUG (this << " RNTI " << rnti << " update SRS-CQI for RB " << j << " value " << sinr);
- }
- // update correspondent timer
- std::map <uint16_t, uint32_t>::iterator itTimers;
- itTimers = m_ueCqiTimers.find (rnti);
- (*itTimers).second = m_cqiTimersThreshold;
-
- }
-
-
- }
+ case UlCqi_s::SRS:
+ {
+ // get the RNTI from vendor specific parameters
+ uint16_t rnti = 0;
+ NS_ASSERT (params.m_vendorSpecificList.size () > 0);
+ for (uint16_t i = 0; i < params.m_vendorSpecificList.size (); i++)
+ {
+ if (params.m_vendorSpecificList.at (i).m_type == SRS_CQI_RNTI_VSP)
+ {
+ Ptr<SrsCqiRntiVsp> vsp = DynamicCast<SrsCqiRntiVsp> (params.m_vendorSpecificList.at (i).m_value);
+ rnti = vsp->GetRnti ();
+ }
+ }
+ std::map <uint16_t, std::vector <double> >::iterator itCqi;
+ itCqi = m_ueCqi.find (rnti);
+ if (itCqi == m_ueCqi.end ())
+ {
+ // create a new entry
+ std::vector <double> newCqi;
+ for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
+ {
+ double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
+ newCqi.push_back (sinr);
+ NS_LOG_INFO (this << " RNTI " << rnti << " new SRS-CQI for RB " << j << " value " << sinr);
+
+ }
+ m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > (rnti, newCqi));
+ // generate correspondent timer
+ m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > (rnti, m_cqiTimersThreshold));
+ }
+ else
+ {
+ // update the values
+ for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
+ {
+ double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
+ (*itCqi).second.at (j) = sinr;
+ NS_LOG_INFO (this << " RNTI " << rnti << " update SRS-CQI for RB " << j << " value " << sinr);
+ }
+ // update correspondent timer
+ std::map <uint16_t, uint32_t>::iterator itTimers;
+ itTimers = m_ueCqiTimers.find (rnti);
+ (*itTimers).second = m_cqiTimersThreshold;
+
+ }
+
+
+ }
break;
- case UlCqi_s::PUCCH_1:
- case UlCqi_s::PUCCH_2:
- case UlCqi_s::PRACH:
- {
- NS_FATAL_ERROR ("PfFfMacScheduler supports only PUSCH and SRS UL-CQIs");
- }
+ case UlCqi_s::PUCCH_1:
+ case UlCqi_s::PUCCH_2:
+ case UlCqi_s::PRACH:
+ {
+ NS_FATAL_ERROR ("RrFfMacScheduler supports only PUSCH and SRS UL-CQIs");
+ }
break;
- default:
- NS_FATAL_ERROR ("Unknown type of UL-CQI");
+ default:
+ NS_FATAL_ERROR ("Unknown type of UL-CQI");
}
return;
}
void
-RrFfMacScheduler::RefreshDlCqiMaps(void)
+RrFfMacScheduler::RefreshDlCqiMaps (void)
{
NS_LOG_FUNCTION (this << m_p10CqiTimers.size ());
// refresh DL CQI P01 Map
std::map <uint16_t,uint32_t>::iterator itP10 = m_p10CqiTimers.begin ();
- while (itP10!=m_p10CqiTimers.end ())
+ while (itP10 != m_p10CqiTimers.end ())
{
NS_LOG_INFO (this << " P10-CQI for user " << (*itP10).first << " is " << (uint32_t)(*itP10).second << " thr " << (uint32_t)m_cqiTimersThreshold);
if ((*itP10).second == 0)
@@ -1043,17 +1584,17 @@
itP10++;
}
}
-
+
return;
}
void
-RrFfMacScheduler::RefreshUlCqiMaps(void)
+RrFfMacScheduler::RefreshUlCqiMaps (void)
{
// refresh UL CQI Map
std::map <uint16_t,uint32_t>::iterator itUl = m_ueCqiTimers.begin ();
- while (itUl!=m_ueCqiTimers.end ())
+ while (itUl != m_ueCqiTimers.end ())
{
NS_LOG_INFO (this << " UL-CQI for user " << (*itUl).first << " is " << (uint32_t)(*itUl).second << " thr " << (uint32_t)m_cqiTimersThreshold);
if ((*itUl).second == 0)
@@ -1074,7 +1615,7 @@
itUl++;
}
}
-
+
return;
}
@@ -1087,7 +1628,7 @@
{
if (((*it).m_rnti == rnti) && ((*it).m_logicalChannelIdentity))
{
-// NS_LOG_DEBUG (this << " UE " << rnti << " LC " << (uint16_t)lcid << " txqueue " << (*it).m_rlcTransmissionQueueSize << " retxqueue " << (*it).m_rlcRetransmissionQueueSize << " status " << (*it).m_rlcStatusPduSize << " decrease " << size);
+ NS_LOG_INFO (this << " UE " << rnti << " LC " << (uint16_t)lcid << " txqueue " << (*it).m_rlcTransmissionQueueSize << " retxqueue " << (*it).m_rlcRetransmissionQueueSize << " status " << (*it).m_rlcStatusPduSize << " decrease " << size);
// Update queues: RLC tx order Status, ReTx, Tx
// Update status queue
if ((*it).m_rlcStatusPduSize <= size)
@@ -1100,7 +1641,7 @@
(*it).m_rlcStatusPduSize -= size;
return;
}
- // update retransmission queue
+ // update retransmission queue
if ((*it).m_rlcRetransmissionQueueSize <= size)
{
size -= (*it).m_rlcRetransmissionQueueSize;
@@ -1133,9 +1674,9 @@
size = size - 2; // remove the minimum RLC overhead
std::map <uint16_t,uint32_t>::iterator it = m_ceBsrRxed.find (rnti);
- if (it!=m_ceBsrRxed.end ())
+ if (it != m_ceBsrRxed.end ())
{
-// NS_LOG_DEBUG (this << " Update RLC BSR UE " << rnti << " size " << size << " BSR " << (*it).second);
+ NS_LOG_INFO (this << " Update RLC BSR UE " << rnti << " size " << size << " BSR " << (*it).second);
if ((*it).second >= size)
{
(*it).second -= size;
@@ -1149,7 +1690,7 @@
{
NS_LOG_ERROR (this << " Does not find BSR report info of UE " << rnti);
}
-
+
}