--- a/src/lte/model/rr-ff-mac-scheduler.cc Tue Nov 06 16:40:04 2012 +0100
+++ b/src/lte/model/rr-ff-mac-scheduler.cc Tue Nov 06 17:42:40 2012 +0100
@@ -19,7 +19,7 @@
*/
#ifdef __FreeBSD__
-#define log2(x) (log(x) / M_LN2)
+#define log2(x) (log (x) / M_LN2)
#endif /* __FreeBSD__ */
#include <ns3/log.h>
@@ -262,7 +262,7 @@
BooleanValue (true),
MakeBooleanAccessor (&RrFfMacScheduler::m_harqOn),
MakeBooleanChecker ())
- ;
+ ;
return tid;
}
@@ -309,7 +309,7 @@
{
NS_LOG_FUNCTION (this << " RNTI " << params.m_rnti << " txMode " << (uint16_t)params.m_transmissionMode);
std::map <uint16_t,uint8_t>::iterator it = m_uesTxMode.find (params.m_rnti);
- if (it==m_uesTxMode.end ())
+ if (it == m_uesTxMode.end ())
{
m_uesTxMode.insert (std::pair <uint16_t, double> (params.m_rnti, params.m_transmissionMode));
// generate HARQ buffers
@@ -431,7 +431,7 @@
bool
RrFfMacScheduler::SortRlcBufferReq (FfMacSchedSapProvider::SchedDlRlcBufferReqParameters i,FfMacSchedSapProvider::SchedDlRlcBufferReqParameters j)
{
- return (i.m_rnti<j.m_rnti);
+ return (i.m_rnti < j.m_rnti);
}
@@ -439,14 +439,14 @@
RrFfMacScheduler::HarqProcessAvailability (uint16_t rnti)
{
NS_LOG_FUNCTION (this << rnti);
-
+
std::map <uint16_t, uint8_t>::iterator it = m_dlHarqCurrentProcessId.find (rnti);
- if (it==m_dlHarqCurrentProcessId.end ())
+ if (it == m_dlHarqCurrentProcessId.end ())
{
NS_FATAL_ERROR ("No Process Id found for this RNTI " << rnti);
}
std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find (rnti);
- if (itStat==m_dlHarqProcessesStatus.end ())
+ if (itStat == m_dlHarqProcessesStatus.end ())
{
NS_FATAL_ERROR ("No Process Id Statusfound for this RNTI " << rnti);
}
@@ -454,9 +454,9 @@
do
{
i = (i + 1) % HARQ_PROC_NUM;
- // NS_LOG_DEBUG (this << " check i " << (uint16_t)i << " stat " << (uint16_t)(*itStat).second.at (i));
- } while ( ((*itStat).second.at (i)!=0)&&(i!=(*it).second));
- if ((*itStat).second.at (i)==0)
+ }
+  while (((*itStat).second.at (i) != 0) && (i != (*it).second));
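+  // i now points either to a free HARQ process (status 0) or back to the current process id if none is available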
+ if ((*itStat).second.at (i) == 0)
{
return (true);
}
@@ -480,22 +480,22 @@
}
std::map <uint16_t, uint8_t>::iterator it = m_dlHarqCurrentProcessId.find (rnti);
- if (it==m_dlHarqCurrentProcessId.end ())
+ if (it == m_dlHarqCurrentProcessId.end ())
{
NS_FATAL_ERROR ("No Process Id found for this RNTI " << rnti);
}
std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find (rnti);
- if (itStat==m_dlHarqProcessesStatus.end ())
+ if (itStat == m_dlHarqProcessesStatus.end ())
{
NS_FATAL_ERROR ("No Process Id Statusfound for this RNTI " << rnti);
}
uint8_t i = (*it).second;
do
- {
- i = (i + 1) % HARQ_PROC_NUM;
-// NS_LOG_DEBUG (this << " check i " << (uint16_t)i << " stat " << (uint16_t)(*itStat).second.at (i));
- } while ( ((*itStat).second.at (i)!=0)&&(i!=(*it).second));
- if ((*itStat).second.at (i)==0)
+ {
+ i = (i + 1) % HARQ_PROC_NUM;
+ }
+  while (((*itStat).second.at (i) != 0) && (i != (*it).second));
+ if ((*itStat).second.at (i) == 0)
{
(*it).second = i;
(*itStat).second.at (i) = 1;
@@ -504,7 +504,7 @@
{
return (9); // return a not valid harq proc id
}
-
+
return ((*it).second);
}
@@ -514,7 +514,7 @@
{
NS_LOG_FUNCTION (this << " DL Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf));
// API generated by RLC for triggering the scheduling of a DL subframe
-
+
RefreshDlCqiMaps ();
int rbgSize = GetRbgSize (m_cschedCellConfig.m_dlBandwidth);
int rbgNum = m_cschedCellConfig.m_dlBandwidth / rbgSize;
@@ -524,43 +524,42 @@
std::vector <bool> rbgMap;
uint16_t rbgAllocatedNum = 0;
std::set <uint16_t> rntiAllocated;
- rbgMap.resize (m_cschedCellConfig.m_dlBandwidth/rbgSize, false);
+ rbgMap.resize (m_cschedCellConfig.m_dlBandwidth / rbgSize, false);
// Process DL HARQ feedback
// retrieve past HARQ retx buffered
- if (m_dlInfoListBuffered.size ()>0)
+ if (m_dlInfoListBuffered.size () > 0)
{
- if (params.m_dlInfoList.size ()>0)
+ if (params.m_dlInfoList.size () > 0)
{
- NS_LOG_DEBUG (this << " RECEIVED DL-HARQ");
+ NS_LOG_INFO (this << " Received DL-HARQ feedback");
m_dlInfoListBuffered.insert (m_dlInfoListBuffered.end (), params.m_dlInfoList.begin (), params.m_dlInfoList.end ());
}
}
else
{
- if (params.m_dlInfoList.size ()>0)
+ if (params.m_dlInfoList.size () > 0)
{
m_dlInfoListBuffered = params.m_dlInfoList;
}
}
if (m_harqOn == false)
{
- // Ignore HARQ feedbacks
+ // Ignore HARQ feedback
m_dlInfoListBuffered.clear ();
- NS_LOG_DEBUG (this << " HARQ OFF");
}
std::vector <struct DlInfoListElement_s> dlInfoListUntxed;
for (uint8_t i = 0; i < m_dlInfoListBuffered.size (); i++)
{
std::set <uint16_t>::iterator itRnti = rntiAllocated.find (m_dlInfoListBuffered.at (i).m_rnti);
- if (itRnti!=rntiAllocated.end ())
+ if (itRnti != rntiAllocated.end ())
{
// RNTI already allocated for retx
continue;
}
uint8_t nLayers = m_dlInfoListBuffered.at (i).m_harqStatus.size ();
std::vector <bool> retx;
- NS_LOG_DEBUG (this << " Processing DLHARQ-FEEDBACK");
+      NS_LOG_INFO (this << " Processing DL-HARQ feedback");
if (nLayers == 1)
{
retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlInfoListElement_s::NACK);
@@ -576,68 +575,63 @@
// retrieve HARQ process information
uint16_t rnti = m_dlInfoListBuffered.at (i).m_rnti;
uint8_t harqId = m_dlInfoListBuffered.at (i).m_harqProcessId;
- NS_LOG_DEBUG (this << " HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId);
+ NS_LOG_INFO (this << " HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId);
std::map <uint16_t, DlHarqProcessesDciBuffer_t>::iterator itHarq = m_dlHarqProcessesDciBuffer.find (rnti);
- if (itHarq==m_dlHarqProcessesDciBuffer.end ())
+ if (itHarq == m_dlHarqProcessesDciBuffer.end ())
{
NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << rnti);
}
DlDciListElement_s dci = (*itHarq).second.at (harqId);
int rv = 0;
- if (dci.m_rv.size ()==1)
+ if (dci.m_rv.size () == 1)
{
rv = dci.m_rv.at (0);
}
- else
+ else
{
rv = (dci.m_rv.at (0) > dci.m_rv.at (1) ? dci.m_rv.at (0) : dci.m_rv.at (1));
}
-
+
if (rv == 3)
{
// maximum number of retx reached -> drop process
- NS_LOG_DEBUG ("Max number of retransmissions reached -> drop process");
+ NS_LOG_INFO ("Max number of retransmissions reached -> drop process");
std::map <uint16_t, DlHarqProcessesStatus_t>::iterator it = m_dlHarqProcessesStatus.find (rnti);
- if (it==m_dlHarqProcessesStatus.end ())
- {
- NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << m_dlInfoListBuffered.at (i).m_rnti);
- }
+ if (it == m_dlHarqProcessesStatus.end ())
+ {
+              NS_FATAL_ERROR ("No info found in HARQ buffer for UE " << m_dlInfoListBuffered.at (i).m_rnti);
+ }
(*it).second.at (harqId) = 0;
std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (rnti);
- if (itRlcPdu==m_dlHarqProcessesRlcPduListBuffer.end ())
- {
- NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti);
- }
+ if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
+ {
+              NS_FATAL_ERROR ("Unable to find RlcPduList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti);
+ }
for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++)
- {
- (*itRlcPdu).second.at (k).at (harqId).clear ();
- }
+ {
+ (*itRlcPdu).second.at (k).at (harqId).clear ();
+ }
continue;
}
// check the feasibility of retransmitting on the same RBGs
// translate the DCI to Spectrum framework
std::vector <int> dciRbg;
uint32_t mask = 0x1;
- NS_LOG_DEBUG ("Original RBGs " << dci.m_rbBitmap << " rnti " << dci.m_rnti);
+ NS_LOG_INFO ("Original RBGs " << dci.m_rbBitmap << " rnti " << dci.m_rnti);
for (int j = 0; j < 32; j++)
{
if (((dci.m_rbBitmap & mask) >> j) == 1)
{
dciRbg.push_back (j);
- NS_LOG_DEBUG ("\t"<<j);
-// for (int k = 0; k < rbgSize; k++)
-// {
-// dciRb.push_back ((j * rbgSize) + k);
-// //NS_LOG_DEBUG(this << "DL-DCI allocated PRB " << (i*GetRbgSize()) + k);
-// }
+ NS_LOG_INFO ("\t" << j);
}
mask = (mask << 1);
}
bool free = true;
for (uint8_t j = 0; j < dciRbg.size (); j++)
{
- if (rbgMap.at (dciRbg.at (j))==true)
+ if (rbgMap.at (dciRbg.at (j)) == true)
{
free = false;
break;
@@ -650,11 +644,11 @@
for (uint8_t j = 0; j < dciRbg.size (); j++)
{
rbgMap.at (dciRbg.at (j)) = true;
- NS_LOG_DEBUG ("RBG " << dciRbg.at (j) << " assigned");
+ NS_LOG_INFO ("RBG " << dciRbg.at (j) << " assigned");
rbgAllocatedNum++;
}
- NS_LOG_DEBUG (this << " Send retx in the same RBGs");
+ NS_LOG_INFO (this << " Send retx in the same RBGs");
}
else
{
@@ -663,7 +657,7 @@
uint8_t rbgId = (dciRbg.at (dciRbg.size () - 1) + 1) % rbgNum;
uint8_t startRbg = dciRbg.at (dciRbg.size () - 1);
std::vector <bool> rbgMapCopy = rbgMap;
- while ((j < dciRbg.size ())&&(startRbg!=rbgId))
+              while ((j < dciRbg.size ()) && (startRbg != rbgId))
{
if (rbgMapCopy.at (rbgId) == false)
{
@@ -680,24 +674,23 @@
for (uint16_t k = 0; k < dciRbg.size (); k++)
{
rbgMask = rbgMask + (0x1 << dciRbg.at (k));
- // NS_LOG_DEBUG (this << " Allocated PRB " << (*itMap).second.at (k));
+                  NS_LOG_INFO (this << " Newly allocated RBG " << dciRbg.at (k));
rbgAllocatedNum++;
}
dci.m_rbBitmap = rbgMask;
rbgMap = rbgMapCopy;
- NS_LOG_DEBUG (this << " Move retx in RBGs " << dciRbg.size ());
}
else
{
// HARQ retx cannot be performed on this TTI -> store it
dlInfoListUntxed.push_back (params.m_dlInfoList.at (i));
- NS_LOG_DEBUG (this << " No resource for this retx -> buffer it");
+ NS_LOG_INFO (this << " No resource for this retx -> buffer it");
}
}
// retrieve RLC PDU list for retx TBsize and update DCI
BuildDataListElement_s newEl;
std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (rnti);
- if (itRlcPdu==m_dlHarqProcessesRlcPduListBuffer.end ())
+ if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
{
NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << rnti);
}
@@ -712,15 +705,15 @@
dci.m_rv.push_back (0);
dci.m_mcs.push_back (0);
dci.m_tbsSize.push_back (0);
- NS_LOG_DEBUG (this << " layer " << (uint16_t)j << " no txed (MIMO transition)");
-
+                  NS_LOG_INFO (this << " layer " << (uint16_t)j << " not transmitted (MIMO transition)");
+
}
else
{
dci.m_ndi.at (j) = 0;
- dci.m_rv.at (j) ++;
+ dci.m_rv.at (j)++;
(*itHarq).second.at (harqId).m_rv.at (j)++;
- NS_LOG_DEBUG (this << " layer " << (uint16_t)j << " RV " << (uint16_t)dci.m_rv.at (j));
+ NS_LOG_INFO (this << " layer " << (uint16_t)j << " RV " << (uint16_t)dci.m_rv.at (j));
}
}
else
@@ -730,7 +723,7 @@
dci.m_rv.at (j) = 0;
dci.m_mcs.at (j) = 0;
dci.m_tbsSize.at (j) = 0;
- NS_LOG_DEBUG (this << " layer " << (uint16_t)j << " no retx");
+ NS_LOG_INFO (this << " layer " << (uint16_t)j << " no retx");
}
}
@@ -748,7 +741,7 @@
}
}
- if (rlcPduListPerLc.size ()>0)
+ if (rlcPduListPerLc.size () > 0)
{
newEl.m_rlcPduList.push_back (rlcPduListPerLc);
}
@@ -762,24 +755,24 @@
else
{
// update HARQ process status
- NS_LOG_DEBUG (this << " RR HARQ ACK UE " << m_dlInfoListBuffered.at (i).m_rnti);
+ NS_LOG_INFO (this << " HARQ ACK UE " << m_dlInfoListBuffered.at (i).m_rnti);
std::map <uint16_t, DlHarqProcessesStatus_t>::iterator it = m_dlHarqProcessesStatus.find (m_dlInfoListBuffered.at (i).m_rnti);
- if (it==m_dlHarqProcessesStatus.end ())
+ if (it == m_dlHarqProcessesStatus.end ())
{
NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << m_dlInfoListBuffered.at (i).m_rnti);
}
(*it).second.at (m_dlInfoListBuffered.at (i).m_harqProcessId) = 0;
std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (m_dlInfoListBuffered.at (i).m_rnti);
- if (itRlcPdu==m_dlHarqProcessesRlcPduListBuffer.end ())
+ if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
{
NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti);
}
- for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++)
- {
- (*itRlcPdu).second.at (k).at (m_dlInfoListBuffered.at (i).m_harqProcessId).clear ();
- }
+ for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++)
+ {
+ (*itRlcPdu).second.at (k).at (m_dlInfoListBuffered.at (i).m_harqProcessId).clear ();
+ }
}
- }
+ }
m_dlInfoListBuffered.clear ();
m_dlInfoListBuffered = dlInfoListUntxed;
@@ -796,11 +789,11 @@
// remove old entries of this UE-LC
std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).m_rnti);
if ( (((*it).m_rlcTransmissionQueueSize > 0)
- || ((*it).m_rlcRetransmissionQueueSize > 0)
- || ((*it).m_rlcStatusPduSize > 0))
+ || ((*it).m_rlcRetransmissionQueueSize > 0)
+ || ((*it).m_rlcStatusPduSize > 0))
&& (itRnti == rntiAllocated.end ()) // UE must not be allocated for HARQ retx
&& (HarqProcessAvailability ((*it).m_rnti)) ) // UE needs HARQ proc free
-
+
{
std::map <uint16_t,uint8_t>::iterator itCqi = m_p10CqiRxed.find ((*it).m_rnti);
uint8_t cqi = 0;
@@ -826,22 +819,22 @@
lcActivesPerRnti.insert (std::pair<uint16_t, uint8_t > ((*it).m_rnti, 1));
nTbs++;
}
-
+
}
}
}
-
+
if (nflows == 0)
{
- if (ret.m_buildDataList.size ()>0)
+ if (ret.m_buildDataList.size () > 0)
{
m_schedSapUser->SchedDlConfigInd (ret);
}
- return;
+ return;
}
// Divide the resource equally among the active users according to
// Resource allocation type 0 (see sec 7.1.6.1 of 36.213)
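+  // e.g., with a 25-RB downlink the RBG size is 2 (36.213 Table 7.1.6.1-1), so rbgNum = 12; with 4 active TBs and no RBGs held by HARQ retx, rbgPerTb = 12 / 4 = 3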
-
+
int rbgPerTb = (rbgNum - rbgAllocatedNum) / nTbs;
if (rbgPerTb == 0)
{
@@ -876,7 +869,7 @@
{
itLcRnti = lcActivesPerRnti.find ((*it).m_rnti);
std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).m_rnti);
- if ((itLcRnti == lcActivesPerRnti.end ())||(itRnti!=rntiAllocated.end ()))
+      if ((itLcRnti == lcActivesPerRnti.end ()) || (itRnti != rntiAllocated.end ()))
{
// skip this entry (no active queue or yet allocated for HARQ)
it++;
@@ -888,12 +881,11 @@
continue;
}
itTxMode = m_uesTxMode.find ((*it).m_rnti);
- if (itTxMode == m_uesTxMode.end())
+ if (itTxMode == m_uesTxMode.end ())
{
NS_FATAL_ERROR ("No Transmission Mode info on user " << (*it).m_rnti);
}
int nLayer = TransmissionModesLayers::TxMode2LayerNum ((*itTxMode).second);
- NS_LOG_DEBUG (this << " NLAYERS " << nLayer);
int lcNum = (*itLcRnti).second;
// create new BuildDataListElement_s for this RNTI
BuildDataListElement_s newEl;
@@ -905,7 +897,7 @@
newDci.m_resAlloc = 0;
newDci.m_rbBitmap = 0;
std::map <uint16_t,uint8_t>::iterator itCqi = m_p10CqiRxed.find (newEl.m_rnti);
- for (uint8_t i = 0; i < nLayer; i++)
+ for (uint8_t i = 0; i < nLayer; i++)
{
if (itCqi == m_p10CqiRxed.end ())
{
@@ -918,7 +910,7 @@
}
int tbSize = (m_amc->GetTbSizeFromMcs (newDci.m_mcs.at (0), rbgPerTb * rbgSize) / 8);
uint16_t rlcPduSize = tbSize / lcNum;
- while (lcNum>0)
+ while (lcNum > 0)
{
if ( ((*it).m_rlcTransmissionQueueSize > 0)
|| ((*it).m_rlcRetransmissionQueueSize > 0)
@@ -929,16 +921,16 @@
{
RlcPduListElement_s newRlcEl;
newRlcEl.m_logicalChannelIdentity = (*it).m_logicalChannelIdentity;
- // NS_LOG_DEBUG (this << "LCID " << (uint32_t) newRlcEl.m_logicalChannelIdentity << " size " << rlcPduSize << " ID " << (*it).m_rnti << " layer " << (uint16_t)j);
+              NS_LOG_INFO (this << " LCID " << (uint32_t) newRlcEl.m_logicalChannelIdentity << " size " << rlcPduSize << " ID " << (*it).m_rnti << " layer " << (uint16_t)j);
newRlcEl.m_size = rlcPduSize;
UpdateDlRlcBufferInfo ((*it).m_rnti, newRlcEl.m_logicalChannelIdentity, rlcPduSize);
newRlcPduLe.push_back (newRlcEl);
-
+
if (m_harqOn == true)
{
// store RLC PDU list for HARQ
std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find ((*it).m_rnti);
- if (itRlcPdu==m_dlHarqProcessesRlcPduListBuffer.end ())
+ if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
{
NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << (*it).m_rnti);
}
@@ -958,18 +950,18 @@
}
uint32_t rbgMask = 0;
uint16_t i = 0;
- NS_LOG_DEBUG (this << " DL - Allocate user " << newEl.m_rnti << " LCs " << (uint16_t)(*itLcRnti).second << " bytes " << tbSize << " mcs " << (uint16_t) newDci.m_mcs.at (0) << " harqId " << (uint16_t)newDci.m_harqProcess << " layers " << nLayer);
+ NS_LOG_INFO (this << " DL - Allocate user " << newEl.m_rnti << " LCs " << (uint16_t)(*itLcRnti).second << " bytes " << tbSize << " mcs " << (uint16_t) newDci.m_mcs.at (0) << " harqId " << (uint16_t)newDci.m_harqProcess << " layers " << nLayer);
NS_LOG_DEBUG ("RBG:");
while (i < rbgPerTb)
{
- if (rbgMap.at (rbgAllocated)==false)
+ if (rbgMap.at (rbgAllocated) == false)
{
rbgMask = rbgMask + (0x1 << rbgAllocated);
NS_LOG_DEBUG ("\t " << rbgAllocated);
i++;
rbgMap.at (rbgAllocated) = true;
}
- rbgAllocated++;
+ rbgAllocated++;
}
newDci.m_rbBitmap = rbgMask; // (32 bit bitmap see 7.1.6 of 36.213)
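+      // e.g., RBGs 3, 4 and 5 allocated to this TB -> rbgMask = 0x8 + 0x10 + 0x20 = 0x38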
@@ -984,7 +976,7 @@
{
// store DCI for HARQ
std::map <uint16_t, DlHarqProcessesDciBuffer_t>::iterator itDci = m_dlHarqProcessesDciBuffer.find (newEl.m_rnti);
- if (itDci==m_dlHarqProcessesDciBuffer.end ())
+ if (itDci == m_dlHarqProcessesDciBuffer.end ())
{
NS_FATAL_ERROR ("Unable to find RNTI entry in DCI HARQ buffer for RNTI " << (*it).m_rnti);
}
@@ -995,7 +987,6 @@
ret.m_buildDataList.push_back (newEl);
if (rbgAllocated == rbgNum)
{
- //NS_LOG_DEBUG (this << " FULL " << (*it).m_rnti);
m_nextRntiDl = (*it).m_rnti; // store last RNTI served
break; // no more RGB to be allocated
}
@@ -1049,7 +1040,7 @@
}
else if ( params.m_cqiList.at (i).m_cqiType == CqiListElement_s::A30 )
{
- // subband CQI reporting high layer configured
+          // subband CQI reporting configured by higher layers
// Not used by RR Scheduler
}
else
@@ -1078,50 +1069,50 @@
rbgAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
rbMap.resize (m_cschedCellConfig.m_ulBandwidth, false);
-
+
// Process UL HARQ feedback
// update UL HARQ proc id
std::map <uint16_t, uint8_t>::iterator itProcId;
- for (itProcId = m_ulHarqCurrentProcessId.begin (); itProcId!= m_ulHarqCurrentProcessId.end (); itProcId++)
+ for (itProcId = m_ulHarqCurrentProcessId.begin (); itProcId != m_ulHarqCurrentProcessId.end (); itProcId++)
{
(*itProcId).second = ((*itProcId).second + 1) % HARQ_PROC_NUM;
}
for (uint8_t i = 0; i < params.m_ulInfoList.size (); i++)
{
- if (params.m_ulInfoList.at (i).m_receptionStatus==UlInfoListElement_s::NotOk)
+ if (params.m_ulInfoList.at (i).m_receptionStatus == UlInfoListElement_s::NotOk)
{
// retx correspondent block: retrieve the UL-DCI
uint16_t rnti = params.m_ulInfoList.at (i).m_rnti;
uint8_t harqId = (uint8_t)((*itProcId).second - HARQ_PERIOD) % HARQ_PROC_NUM;
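+          // step back HARQ_PERIOD process ids (modulo HARQ_PROC_NUM) from the current one to recover the process of the failed transmission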
- NS_LOG_DEBUG (this << " UL-HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId);
+ NS_LOG_INFO (this << " UL-HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId);
std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itHarq = m_ulHarqProcessesDciBuffer.find (rnti);
- if (itHarq==m_ulHarqProcessesDciBuffer.end ())
+ if (itHarq == m_ulHarqProcessesDciBuffer.end ())
{
NS_FATAL_ERROR ("No info find in UL-HARQ buffer for UE " << rnti);
}
itProcId = m_ulHarqCurrentProcessId.find (rnti);
- if (itProcId==m_ulHarqCurrentProcessId.end ())
+ if (itProcId == m_ulHarqCurrentProcessId.end ())
{
NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << rnti);
}
UlDciListElement_s dci = (*itHarq).second.at (harqId);
std::map <uint16_t, UlHarqProcessesStatus_t>::iterator itStat = m_ulHarqProcessesStatus.find (rnti);
- if (itStat==m_ulHarqProcessesStatus.end ())
+ if (itStat == m_ulHarqProcessesStatus.end ())
{
NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << rnti);
}
- if ((*itStat).second.at (harqId)>3)
+ if ((*itStat).second.at (harqId) > 3)
{
- NS_LOG_DEBUG ("Max number of retransmissions reached (UL)-> drop process");
+              NS_LOG_INFO ("Max number of retransmissions reached (UL) -> drop process");
continue;
}
bool free = true;
for (int j = dci.m_rbStart; j < dci.m_rbStart + dci.m_rbLen; j++)
{
- if (rbMap.at (j)==true)
+ if (rbMap.at (j) == true)
{
free = false;
- NS_LOG_DEBUG (this << " BUSY " << j);
+ NS_LOG_INFO (this << " BUSY " << j);
}
}
if (free)
@@ -1131,10 +1122,10 @@
{
rbMap.at (j) = true;
rbgAllocationMap.at (j) = dci.m_rnti;
- NS_LOG_DEBUG ("\t" << j);
+ NS_LOG_INFO ("\t" << j);
rbAllocatedNum++;
}
- NS_LOG_DEBUG (this << " Send retx in the same RBGs " << (uint16_t)dci.m_rbStart << " to " << dci.m_rbStart + dci.m_rbLen);
+              NS_LOG_INFO (this << " Send retx in the same PRBs " << (uint16_t)dci.m_rbStart << " to " << dci.m_rbStart + dci.m_rbLen);
}
else
{
@@ -1146,8 +1137,8 @@
}
}
-
- std::map <uint16_t,uint32_t>::iterator it;
+
+ std::map <uint16_t,uint32_t>::iterator it;
int nflows = 0;
for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++)
@@ -1162,19 +1153,18 @@
if (nflows == 0)
{
- return ; // no flows to be scheduled
+ return; // no flows to be scheduled
}
// Divide the remaining resources equally among the active users starting from the subsequent one served last scheduling trigger
- //uint16_t rbPerFlow = (m_cschedCellConfig.m_ulBandwidth - rbAllocatedNum) / nflows;
uint16_t rbPerFlow = (m_cschedCellConfig.m_ulBandwidth) / (nflows + rntiAllocated.size ());
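+  // e.g., 25 UL RBs shared among 4 new flows plus 1 UE already holding a HARQ retx -> rbPerFlow = 25 / 5 = 5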
if (rbPerFlow == 0)
{
rbPerFlow = 1; // at least 1 rbg per flow (till available resource)
}
uint16_t rbAllocated = 0;
-
+
if (m_nextRntiUl != 0)
{
for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++)
@@ -1194,11 +1184,11 @@
it = m_ceBsrRxed.begin ();
m_nextRntiUl = (*it).first;
}
- NS_LOG_DEBUG (this << " rbPerFlow " << rbPerFlow);
+  NS_LOG_INFO (this << " RB per flow " << rbPerFlow);
do
{
std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first);
- if ((itRnti!=rntiAllocated.end ())||((*it).second == 0))
+      if ((itRnti != rntiAllocated.end ()) || ((*it).second == 0))
{
// UE already allocated for UL-HARQ -> skip it
it++;
@@ -1214,12 +1204,12 @@
// limit to physical resources last resource assignment
rbPerFlow = m_cschedCellConfig.m_ulBandwidth - rbAllocated;
}
-
+
UlDciListElement_s uldci;
uldci.m_rnti = (*it).first;
uldci.m_rbLen = rbPerFlow;
bool allocated = false;
- while ((!allocated)&&(rbAllocated<m_cschedCellConfig.m_ulBandwidth))
+      while ((!allocated) && (rbAllocated < m_cschedCellConfig.m_ulBandwidth))
{
// check availability
bool free = true;
@@ -1251,7 +1241,7 @@
if (!allocated)
{
// unable to allocate new resource: finish scheduling
- if (ret.m_dciList.size ()>0)
+ if (ret.m_dciList.size () > 0)
{
m_schedSapUser->SchedUlConfigInd (ret);
}
@@ -1264,7 +1254,7 @@
{
// no cqi info about this UE
uldci.m_mcs = 0; // MCS 0 -> UL-AMC TBD
- NS_LOG_DEBUG (this << " UE does not have ULCQI " << (*it).first );
+          NS_LOG_INFO (this << " UE does not have UL-CQI " << (*it).first);
}
else
{
@@ -1295,8 +1285,6 @@
continue; // CQI == 0 means "out of range" (see table 7.2.3-1 of 36.213)
}
uldci.m_mcs = m_amc->GetMcsFromCqi (cqi);
-// NS_LOG_DEBUG (this << " UE " << (*it).first << " minsinr " << minSinr << " -> mcs " << (uint16_t)uldci.m_mcs);
-
}
uldci.m_tbSize = (m_amc->GetTbSizeFromMcs (uldci.m_mcs, rbPerFlow) / 8); // MCS 0 -> UL-AMC TBD
@@ -1316,23 +1304,23 @@
ret.m_dciList.push_back (uldci);
// store DCI for HARQ_PERIOD
uint8_t harqId = 0;
- if (m_harqOn==true)
+ if (m_harqOn == true)
{
itProcId = m_ulHarqCurrentProcessId.find (uldci.m_rnti);
- if (itProcId==m_ulHarqCurrentProcessId.end ())
+ if (itProcId == m_ulHarqCurrentProcessId.end ())
{
NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << uldci.m_rnti);
}
harqId = (*itProcId).second;
std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itDci = m_ulHarqProcessesDciBuffer.find (uldci.m_rnti);
- if (itDci==m_ulHarqProcessesDciBuffer.end ())
+ if (itDci == m_ulHarqProcessesDciBuffer.end ())
{
NS_FATAL_ERROR ("Unable to find RNTI entry in UL DCI HARQ buffer for RNTI " << uldci.m_rnti);
}
(*itDci).second.at (harqId) = uldci;
}
- NS_LOG_DEBUG (this << " UL Allocation - UE " << (*it).first << " startPRB " << (uint32_t)uldci.m_rbStart << " nPRB " << (uint32_t)uldci.m_rbLen << " CQI " << cqi << " MCS " << (uint32_t)uldci.m_mcs << " TBsize " << uldci.m_tbSize << " harqId " << (uint16_t)harqId);
-
+ NS_LOG_INFO (this << " UL Allocation - UE " << (*it).first << " startPRB " << (uint32_t)uldci.m_rbStart << " nPRB " << (uint32_t)uldci.m_rbLen << " CQI " << cqi << " MCS " << (uint32_t)uldci.m_mcs << " TBsize " << uldci.m_tbSize << " harqId " << (uint16_t)harqId);
+
it++;
if (it == m_ceBsrRxed.end ())
{
@@ -1415,159 +1403,156 @@
RrFfMacScheduler::DoSchedUlCqiInfoReq (const struct FfMacSchedSapProvider::SchedUlCqiInfoReqParameters& params)
{
NS_LOG_FUNCTION (this);
- NS_LOG_DEBUG (this << " RX SFNID " << params.m_sfnSf);
-// NS_LOG_DEBUG (this << " Actual sfn " << frameNo << " sbfn " << subframeNo << " sfnSf " << sfnSf);
+
switch (m_ulCqiFilter)
{
- case FfMacScheduler::SRS_UL_CQI:
- {
- // filter all the CQIs that are not SRS based
- if (params.m_ulCqi.m_type!=UlCqi_s::SRS)
- {
- return;
- }
- }
+ case FfMacScheduler::SRS_UL_CQI:
+ {
+ // filter all the CQIs that are not SRS based
+ if (params.m_ulCqi.m_type != UlCqi_s::SRS)
+ {
+ return;
+ }
+ }
break;
- case FfMacScheduler::PUSCH_UL_CQI:
- {
- // filter all the CQIs that are not SRS based
- if (params.m_ulCqi.m_type!=UlCqi_s::PUSCH)
- {
- return;
- }
- }
- case FfMacScheduler::ALL_UL_CQI:
- break;
-
- default:
- NS_FATAL_ERROR ("Unknown UL CQI type");
+ case FfMacScheduler::PUSCH_UL_CQI:
+ {
+      // filter all the CQIs that are not PUSCH based
+ if (params.m_ulCqi.m_type != UlCqi_s::PUSCH)
+ {
+ return;
+ }
+ }
+ case FfMacScheduler::ALL_UL_CQI:
+ break;
+
+ default:
+ NS_FATAL_ERROR ("Unknown UL CQI type");
}
- switch (params.m_ulCqi.m_type)
+ switch (params.m_ulCqi.m_type)
{
- case UlCqi_s::PUSCH:
- {
- std::map <uint16_t, std::vector <uint16_t> >::iterator itMap;
- std::map <uint16_t, std::vector <double> >::iterator itCqi;
- itMap = m_allocationMaps.find (params.m_sfnSf);
- if (itMap == m_allocationMaps.end ())
- {
- NS_LOG_DEBUG (this << " Does not find info on allocation, size : " << m_allocationMaps.size ());
- return;
- }
- for (uint32_t i = 0; i < (*itMap).second.size (); i++)
- {
- // convert from fixed point notation Sxxxxxxxxxxx.xxx to double
- // NS_LOG_INFO (this << " i " << i << " size " << params.m_ulCqi.m_sinr.size () << " mapSIze " << (*itMap).second.size ());
- double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (i));
- //NS_LOG_DEBUG (this << " UE " << (*itMap).second.at (i) << " SINRfp " << params.m_ulCqi.m_sinr.at (i) << " sinrdb " << sinr);
- itCqi = m_ueCqi.find ((*itMap).second.at (i));
- if (itCqi == m_ueCqi.end ())
- {
- // create a new entry
- std::vector <double> newCqi;
- for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
- {
- if (i == j)
- {
- newCqi.push_back (sinr);
- }
- else
- {
- // initialize with NO_SINR value.
- newCqi.push_back (30.0);
- }
-
- }
- m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > ((*itMap).second.at (i), newCqi));
- // generate correspondent timer
- m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > ((*itMap).second.at (i), m_cqiTimersThreshold));
- }
- else
- {
- // update the value
- (*itCqi).second.at (i) = sinr;
- // update correspondent timer
- std::map <uint16_t, uint32_t>::iterator itTimers;
- itTimers = m_ueCqiTimers.find ((*itMap).second.at (i));
- (*itTimers).second = m_cqiTimersThreshold;
-
- }
-
- }
- // remove obsolete info on allocation
- m_allocationMaps.erase (itMap);
- }
+ case UlCqi_s::PUSCH:
+ {
+ std::map <uint16_t, std::vector <uint16_t> >::iterator itMap;
+ std::map <uint16_t, std::vector <double> >::iterator itCqi;
+ itMap = m_allocationMaps.find (params.m_sfnSf);
+ if (itMap == m_allocationMaps.end ())
+ {
+            NS_LOG_INFO (this << " No info on allocation found, map size: " << m_allocationMaps.size ());
+ return;
+ }
+ for (uint32_t i = 0; i < (*itMap).second.size (); i++)
+ {
+ // convert from fixed point notation Sxxxxxxxxxxx.xxx to double
+ double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (i));
+ itCqi = m_ueCqi.find ((*itMap).second.at (i));
+ if (itCqi == m_ueCqi.end ())
+ {
+ // create a new entry
+ std::vector <double> newCqi;
+ for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
+ {
+ if (i == j)
+ {
+ newCqi.push_back (sinr);
+ }
+ else
+ {
+ // initialize with NO_SINR value.
+ newCqi.push_back (30.0);
+ }
+
+ }
+ m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > ((*itMap).second.at (i), newCqi));
+                // generate corresponding timer
+ m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > ((*itMap).second.at (i), m_cqiTimersThreshold));
+ }
+ else
+ {
+ // update the value
+ (*itCqi).second.at (i) = sinr;
+                // update corresponding timer
+ std::map <uint16_t, uint32_t>::iterator itTimers;
+ itTimers = m_ueCqiTimers.find ((*itMap).second.at (i));
+ (*itTimers).second = m_cqiTimersThreshold;
+
+ }
+
+ }
+ // remove obsolete info on allocation
+ m_allocationMaps.erase (itMap);
+ }
break;
- case UlCqi_s::SRS:
- {
- // get the RNTI from vendor specific parameters
- uint16_t rnti = 0;
- NS_ASSERT (params.m_vendorSpecificList.size () > 0);
- for (uint16_t i = 0; i < params.m_vendorSpecificList.size (); i++)
- {
- if (params.m_vendorSpecificList.at (i).m_type == SRS_CQI_RNTI_VSP)
- {
- Ptr<SrsCqiRntiVsp> vsp = DynamicCast<SrsCqiRntiVsp> (params.m_vendorSpecificList.at (i).m_value);
- rnti = vsp->GetRnti ();
- }
- }
- std::map <uint16_t, std::vector <double> >::iterator itCqi;
- itCqi = m_ueCqi.find (rnti);
- if (itCqi == m_ueCqi.end ())
- {
- // create a new entry
- std::vector <double> newCqi;
- for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
- {
- double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
- newCqi.push_back (sinr);
- NS_LOG_DEBUG (this << " RNTI " << rnti << " new SRS-CQI for RB " << j << " value " << sinr);
-
- }
- m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > (rnti, newCqi));
- // generate correspondent timer
- m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > (rnti, m_cqiTimersThreshold));
- }
- else
- {
- // update the values
- for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
- {
- double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
- (*itCqi).second.at (j) = sinr;
- NS_LOG_DEBUG (this << " RNTI " << rnti << " update SRS-CQI for RB " << j << " value " << sinr);
- }
- // update correspondent timer
- std::map <uint16_t, uint32_t>::iterator itTimers;
- itTimers = m_ueCqiTimers.find (rnti);
- (*itTimers).second = m_cqiTimersThreshold;
-
- }
-
-
- }
+ case UlCqi_s::SRS:
+ {
+ // get the RNTI from vendor specific parameters
+ uint16_t rnti = 0;
+ NS_ASSERT (params.m_vendorSpecificList.size () > 0);
+ for (uint16_t i = 0; i < params.m_vendorSpecificList.size (); i++)
+ {
+ if (params.m_vendorSpecificList.at (i).m_type == SRS_CQI_RNTI_VSP)
+ {
+ Ptr<SrsCqiRntiVsp> vsp = DynamicCast<SrsCqiRntiVsp> (params.m_vendorSpecificList.at (i).m_value);
+ rnti = vsp->GetRnti ();
+ }
+ }
+ std::map <uint16_t, std::vector <double> >::iterator itCqi;
+ itCqi = m_ueCqi.find (rnti);
+ if (itCqi == m_ueCqi.end ())
+ {
+ // create a new entry
+ std::vector <double> newCqi;
+ for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
+ {
+ double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
+ newCqi.push_back (sinr);
+ NS_LOG_INFO (this << " RNTI " << rnti << " new SRS-CQI for RB " << j << " value " << sinr);
+
+ }
+ m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > (rnti, newCqi));
+            // generate corresponding timer
+ m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > (rnti, m_cqiTimersThreshold));
+ }
+ else
+ {
+ // update the values
+ for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
+ {
+ double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
+ (*itCqi).second.at (j) = sinr;
+ NS_LOG_INFO (this << " RNTI " << rnti << " update SRS-CQI for RB " << j << " value " << sinr);
+ }
+            // update corresponding timer
+ std::map <uint16_t, uint32_t>::iterator itTimers;
+ itTimers = m_ueCqiTimers.find (rnti);
+ (*itTimers).second = m_cqiTimersThreshold;
+
+ }
+
+
+ }
break;
- case UlCqi_s::PUCCH_1:
- case UlCqi_s::PUCCH_2:
- case UlCqi_s::PRACH:
- {
- NS_FATAL_ERROR ("PfFfMacScheduler supports only PUSCH and SRS UL-CQIs");
- }
+ case UlCqi_s::PUCCH_1:
+ case UlCqi_s::PUCCH_2:
+ case UlCqi_s::PRACH:
+ {
+        NS_FATAL_ERROR ("RrFfMacScheduler supports only PUSCH and SRS UL-CQIs");
+ }
break;
- default:
- NS_FATAL_ERROR ("Unknown type of UL-CQI");
+ default:
+ NS_FATAL_ERROR ("Unknown type of UL-CQI");
}
return;
}
void
-RrFfMacScheduler::RefreshDlCqiMaps(void)
+RrFfMacScheduler::RefreshDlCqiMaps (void)
{
NS_LOG_FUNCTION (this << m_p10CqiTimers.size ());
// refresh DL CQI P01 Map
std::map <uint16_t,uint32_t>::iterator itP10 = m_p10CqiTimers.begin ();
- while (itP10!=m_p10CqiTimers.end ())
+ while (itP10 != m_p10CqiTimers.end ())
{
NS_LOG_INFO (this << " P10-CQI for user " << (*itP10).first << " is " << (uint32_t)(*itP10).second << " thr " << (uint32_t)m_cqiTimersThreshold);
if ((*itP10).second == 0)
@@ -1587,17 +1572,17 @@
itP10++;
}
}
-
+
return;
}
void
-RrFfMacScheduler::RefreshUlCqiMaps(void)
+RrFfMacScheduler::RefreshUlCqiMaps (void)
{
// refresh UL CQI Map
std::map <uint16_t,uint32_t>::iterator itUl = m_ueCqiTimers.begin ();
- while (itUl!=m_ueCqiTimers.end ())
+ while (itUl != m_ueCqiTimers.end ())
{
NS_LOG_INFO (this << " UL-CQI for user " << (*itUl).first << " is " << (uint32_t)(*itUl).second << " thr " << (uint32_t)m_cqiTimersThreshold);
if ((*itUl).second == 0)
@@ -1618,7 +1603,7 @@
itUl++;
}
}
-
+
return;
}
@@ -1631,7 +1616,7 @@
{
if (((*it).m_rnti == rnti) && ((*it).m_logicalChannelIdentity))
{
-// NS_LOG_DEBUG (this << " UE " << rnti << " LC " << (uint16_t)lcid << " txqueue " << (*it).m_rlcTransmissionQueueSize << " retxqueue " << (*it).m_rlcRetransmissionQueueSize << " status " << (*it).m_rlcStatusPduSize << " decrease " << size);
+ NS_LOG_INFO (this << " UE " << rnti << " LC " << (uint16_t)lcid << " txqueue " << (*it).m_rlcTransmissionQueueSize << " retxqueue " << (*it).m_rlcRetransmissionQueueSize << " status " << (*it).m_rlcStatusPduSize << " decrease " << size);
// Update queues: RLC tx order Status, ReTx, Tx
// Update status queue
if ((*it).m_rlcStatusPduSize <= size)
@@ -1644,7 +1629,7 @@
(*it).m_rlcStatusPduSize -= size;
return;
}
- // update retransmission queue
+ // update retransmission queue
if ((*it).m_rlcRetransmissionQueueSize <= size)
{
size -= (*it).m_rlcRetransmissionQueueSize;
@@ -1677,9 +1662,9 @@
size = size - 2; // remove the minimum RLC overhead
std::map <uint16_t,uint32_t>::iterator it = m_ceBsrRxed.find (rnti);
- if (it!=m_ceBsrRxed.end ())
+ if (it != m_ceBsrRxed.end ())
{
-// NS_LOG_DEBUG (this << " Update RLC BSR UE " << rnti << " size " << size << " BSR " << (*it).second);
+ NS_LOG_INFO (this << " Update RLC BSR UE " << rnti << " size " << size << " BSR " << (*it).second);
if ((*it).second >= size)
{
(*it).second -= size;
@@ -1693,7 +1678,7 @@
{
NS_LOG_ERROR (this << " Does not find BSR report info of UE " << rnti);
}
-
+
}