src/lte/model/tdtbfq-ff-mac-scheduler.cc
changeset 9654 cd7eb841e50e
parent 9180 818aee3d2c7b
child 9656 c38c27b9e5ce
--- a/src/lte/model/tdtbfq-ff-mac-scheduler.cc	Mon Feb 04 18:47:25 2013 +0100
+++ b/src/lte/model/tdtbfq-ff-mac-scheduler.cc	Tue Apr 02 19:58:36 2013 +0200
@@ -15,25 +15,27 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  *
- * Author: Marco Miozzo <marco.miozzo@cttc.es>        // original version
+ * Author: Marco Miozzo <marco.miozzo@cttc.es>
  * Modification: Dizhi Zhou <dizhi.zhou@gmail.com>    // modify codes related to downlink scheduler
  */
 
+#ifdef __FreeBSD__
+#define log2(x) (log (x) / M_LN2)
+#endif /* __FreeBSD__ */
+
 #include <ns3/log.h>
 #include <ns3/pointer.h>
-#include <ns3/integer.h>
 
 #include <ns3/simulator.h>
 #include <ns3/lte-amc.h>
 #include <ns3/tdtbfq-ff-mac-scheduler.h>
 #include <ns3/lte-vendor-specific-parameters.h>
+#include <ns3/boolean.h>
+#include <ns3/integer.h>
+#include <set>
 
 NS_LOG_COMPONENT_DEFINE ("TdTbfqFfMacScheduler");
 
-// value for SINR outside the range defined by LTE, used to indicate that there
-// is no CQI for this element
-#define NO_SINR -5000
-
 namespace ns3 {
 
 int TdTbfqType0AllocationRbg[4] = {
@@ -43,8 +45,11 @@
   110       // RGB size 4
 };  // see table 7.1.6.1-1 of 36.213
 
+
 NS_OBJECT_ENSURE_REGISTERED (TdTbfqFfMacScheduler);
 
+
+
 class TdTbfqSchedulerMemberCschedSapProvider : public FfMacCschedSapProvider
 {
 public:
@@ -215,8 +220,7 @@
   :   m_cschedSapUser (0),
     m_schedSapUser (0),
     m_timeWindow (99.0),
-    m_nextRntiUl (0),
-    bankSize (0)
+    m_nextRntiUl (0)
 {
   m_amc = CreateObject <LteAmc> ();
   m_cschedSapProvider = new TdTbfqSchedulerMemberCschedSapProvider (this);
@@ -232,6 +236,13 @@
 TdTbfqFfMacScheduler::DoDispose ()
 {
   NS_LOG_FUNCTION (this);
+  m_dlHarqProcessesDciBuffer.clear ();
+  m_dlHarqProcessesTimer.clear ();
+  m_dlHarqProcessesRlcPduListBuffer.clear ();
+  m_dlInfoListBuffered.clear ();
+  m_ulHarqCurrentProcessId.clear ();
+  m_ulHarqProcessesStatus.clear ();
+  m_ulHarqProcessesDciBuffer.clear ();
   delete m_cschedSapProvider;
   delete m_schedSapProvider;
 }
@@ -267,6 +278,17 @@
                    UintegerValue (0),
                    MakeUintegerAccessor (&TdTbfqFfMacScheduler::m_creditableThreshold),
                    MakeUintegerChecker<uint32_t> ())
+
+    .AddAttribute ("HarqEnabled",
+                   "Activate/Deactivate the HARQ [by default is active].",
+                   BooleanValue (true),
+                   MakeBooleanAccessor (&TdTbfqFfMacScheduler::m_harqOn),
+                   MakeBooleanChecker ())
+    .AddAttribute ("UlGrantMcs",
+                   "The MCS of the UL grant, must be [0..15] (default 0)",
+                   UintegerValue (0),
+                   MakeUintegerAccessor (&TdTbfqFfMacScheduler::m_ulGrantMcs),
+                   MakeUintegerChecker<uint8_t> ())
   ;
   return tid;
 }
@@ -303,6 +325,7 @@
   NS_LOG_FUNCTION (this);
   // Read the subset of parameters used
   m_cschedCellConfig = params;
+  m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
   FfMacCschedSapUser::CschedUeConfigCnfParameters cnf;
   cnf.m_result = SUCCESS;
   m_cschedSapUser->CschedUeConfigCnf (cnf);
@@ -317,6 +340,29 @@
   if (it == m_uesTxMode.end ())
     {
       m_uesTxMode.insert (std::pair <uint16_t, double> (params.m_rnti, params.m_transmissionMode));
+      // generate HARQ buffers
+      m_dlHarqCurrentProcessId.insert (std::pair <uint16_t,uint8_t > (params.m_rnti, 0));
+      DlHarqProcessesStatus_t dlHarqPrcStatus;
+      dlHarqPrcStatus.resize (8,0);
+      m_dlHarqProcessesStatus.insert (std::pair <uint16_t, DlHarqProcessesStatus_t> (params.m_rnti, dlHarqPrcStatus));
+      DlHarqProcessesTimer_t dlHarqProcessesTimer;
+      dlHarqProcessesTimer.resize (8,0);
+      m_dlHarqProcessesTimer.insert (std::pair <uint16_t, DlHarqProcessesTimer_t> (params.m_rnti, dlHarqProcessesTimer));
+      DlHarqProcessesDciBuffer_t dlHarqdci;
+      dlHarqdci.resize (8);
+      m_dlHarqProcessesDciBuffer.insert (std::pair <uint16_t, DlHarqProcessesDciBuffer_t> (params.m_rnti, dlHarqdci));
+      DlHarqRlcPduListBuffer_t dlHarqRlcPdu;
+      dlHarqRlcPdu.resize (2);
+      dlHarqRlcPdu.at (0).resize (8);
+      dlHarqRlcPdu.at (1).resize (8);
+      m_dlHarqProcessesRlcPduListBuffer.insert (std::pair <uint16_t, DlHarqRlcPduListBuffer_t> (params.m_rnti, dlHarqRlcPdu));
+      m_ulHarqCurrentProcessId.insert (std::pair <uint16_t,uint8_t > (params.m_rnti, 0));
+      UlHarqProcessesStatus_t ulHarqPrcStatus;
+      ulHarqPrcStatus.resize (8,0);
+      m_ulHarqProcessesStatus.insert (std::pair <uint16_t, UlHarqProcessesStatus_t> (params.m_rnti, ulHarqPrcStatus));
+      UlHarqProcessesDciBuffer_t ulHarqdci;
+      ulHarqdci.resize (8);
+      m_ulHarqProcessesDciBuffer.insert (std::pair <uint16_t, UlHarqProcessesDciBuffer_t> (params.m_rnti, ulHarqdci));
     }
   else
     {
@@ -343,7 +389,7 @@
           tdtbfqsFlowPerf_t flowStatsDl;
           flowStatsDl.flowStart = Simulator::Now ();
           flowStatsDl.packetArrivalRate = 0;
-          flowStatsDl.tokenGenerationRate = mbrDlInBytes;
+          flowStatsDl.tokenGenerationRate =  mbrDlInBytes;
           flowStatsDl.tokenPoolSize = 0;
           flowStatsDl.maxTokenPoolSize = m_tokenPoolSize;
           flowStatsDl.counter = 0;
@@ -372,18 +418,68 @@
   return;
 }
 
-
 void
 TdTbfqFfMacScheduler::DoCschedLcReleaseReq (const struct FfMacCschedSapProvider::CschedLcReleaseReqParameters& params)
 {
-  NS_FATAL_ERROR ("unimplemented");
+  NS_LOG_FUNCTION (this);
+  for (uint16_t i = 0; i < params.m_logicalChannelIdentity.size (); i++)
+    {
+      std::map<LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator it = m_rlcBufferReq.begin ();
+      std::map<LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator temp;
+      while (it!=m_rlcBufferReq.end ())
+        {
+          if (((*it).first.m_rnti == params.m_rnti) && ((*it).first.m_lcId == params.m_logicalChannelIdentity.at (i)))
+            {
+              temp = it;
+              it++;
+              m_rlcBufferReq.erase (temp);
+            }
+          else
+            {
+              it++;
+            }
+        }
+    }
   return;
 }
 
 void
 TdTbfqFfMacScheduler::DoCschedUeReleaseReq (const struct FfMacCschedSapProvider::CschedUeReleaseReqParameters& params)
 {
-  NS_FATAL_ERROR ("unimplemented");
+  NS_LOG_FUNCTION (this);
+  
+  m_uesTxMode.erase (params.m_rnti);
+  m_dlHarqCurrentProcessId.erase (params.m_rnti);
+  m_dlHarqProcessesStatus.erase  (params.m_rnti);
+  m_dlHarqProcessesTimer.erase (params.m_rnti);
+  m_dlHarqProcessesDciBuffer.erase  (params.m_rnti);
+  m_dlHarqProcessesRlcPduListBuffer.erase  (params.m_rnti);
+  m_ulHarqCurrentProcessId.erase  (params.m_rnti);
+  m_ulHarqProcessesStatus.erase  (params.m_rnti);
+  m_ulHarqProcessesDciBuffer.erase  (params.m_rnti);
+  m_flowStatsDl.erase  (params.m_rnti);
+  m_flowStatsUl.erase  (params.m_rnti);
+  m_ceBsrRxed.erase (params.m_rnti);
+  std::map<LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator it = m_rlcBufferReq.begin ();
+  std::map<LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator temp;
+  while (it!=m_rlcBufferReq.end ())
+    {
+      if ((*it).first.m_rnti == params.m_rnti)
+        {
+          temp = it;
+          it++;
+          m_rlcBufferReq.erase (temp);
+        }
+      else
+        {
+          it++;
+        }
+    }
+  if (m_nextRntiUl == params.m_rnti)
+    {
+      m_nextRntiUl = 0;
+    }
+
   return;
 }
 
@@ -415,14 +511,16 @@
 void
 TdTbfqFfMacScheduler::DoSchedDlPagingBufferReq (const struct FfMacSchedSapProvider::SchedDlPagingBufferReqParameters& params)
 {
-  NS_FATAL_ERROR ("unimplemented");
+  NS_LOG_FUNCTION (this);
+  // TODO: Implementation of the API
   return;
 }
 
 void
 TdTbfqFfMacScheduler::DoSchedDlMacBufferReq (const struct FfMacSchedSapProvider::SchedDlMacBufferReqParameters& params)
 {
-  NS_FATAL_ERROR ("unimplemented");
+  NS_LOG_FUNCTION (this);
+  // TODO: Implementation of the API
   return;
 }
 
@@ -440,7 +538,8 @@
   return (-1);
 }
 
-int 
+
+int
 TdTbfqFfMacScheduler::LcActivePerFlow (uint16_t rnti)
 {
   std::map <LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator it;
@@ -462,17 +561,439 @@
 
 }
 
+
+uint8_t
+TdTbfqFfMacScheduler::HarqProcessAvailability (uint16_t rnti)
+{
+  NS_LOG_FUNCTION (this << rnti);
+
+  std::map <uint16_t, uint8_t>::iterator it = m_dlHarqCurrentProcessId.find (rnti);
+  if (it == m_dlHarqCurrentProcessId.end ())
+    {
+      NS_FATAL_ERROR ("No Process Id found for this RNTI " << rnti);
+    }
+  std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find (rnti);
+  if (itStat == m_dlHarqProcessesStatus.end ())
+    {
+      NS_FATAL_ERROR ("No Process Id Status found for this RNTI " << rnti);
+    }
+  uint8_t i = (*it).second;
+  do
+    {
+      i = (i + 1) % HARQ_PROC_NUM;
+    }
+  while ( ((*itStat).second.at (i) != 0)&&(i != (*it).second));
+  if ((*itStat).second.at (i) == 0)
+    {
+      return (true);
+    }
+  else
+    {
+      return (false); // return a not valid harq proc id
+    }
+}
+
+
+
+uint8_t
+TdTbfqFfMacScheduler::UpdateHarqProcessId (uint16_t rnti)
+{
+  NS_LOG_FUNCTION (this << rnti);
+
+  if (m_harqOn == false)
+    {
+      return (0);
+    }
+
+
+  std::map <uint16_t, uint8_t>::iterator it = m_dlHarqCurrentProcessId.find (rnti);
+  if (it == m_dlHarqCurrentProcessId.end ())
+    {
+      NS_FATAL_ERROR ("No Process Id found for this RNTI " << rnti);
+    }
+  std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find (rnti);
+  if (itStat == m_dlHarqProcessesStatus.end ())
+    {
+      NS_FATAL_ERROR ("No Process Id Status found for this RNTI " << rnti);
+    }
+  uint8_t i = (*it).second;
+  do
+    {
+      i = (i + 1) % HARQ_PROC_NUM;
+    }
+  while ( ((*itStat).second.at (i) != 0)&&(i != (*it).second));
+  if ((*itStat).second.at (i) == 0)
+    {
+      (*it).second = i;
+      (*itStat).second.at (i) = 1;
+    }
+  else
+    {
+      NS_FATAL_ERROR ("No HARQ process available for RNTI " << rnti << " check before update with HarqProcessAvailability");
+    }
+
+  return ((*it).second);
+}
+
+
+void
+TdTbfqFfMacScheduler::RefreshHarqProcesses ()
+{
+  NS_LOG_FUNCTION (this);
+
+  std::map <uint16_t, DlHarqProcessesTimer_t>::iterator itTimers;
+  for (itTimers = m_dlHarqProcessesTimer.begin (); itTimers != m_dlHarqProcessesTimer.end (); itTimers ++)
+    {
+      for (uint16_t i = 0; i < HARQ_PROC_NUM; i++)
+        {
+          if ((*itTimers).second.at (i) == HARQ_DL_TIMEOUT)
+            {
+              // reset HARQ process
+              
+              NS_LOG_DEBUG (this << " Reset HARQ proc " << i << " for RNTI " << (*itTimers).first);
+              std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find ((*itTimers).first);
+              if (itStat == m_dlHarqProcessesStatus.end ())
+                {
+                  NS_FATAL_ERROR ("No Process Id Status found for this RNTI " << (*itTimers).first);
+                }
+              (*itStat).second.at (i) = 0;
+              (*itTimers).second.at (i) = 0;
+            }
+          else
+            {
+              (*itTimers).second.at (i)++;
+            }
+        }
+    }
+  
+}
+
+
 void
 TdTbfqFfMacScheduler::DoSchedDlTriggerReq (const struct FfMacSchedSapProvider::SchedDlTriggerReqParameters& params)
 {
   NS_LOG_FUNCTION (this << " Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf));
   // API generated by RLC for triggering the scheduling of a DL subframe
 
-  // evaluate the relative channel quality indicator for each UE per each RBG 
+
+  // evaluate the relative channel quality indicator for each UE per each RBG
   // (since we are using allocation type 0 the small unit of allocation is RBG)
   // Resource allocation type 0 (see sec 7.1.6.1 of 36.213)
+
   RefreshDlCqiMaps ();
 
+  int rbgSize = GetRbgSize (m_cschedCellConfig.m_dlBandwidth);
+  int rbgNum = m_cschedCellConfig.m_dlBandwidth / rbgSize;
+  std::map <uint16_t, std::vector <uint16_t> > allocationMap; // RBs map per RNTI
+  std::vector <bool> rbgMap;  // global RBGs map
+  uint16_t rbgAllocatedNum = 0;
+  std::set <uint16_t> rntiAllocated;
+  rbgMap.resize (m_cschedCellConfig.m_dlBandwidth / rbgSize, false);
+  FfMacSchedSapUser::SchedDlConfigIndParameters ret;
+
+  // RACH Allocation
+  m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
+  uint16_t rbStart = 0;
+  std::vector <struct RachListElement_s>::iterator itRach;
+  for (itRach = m_rachList.begin (); itRach != m_rachList.end (); itRach++)
+    {
+      NS_ASSERT_MSG (m_amc->GetTbSizeFromMcs (m_ulGrantMcs, m_cschedCellConfig.m_ulBandwidth) > (*itRach).m_estimatedSize, " Default UL Grant MCS does not allow to send RACH messages");
+      BuildRarListElement_s newRar;
+      newRar.m_rnti = (*itRach).m_rnti;
+      // DL-RACH Allocation
+      // Ideal: no needs of configuring m_dci
+      // UL-RACH Allocation
+      newRar.m_grant.m_rnti = newRar.m_rnti;
+      newRar.m_grant.m_mcs = m_ulGrantMcs;
+      uint16_t rbLen = 1;
+      uint16_t tbSizeBits = 0;
+      // find lowest TB size that fits UL grant estimated size
+      while ((tbSizeBits < (*itRach).m_estimatedSize) && (rbStart + rbLen < m_cschedCellConfig.m_ulBandwidth))
+        {
+          rbLen++;
+          tbSizeBits = m_amc->GetTbSizeFromMcs (m_ulGrantMcs, rbLen);
+        }
+      if (tbSizeBits < (*itRach).m_estimatedSize)
+        {
+          // no more allocation space: finish allocation
+          break;
+        }
+      newRar.m_grant.m_rbStart = rbStart;
+      newRar.m_grant.m_rbLen = rbLen;
+      newRar.m_grant.m_tbSize = tbSizeBits / 8;
+      newRar.m_grant.m_hopping = false;
+      newRar.m_grant.m_tpc = 0;
+      newRar.m_grant.m_cqiRequest = false;
+      newRar.m_grant.m_ulDelay = false;
+      NS_LOG_INFO (this << " UL grant allocated to RNTI " << (*itRach).m_rnti << " rbStart " << rbStart << " rbLen " << rbLen << " MCS " << m_ulGrantMcs << " tbSize " << newRar.m_grant.m_tbSize);
+      for (uint16_t i = rbStart; i < rbStart + rbLen; i++)
+        {
+          m_rachAllocationMap.at (i) = (*itRach).m_rnti;
+        }
+      rbStart = rbStart + rbLen;
+
+      ret.m_buildRarList.push_back (newRar);
+    }
+  m_rachList.clear ();
+
+
+  // Process DL HARQ feedback
+  RefreshHarqProcesses ();
+  // retrieve past HARQ retx buffered
+  if (m_dlInfoListBuffered.size () > 0)
+    {
+      if (params.m_dlInfoList.size () > 0)
+        {
+          NS_LOG_INFO (this << " Received DL-HARQ feedback");
+          m_dlInfoListBuffered.insert (m_dlInfoListBuffered.end (), params.m_dlInfoList.begin (), params.m_dlInfoList.end ());
+        }
+    }
+  else
+    {
+      if (params.m_dlInfoList.size () > 0)
+        {
+          m_dlInfoListBuffered = params.m_dlInfoList;
+        }
+    }
+  if (m_harqOn == false)
+    {
+      // Ignore HARQ feedback
+      m_dlInfoListBuffered.clear ();
+    }
+  std::vector <struct DlInfoListElement_s> dlInfoListUntxed;
+  for (uint16_t i = 0; i < m_dlInfoListBuffered.size (); i++)
+    {
+      std::set <uint16_t>::iterator itRnti = rntiAllocated.find (m_dlInfoListBuffered.at (i).m_rnti);
+      if (itRnti != rntiAllocated.end ())
+        {
+          // RNTI already allocated for retx
+          continue;
+        }
+      uint8_t nLayers = m_dlInfoListBuffered.at (i).m_harqStatus.size ();
+      std::vector <bool> retx;
+      NS_LOG_INFO (this << " Processing DLHARQ feedback");
+      if (nLayers == 1)
+        {
+          retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlInfoListElement_s::NACK);
+          retx.push_back (false);
+        }
+      else
+        {
+          retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlInfoListElement_s::NACK);
+          retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (1) == DlInfoListElement_s::NACK);
+        }
+      if (retx.at (0) || retx.at (1))
+        {
+          // retrieve HARQ process information
+          uint16_t rnti = m_dlInfoListBuffered.at (i).m_rnti;
+          uint8_t harqId = m_dlInfoListBuffered.at (i).m_harqProcessId;
+          NS_LOG_INFO (this << " HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId);
+          std::map <uint16_t, DlHarqProcessesDciBuffer_t>::iterator itHarq = m_dlHarqProcessesDciBuffer.find (rnti);
+          if (itHarq == m_dlHarqProcessesDciBuffer.end ())
+            {
+              NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << rnti);
+            }
+
+          DlDciListElement_s dci = (*itHarq).second.at (harqId);
+          int rv = 0;
+          if (dci.m_rv.size () == 1)
+            {
+              rv = dci.m_rv.at (0);
+            }
+          else
+            {
+              rv = (dci.m_rv.at (0) > dci.m_rv.at (1) ? dci.m_rv.at (0) : dci.m_rv.at (1));
+            }
+
+          if (rv == 3)
+            {
+              // maximum number of retx reached -> drop process
+              NS_LOG_INFO ("Maximum number of retransmissions reached -> drop process");
+              std::map <uint16_t, DlHarqProcessesStatus_t>::iterator it = m_dlHarqProcessesStatus.find (rnti);
+              if (it == m_dlHarqProcessesStatus.end ())
+                {
+                  NS_FATAL_ERROR ("No info find in HARQ buffer for UE (might change eNB) " << m_dlInfoListBuffered.at (i).m_rnti);
+                }
+              (*it).second.at (harqId) = 0;
+              std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu =  m_dlHarqProcessesRlcPduListBuffer.find (rnti);
+              if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
+                {
+                  NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti);
+                }
+              for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++)
+                {
+                  (*itRlcPdu).second.at (k).at (harqId).clear ();
+                }
+              continue;
+            }
+          // check the feasibility of retransmitting on the same RBGs
+          // translate the DCI to Spectrum framework
+          std::vector <int> dciRbg;
+          uint32_t mask = 0x1;
+          NS_LOG_INFO ("Original RBGs " << dci.m_rbBitmap << " rnti " << dci.m_rnti);
+          for (int j = 0; j < 32; j++)
+            {
+              if (((dci.m_rbBitmap & mask) >> j) == 1)
+                {
+                  dciRbg.push_back (j);
+                  NS_LOG_INFO ("\t" << j);
+                }
+              mask = (mask << 1);
+            }
+          bool free = true;
+          for (uint8_t j = 0; j < dciRbg.size (); j++)
+            {
+              if (rbgMap.at (dciRbg.at (j)) == true)
+                {
+                  free = false;
+                  break;
+                }
+            }
+          if (free)
+            {
+              // use the same RBGs for the retx
+              // reserve RBGs
+              for (uint8_t j = 0; j < dciRbg.size (); j++)
+                {
+                  rbgMap.at (dciRbg.at (j)) = true;
+                  NS_LOG_INFO ("RBG " << dciRbg.at (j) << " assigned");
+                  rbgAllocatedNum++;
+                }
+
+              NS_LOG_INFO (this << " Send retx in the same RBGs");
+            }
+          else
+            {
+              // find RBGs for sending HARQ retx
+              uint8_t j = 0;
+              uint8_t rbgId = (dciRbg.at (dciRbg.size () - 1) + 1) % rbgNum;
+              uint8_t startRbg = dciRbg.at (dciRbg.size () - 1);
+              std::vector <bool> rbgMapCopy = rbgMap;
+              while ((j < dciRbg.size ())&&(startRbg != rbgId))
+                {
+                  if (rbgMapCopy.at (rbgId) == false)
+                    {
+                      rbgMapCopy.at (rbgId) = true;
+                      dciRbg.at (j) = rbgId;
+                      j++;
+                    }
+                  rbgId++;
+                }
+              if (j == dciRbg.size ())
+                {
+                  // find new RBGs -> update DCI map
+                  uint32_t rbgMask = 0;
+                  for (uint16_t k = 0; k < dciRbg.size (); k++)
+                    {
+                      rbgMask = rbgMask + (0x1 << dciRbg.at (k));
+                      rbgAllocatedNum++;
+                    }
+                  dci.m_rbBitmap = rbgMask;
+                  rbgMap = rbgMapCopy;
+                  NS_LOG_INFO (this << " Move retx in RBGs " << dciRbg.size ());
+                }
+              else
+                {
+                  // HARQ retx cannot be performed on this TTI -> store it
+                  dlInfoListUntxed.push_back (params.m_dlInfoList.at (i));
+                  NS_LOG_INFO (this << " No resource for this retx -> buffer it");
+                }
+            }
+          // retrieve RLC PDU list for retx TBsize and update DCI
+          BuildDataListElement_s newEl;
+          std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu =  m_dlHarqProcessesRlcPduListBuffer.find (rnti);
+          if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
+            {
+              NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << rnti);
+            }
+          for (uint8_t j = 0; j < nLayers; j++)
+            {
+              if (retx.at (j))
+                {
+                  if (j >= dci.m_ndi.size ())
+                    {
+                      // for avoiding errors in MIMO transient phases
+                      dci.m_ndi.push_back (0);
+                      dci.m_rv.push_back (0);
+                      dci.m_mcs.push_back (0);
+                      dci.m_tbsSize.push_back (0);
+                      NS_LOG_INFO (this << " layer " << (uint16_t)j << " no txed (MIMO transition)");
+                    }
+                  else
+                    {
+                      dci.m_ndi.at (j) = 0;
+                      dci.m_rv.at (j)++;
+                      (*itHarq).second.at (harqId).m_rv.at (j)++;
+                      NS_LOG_INFO (this << " layer " << (uint16_t)j << " RV " << (uint16_t)dci.m_rv.at (j));
+                    }
+                }
+              else
+                {
+                  // empty TB of layer j
+                  dci.m_ndi.at (j) = 0;
+                  dci.m_rv.at (j) = 0;
+                  dci.m_mcs.at (j) = 0;
+                  dci.m_tbsSize.at (j) = 0;
+                  NS_LOG_INFO (this << " layer " << (uint16_t)j << " no retx");
+                }
+            }
+          for (uint16_t k = 0; k < (*itRlcPdu).second.at (0).at (dci.m_harqProcess).size (); k++)
+            {
+              std::vector <struct RlcPduListElement_s> rlcPduListPerLc;
+              for (uint8_t j = 0; j < nLayers; j++)
+                {
+                  if (retx.at (j))
+                    {
+                      if (j < dci.m_ndi.size ())
+                        {
+                          rlcPduListPerLc.push_back ((*itRlcPdu).second.at (j).at (dci.m_harqProcess).at (k));
+                        }
+                    }
+                }
+
+              if (rlcPduListPerLc.size () > 0)
+                {
+                  newEl.m_rlcPduList.push_back (rlcPduListPerLc);
+                }
+            }
+          newEl.m_rnti = rnti;
+          newEl.m_dci = dci;
+          (*itHarq).second.at (harqId).m_rv = dci.m_rv;
+          // refresh timer
+          std::map <uint16_t, DlHarqProcessesTimer_t>::iterator itHarqTimer = m_dlHarqProcessesTimer.find (rnti);
+          if (itHarqTimer== m_dlHarqProcessesTimer.end ())
+            {
+              NS_FATAL_ERROR ("Unable to find HARQ timer for RNTI " << (uint16_t)rnti);
+            }
+          (*itHarqTimer).second.at (harqId) = 0;
+          ret.m_buildDataList.push_back (newEl);
+          rntiAllocated.insert (rnti);
+        }
+      else
+        {
+          // update HARQ process status
+          NS_LOG_INFO (this << " HARQ received ACK for UE " << m_dlInfoListBuffered.at (i).m_rnti);
+          std::map <uint16_t, DlHarqProcessesStatus_t>::iterator it = m_dlHarqProcessesStatus.find (m_dlInfoListBuffered.at (i).m_rnti);
+          if (it == m_dlHarqProcessesStatus.end ())
+            {
+              NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << m_dlInfoListBuffered.at (i).m_rnti);
+            }
+          (*it).second.at (m_dlInfoListBuffered.at (i).m_harqProcessId) = 0;
+          std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu =  m_dlHarqProcessesRlcPduListBuffer.find (m_dlInfoListBuffered.at (i).m_rnti);
+          if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
+            {
+              NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti);
+            }
+          for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++)
+            {
+              (*itRlcPdu).second.at (k).at (m_dlInfoListBuffered.at (i).m_harqProcessId).clear ();
+            }
+        }
+    }
+  m_dlInfoListBuffered.clear ();
+  m_dlInfoListBuffered = dlInfoListUntxed;
+
+
   // update token pool, counter and bank size
   std::map <uint16_t, tdtbfqsFlowPerf_t>::iterator itStats;
   for (itStats = m_flowStatsDl.begin (); itStats != m_flowStatsDl.end (); itStats++)
@@ -481,7 +1002,7 @@
         {
           (*itStats).second.counter +=  (*itStats).second.tokenGenerationRate / 1000 - ( (*itStats).second.maxTokenPoolSize -  (*itStats).second.tokenPoolSize );
           (*itStats).second.tokenPoolSize = (*itStats).second.maxTokenPoolSize;
-          bankSize += (*itStats).second.tokenGenerationRate / 1000 - ( (*itStats).second.maxTokenPoolSize -  (*itStats).second.tokenPoolSize );  
+          bankSize += (*itStats).second.tokenGenerationRate / 1000 - ( (*itStats).second.maxTokenPoolSize -  (*itStats).second.tokenPoolSize );
         }
       else
         {
@@ -489,9 +1010,6 @@
         }
     }
 
-  int rbgSize = GetRbgSize (m_cschedCellConfig.m_dlBandwidth);
-  int rbgNum = m_cschedCellConfig.m_dlBandwidth / rbgSize;
-  std::map <uint16_t, std::vector <uint16_t> > allocationMap;
 
   // select UE with largest metric
   std::map <uint16_t, tdtbfqsFlowPerf_t>::iterator it;
@@ -500,6 +1018,21 @@
   bool firstRnti = true;
   for (it = m_flowStatsDl.begin (); it != m_flowStatsDl.end (); it++)
     {
+      std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first);
+      if ((itRnti != rntiAllocated.end ())||(!HarqProcessAvailability ((*it).first)))
+        {
+          // UE already allocated for HARQ or without HARQ process available -> drop it
+          if (itRnti != rntiAllocated.end ())
+            {
+              NS_LOG_DEBUG (this << " RNTI discarded for HARQ tx" << (uint16_t)(*it).first);
+            }
+          if (!HarqProcessAvailability ((*it).first))
+            {
+              NS_LOG_DEBUG (this << " RNTI discarded for HARQ id" << (uint16_t)(*it).first);
+            }
+          continue;
+        }
+
       if (LcActivePerFlow ((*it).first) == 0)
         {
           continue;
@@ -523,7 +1056,7 @@
 
   if (itMax == m_flowStatsDl.end ())
     {
-      // no UE available for downlink 
+      // all UEs are allocated RBG or all UEs already allocated for HARQ or without HARQ process available
       return;
     }
   else
@@ -532,6 +1065,8 @@
       std::vector <uint16_t> tempMap;
       for (int i = 0; i < rbgNum; i++)
         {
+          if ( rbgMap.at (i) == true) // this RBG is allocated in RACH procedure
+            continue;
           tempMap.push_back (i);
         }
       allocationMap.insert (std::pair <uint16_t, std::vector <uint16_t> > ((*itMax).first, tempMap));
@@ -539,9 +1074,7 @@
 
   // generate the transmission opportunities by grouping the RBGs of the same RNTI and
   // creating the correspondent DCIs
-  FfMacSchedSapUser::SchedDlConfigIndParameters ret;
   std::map <uint16_t, std::vector <uint16_t> >::iterator itMap = allocationMap.begin ();
-
   while (itMap != allocationMap.end ())
     {
       // create new BuildDataListElement_s for this LC
@@ -549,10 +1082,11 @@
       newEl.m_rnti = (*itMap).first;
       // create the DlDciListElement_s
       DlDciListElement_s newDci;
-      std::vector <struct RlcPduListElement_s> newRlcPduLe;
       newDci.m_rnti = (*itMap).first;
+      newDci.m_harqProcess = UpdateHarqProcessId ((*itMap).first);
 
       uint16_t lcActives = LcActivePerFlow ((*itMap).first);
+      NS_LOG_INFO (this << "Allocate user " << newEl.m_rnti << " rbg " << lcActives);
       uint16_t RgbPerRnti = (*itMap).second.size ();
       std::map <uint16_t,SbMeasResult_s>::iterator itCqi;
       itCqi = m_a30CqiRxed.find ((*itMap).first);
@@ -570,7 +1104,8 @@
             {
               if ((*itCqi).second.m_higherLayerSelected.size () > (*itMap).second.at (k))
                 {
-                  for (uint8_t j = 0; j < nLayer; j++) 
+                  NS_LOG_INFO (this << " RBG " << (*itMap).second.at (k) << " CQI " << (uint16_t)((*itCqi).second.m_higherLayerSelected.at ((*itMap).second.at (k)).m_sbCqi.at (0)) );
+                  for (uint8_t j = 0; j < nLayer; j++)
                     {
                       if ((*itCqi).second.m_higherLayerSelected.at ((*itMap).second.at (k)).m_sbCqi.size () > j)
                         {
@@ -602,12 +1137,17 @@
               worstCqi.at (j) = 1; // try with lowest MCS in RBG with no info on channel
             }
         }
+      for (uint8_t j = 0; j < nLayer; j++)
+        {
+          NS_LOG_INFO (this << " Layer " << (uint16_t)j << " CQI selected " << (uint16_t)worstCqi.at (j));
+        }
       uint32_t bytesTxed = 0;
       for (uint8_t j = 0; j < nLayer; j++)
         {
           newDci.m_mcs.push_back (m_amc->GetMcsFromCqi (worstCqi.at (j)));
-          int tbSize = (m_amc->GetTbSizeFromMcs (newDci.m_mcs.at (j), RgbPerRnti * rbgSize) / 8); // (size of TB in bytes according to table 7.1.7.2.1-1 of 36.213) 
+          int tbSize = (m_amc->GetTbSizeFromMcs (newDci.m_mcs.at (j), RgbPerRnti * rbgSize) / 8); // (size of TB in bytes according to table 7.1.7.2.1-1 of 36.213)
           newDci.m_tbsSize.push_back (tbSize);
+          NS_LOG_INFO (this << " Layer " << (uint16_t)j << " MCS selected" << m_amc->GetMcsFromCqi (worstCqi.at (j)));
           bytesTxed += tbSize;
         }
 
@@ -617,6 +1157,7 @@
       for (uint16_t k = 0; k < (*itMap).second.size (); k++)
         {
           rbgMask = rbgMask + (0x1 << (*itMap).second.at (k));
+          NS_LOG_INFO (this << " Allocated RBG " << (*itMap).second.at (k));
         }
       newDci.m_rbBitmap = rbgMask; // (32 bit bitmap see 7.1.6 of 36.213)
 
@@ -624,72 +1165,85 @@
       std::map <LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator itBufReq;
       for (itBufReq = m_rlcBufferReq.begin (); itBufReq != m_rlcBufferReq.end (); itBufReq++)
         {
-          if (((*itBufReq).first.m_rnti == (*itMap).first) 
-            && (((*itBufReq).second.m_rlcTransmissionQueueSize > 0)
-                || ((*itBufReq).second.m_rlcRetransmissionQueueSize > 0)
-                || ((*itBufReq).second.m_rlcStatusPduSize > 0) ))
+          if (((*itBufReq).first.m_rnti == (*itMap).first)
+              && (((*itBufReq).second.m_rlcTransmissionQueueSize > 0)
+                  || ((*itBufReq).second.m_rlcRetransmissionQueueSize > 0)
+                  || ((*itBufReq).second.m_rlcStatusPduSize > 0) ))
             {
+              std::vector <struct RlcPduListElement_s> newRlcPduLe;
               for (uint8_t j = 0; j < nLayer; j++)
                 {
                   RlcPduListElement_s newRlcEl;
                   newRlcEl.m_logicalChannelIdentity = (*itBufReq).first.m_lcId;
                   newRlcEl.m_size = newDci.m_tbsSize.at (j) / lcActives;
+                  NS_LOG_INFO (this << " LCID " << (uint32_t) newRlcEl.m_logicalChannelIdentity << " size " << newRlcEl.m_size << " layer " << (uint16_t)j);
                   newRlcPduLe.push_back (newRlcEl);
                   UpdateDlRlcBufferInfo (newDci.m_rnti, newRlcEl.m_logicalChannelIdentity, newRlcEl.m_size);
+                  if (m_harqOn == true)
+                    {
+                      // store RLC PDU list for HARQ
+                      std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu =  m_dlHarqProcessesRlcPduListBuffer.find ((*itMap).first);
+                      if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ())
+                        {
+                          NS_FATAL_ERROR ("Unable to find RlcPdcList in HARQ buffer for RNTI " << (*itMap).first);
+                        }
+                      (*itRlcPdu).second.at (j).at (newDci.m_harqProcess).push_back (newRlcEl);
+                    }
                 }
+              newEl.m_rlcPduList.push_back (newRlcPduLe);
             }
           if ((*itBufReq).first.m_rnti > (*itMap).first)
             {
               break;
             }
         }
-      newDci.m_ndi.push_back (1); // TBD (new data indicator)
-      newDci.m_rv.push_back (0); // TBD (redundancy version)
+      for (uint8_t j = 0; j < nLayer; j++)
+        {
+          newDci.m_ndi.push_back (1);
+          newDci.m_rv.push_back (0);
+        }
 
       newEl.m_dci = newDci;
+
+      if (m_harqOn == true)
+        {
+          // store DCI for HARQ
+          std::map <uint16_t, DlHarqProcessesDciBuffer_t>::iterator itDci = m_dlHarqProcessesDciBuffer.find (newEl.m_rnti);
+          if (itDci == m_dlHarqProcessesDciBuffer.end ())
+            {
+              NS_FATAL_ERROR ("Unable to find RNTI entry in DCI HARQ buffer for RNTI " << newEl.m_rnti);
+            }
+          (*itDci).second.at (newDci.m_harqProcess) = newDci;
+          // refresh timer
+          std::map <uint16_t, DlHarqProcessesTimer_t>::iterator itHarqTimer =  m_dlHarqProcessesTimer.find (newEl.m_rnti);
+          if (itHarqTimer== m_dlHarqProcessesTimer.end ())
+            {
+              NS_FATAL_ERROR ("Unable to find HARQ timer for RNTI " << (uint16_t)newEl.m_rnti);
+            }
+          (*itHarqTimer).second.at (newDci.m_harqProcess) = 0;
+        }
+
       // ...more parameters -> ingored in this version
 
-      newEl.m_rlcPduList.push_back (newRlcPduLe);
       ret.m_buildDataList.push_back (newEl);
 
-      // update UE stats
-      std::map <uint16_t, tdtbfqsFlowPerf_t>::iterator it;
-      it = m_flowStatsDl.find ((*itMap).first);
-      if (it != m_flowStatsDl.end ())
-        {
-          if ( bytesTxed <= (*it).second.tokenPoolSize )
-            {
-              (*it).second.tokenPoolSize -= bytesTxed;
-            }
-          else
-            {
-              (*it).second.counter = (*it).second.counter - ( bytesTxed -  (*it).second.tokenPoolSize );
-              (*it).second.tokenPoolSize = 0;
-              if (bankSize <= ( bytesTxed - (*it).second.tokenPoolSize ))
-                bankSize = 0;
-              else 
-                bankSize = bankSize - ( bytesTxed -  (*it).second.tokenPoolSize );
-            }
-        }
-      else
-        {
-          NS_LOG_DEBUG (this << " No Stats for this allocated UE");
-        }
-
       itMap++;
     } // end while allocation
   ret.m_nrOfPdcchOfdmSymbols = 1;   // TODO: check correct value according the DCIs txed
 
   m_schedSapUser->SchedDlConfigInd (ret);
 
- 
+
   return;
 }
 
 void
 TdTbfqFfMacScheduler::DoSchedDlRachInfoReq (const struct FfMacSchedSapProvider::SchedDlRachInfoReqParameters& params)
 {
-  NS_FATAL_ERROR ("unimplemented");
+  NS_LOG_FUNCTION (this);
+
+  m_rachList = params.m_rachList;
+  
   return;
 }
 
@@ -788,17 +1342,123 @@
 void
 TdTbfqFfMacScheduler::DoSchedUlTriggerReq (const struct FfMacSchedSapProvider::SchedUlTriggerReqParameters& params)
 {
-  NS_LOG_FUNCTION (this << " UL - Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf));
- 
+  NS_LOG_FUNCTION (this << " UL - Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf) << " size " << params.m_ulInfoList.size ());
+
   RefreshUlCqiMaps ();
 
-  std::map <uint16_t,uint32_t>::iterator it; 
+  // Generate RBs map
+  FfMacSchedSapUser::SchedUlConfigIndParameters ret;
+  std::vector <bool> rbMap;
+  uint16_t rbAllocatedNum = 0;
+  std::set <uint16_t> rntiAllocated;
+  std::vector <uint16_t> rbgAllocationMap;
+  // update with RACH allocation map
+  rbgAllocationMap = m_rachAllocationMap;
+  //rbgAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
+  m_rachAllocationMap.clear ();
+  m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
+
+  rbMap.resize (m_cschedCellConfig.m_ulBandwidth, false);
+  // remove RACH allocation
+  for (uint16_t i = 0; i < m_cschedCellConfig.m_ulBandwidth; i++)
+    {
+      if (rbgAllocationMap.at (i) != 0)
+        {
+          rbMap.at (i) = true;
+          NS_LOG_DEBUG (this << " Allocated for RACH " << i);
+        }
+    }
+
+
+  if (m_harqOn == true)
+    {
+      //   Process UL HARQ feedback
+      //   update UL HARQ proc id
+      std::map <uint16_t, uint8_t>::iterator itProcId;
+      for (itProcId = m_ulHarqCurrentProcessId.begin (); itProcId != m_ulHarqCurrentProcessId.end (); itProcId++)
+        {
+          (*itProcId).second = ((*itProcId).second + 1) % HARQ_PROC_NUM;
+        }
+
+      for (uint16_t i = 0; i < params.m_ulInfoList.size (); i++)
+        {        
+          if (params.m_ulInfoList.at (i).m_receptionStatus == UlInfoListElement_s::NotOk)
+            {
+              // retx correspondent block: retrieve the UL-DCI
+              uint16_t rnti = params.m_ulInfoList.at (i).m_rnti;
+              itProcId = m_ulHarqCurrentProcessId.find (rnti);
+              if (itProcId == m_ulHarqCurrentProcessId.end ())
+                {
+                  NS_LOG_ERROR ("No info find in HARQ buffer for UE (might change eNB) " << rnti);
+                }
+              uint8_t harqId = (uint8_t)((*itProcId).second - HARQ_PERIOD) % HARQ_PROC_NUM;
+              NS_LOG_INFO (this << " UL-HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId << " i " << i << " size "  << params.m_ulInfoList.size ());
+              std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itHarq = m_ulHarqProcessesDciBuffer.find (rnti);
+              if (itHarq == m_ulHarqProcessesDciBuffer.end ())
+                {
+                  NS_LOG_ERROR ("No info find in HARQ buffer for UE (might change eNB) " << rnti);
+                  continue;
+                }
+              UlDciListElement_s dci = (*itHarq).second.at (harqId);
+              std::map <uint16_t, UlHarqProcessesStatus_t>::iterator itStat = m_ulHarqProcessesStatus.find (rnti);
+              if (itStat == m_ulHarqProcessesStatus.end ())
+                {
+                  NS_LOG_ERROR ("No info find in HARQ buffer for UE (might change eNB) " << rnti);
+                }
+              if ((*itStat).second.at (harqId) >= 3)
+                {
+                  NS_LOG_INFO ("Max number of retransmissions reached (UL)-> drop process");
+                  continue;
+                }
+              bool free = true;
+              for (int j = dci.m_rbStart; j < dci.m_rbStart + dci.m_rbLen; j++)
+                {
+                  if (rbMap.at (j) == true)
+                    {
+                      free = false;
+                      NS_LOG_INFO (this << " BUSY " << j);
+                    }
+                }
+              if (free)
+                {
+                  // retx on the same RBs
+                  for (int j = dci.m_rbStart; j < dci.m_rbStart + dci.m_rbLen; j++)
+                    {
+                      rbMap.at (j) = true;
+                      rbgAllocationMap.at (j) = dci.m_rnti;
+                      NS_LOG_INFO ("\tRB " << j);
+                      rbAllocatedNum++;
+                    }
+                  NS_LOG_INFO (this << " Send retx in the same RBs " << (uint16_t)dci.m_rbStart << " to " << dci.m_rbStart + dci.m_rbLen << " RV " << (*itStat).second.at (harqId) + 1);
+                }
+              else
+                {
+                  NS_LOG_INFO ("Cannot allocate retx due to RACH allocations for UE " << rnti);
+                  continue;
+                }
+              dci.m_ndi = 0;
+              // Update HARQ buffers with new HarqId
+              (*itStat).second.at ((*itProcId).second) = (*itStat).second.at (harqId) + 1;
+              (*itStat).second.at (harqId) = 0;
+              (*itHarq).second.at ((*itProcId).second) = dci;
+              ret.m_dciList.push_back (dci);
+              rntiAllocated.insert (dci.m_rnti);
+            }
+            else
+            {
+              NS_LOG_INFO (this << " HARQ-ACK feedback from RNTI " << params.m_ulInfoList.at (i).m_rnti);
+            }
+        }
+    }
+
+  std::map <uint16_t,uint32_t>::iterator it;
   int nflows = 0;
 
   for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++)
     {
-      // remove old entries of this UE-LC
-      if ((*it).second > 0)
+      std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first);
+      // select UEs with queues not empty and not yet allocated for HARQ
+      if (((*it).second > 0)&&(itRnti == rntiAllocated.end ()))
         {
           nflows++;
         }
@@ -806,20 +1466,24 @@
 
   if (nflows == 0)
     {
-      return ; // no flows to be scheduled
+      if (ret.m_dciList.size () > 0)
+        {
+          m_schedSapUser->SchedUlConfigInd (ret);
+        }
+        
+      return;  // no flows to be scheduled
     }
 
 
-  // Divide the resource equally among the active users
-  int rbPerFlow = m_cschedCellConfig.m_ulBandwidth / nflows;
-  if (rbPerFlow == 0)
+  // Divide the remaining resources equally among the active users starting from the subsequent one served last scheduling trigger
+  uint16_t rbPerFlow = (m_cschedCellConfig.m_ulBandwidth) / (nflows + rntiAllocated.size ());
+  if (rbPerFlow < 3)
     {
-      rbPerFlow = 1;              // at least 1 rbg per flow (till available resource)
+      rbPerFlow = 3;  // at least 3 rbg per flow (till available resource) to ensure TxOpportunity >= 7 bytes
     }
   int rbAllocated = 0;
 
-  FfMacSchedSapUser::SchedUlConfigIndParameters ret;
-  std::vector <uint16_t> rbgAllocationMap;
+  std::map <uint16_t, tdtbfqsFlowPerf_t>::iterator itStats;
   if (m_nextRntiUl != 0)
     {
       for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++)
@@ -841,16 +1505,88 @@
     }
   do
     {
-      if (rbAllocated + rbPerFlow > m_cschedCellConfig.m_ulBandwidth)
+      std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first);
+      if ((itRnti != rntiAllocated.end ())||((*it).second == 0))
+        {
+          // UE already allocated for UL-HARQ -> skip it
+          it++;
+          if (it == m_ceBsrRxed.end ())
+            {
+              // restart from the first
+              it = m_ceBsrRxed.begin ();
+            }
+          continue;
+        }
+      if (rbAllocated + rbPerFlow - 1 > m_cschedCellConfig.m_ulBandwidth)
         {
           // limit to physical resources last resource assignment
           rbPerFlow = m_cschedCellConfig.m_ulBandwidth - rbAllocated;
+          // at least 3 rbg per flow to ensure TxOpportunity >= 7 bytes
+          if (rbPerFlow < 3)
+            {
+              // terminate allocation
+              rbPerFlow = 0;      
+            }
         }
-     
+
       UlDciListElement_s uldci;
       uldci.m_rnti = (*it).first;
-      uldci.m_rbStart = rbAllocated;
       uldci.m_rbLen = rbPerFlow;
+      bool allocated = false;
+      NS_LOG_INFO (this << " RB Allocated " << rbAllocated << " rbPerFlow " << rbPerFlow);
+      while ((!allocated)&&((rbAllocated + rbPerFlow - 1) < m_cschedCellConfig.m_ulBandwidth) && (rbPerFlow != 0))
+        {
+          // check availability
+          bool free = true;
+          for (uint16_t j = rbAllocated; j < rbAllocated + rbPerFlow; j++)
+            {
+              if (rbMap.at (j) == true)
+                {
+                  free = false;
+                  break;
+                }
+            }
+          if (free)
+            {
+              uldci.m_rbStart = rbAllocated;
+
+              for (uint16_t j = rbAllocated; j < rbAllocated + rbPerFlow; j++)
+                {
+                  rbMap.at (j) = true;
+                  // store info on allocation for managing ul-cqi interpretation
+                  rbgAllocationMap.at (j) = (*it).first;
+                }
+              rbAllocated += rbPerFlow;
+              allocated = true;
+              break;
+            }
+          rbAllocated++;
+          if (rbAllocated + rbPerFlow - 1 > m_cschedCellConfig.m_ulBandwidth)
+            {
+              // limit to physical resources last resource assignment
+              rbPerFlow = m_cschedCellConfig.m_ulBandwidth - rbAllocated;
+              // at least 3 rbg per flow to ensure TxOpportunity >= 7 bytes
+              if (rbPerFlow < 3)
+                {
+                  // terminate allocation
+                  rbPerFlow = 0;                 
+                }
+            }
+        }
+      if (!allocated)
+        {
+          // unable to allocate new resource: finish scheduling
+          m_nextRntiUl = (*it).first;
+          if (ret.m_dciList.size () > 0)
+            {
+              m_schedSapUser->SchedUlConfigInd (ret);
+            }
+          m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> > (params.m_sfnSf, rbgAllocationMap));
+          return;
+        }
+
+
+
       std::map <uint16_t, std::vector <double> >::iterator itCqi = m_ueCqi.find ((*it).first);
       int cqi = 0;
       if (itCqi == m_ueCqi.end ())
@@ -881,8 +1617,8 @@
 
           // translate SINR -> cqi: WILD ACK: same as DL
           double s = log2 ( 1 + (
-                                 std::pow (10, minSinr / 10 )  /
-                                 ( (-std::log (5.0 * 0.00005 )) / 1.5) ));
+                              pow (10, minSinr / 10 )  /
+                              ( (-log (5.0 * 0.00005 )) / 1.5) ));
           cqi = m_amc->GetCqiFromSpectralEfficiency (s);
           if (cqi == 0)
             {
@@ -895,17 +1631,9 @@
               continue; // CQI == 0 means "out of range" (see table 7.2.3-1 of 36.213)
             }
           uldci.m_mcs = m_amc->GetMcsFromCqi (cqi);
+        }
 
-        }
-      
-      rbAllocated += rbPerFlow;
-      // store info on allocation for managing ul-cqi interpretation
-      for (int i = 0; i < rbPerFlow; i++)
-        {
-          rbgAllocationMap.push_back ((*it).first);
-        }
       uldci.m_tbSize = (m_amc->GetTbSizeFromMcs (uldci.m_mcs, rbPerFlow) / 8);
-      NS_LOG_DEBUG (this << " UE " << (*it).first << " startPRB " << (uint32_t)uldci.m_rbStart << " nPRB " << (uint32_t)uldci.m_rbLen << " CQI " << cqi << " MCS " << (uint32_t)uldci.m_mcs << " TBsize " << uldci.m_tbSize << " RbAlloc " << rbAllocated);
       UpdateUlRlcBufferInfo (uldci.m_rnti, uldci.m_tbSize);
       uldci.m_ndi = 1;
       uldci.m_cceIndex = 0;
@@ -920,7 +1648,26 @@
       uldci.m_freqHopping = 0;
       uldci.m_pdcchPowerOffset = 0; // not used
       ret.m_dciList.push_back (uldci);
+      // store DCI for HARQ_PERIOD
+      uint8_t harqId = 0;
+      if (m_harqOn == true)
+        {
+          std::map <uint16_t, uint8_t>::iterator itProcId;
+          itProcId = m_ulHarqCurrentProcessId.find (uldci.m_rnti);
+          if (itProcId == m_ulHarqCurrentProcessId.end ())
+            {
+              NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << uldci.m_rnti);
+            }
+          harqId = (*itProcId).second;
+          std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itDci = m_ulHarqProcessesDciBuffer.find (uldci.m_rnti);
+          if (itDci == m_ulHarqProcessesDciBuffer.end ())
+            {
+              NS_FATAL_ERROR ("Unable to find RNTI entry in UL DCI HARQ buffer for RNTI " << uldci.m_rnti);
+            }
+          (*itDci).second.at (harqId) = uldci;
+        }
 
+      NS_LOG_INFO (this << " UE Allocation RNTI " << (*it).first << " startPRB " << (uint32_t)uldci.m_rbStart << " nPRB " << (uint32_t)uldci.m_rbLen << " CQI " << cqi << " MCS " << (uint32_t)uldci.m_mcs << " TBsize " << uldci.m_tbSize << " RbAlloc " << rbAllocated << " harqId " << (uint16_t)harqId);
 
       it++;
       if (it == m_ceBsrRxed.end ())
@@ -928,31 +1675,35 @@
           // restart from the first
           it = m_ceBsrRxed.begin ();
         }
-      if (rbAllocated == m_cschedCellConfig.m_ulBandwidth)
+      if ((rbAllocated == m_cschedCellConfig.m_ulBandwidth) || (rbPerFlow == 0))
         {
           // Stop allocation: no more PRBs
           m_nextRntiUl = (*it).first;
           break;
         }
     }
-  while ((*it).first != m_nextRntiUl);
+  while (((*it).first != m_nextRntiUl)&&(rbPerFlow!=0));
+
 
   m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> > (params.m_sfnSf, rbgAllocationMap));
   m_schedSapUser->SchedUlConfigInd (ret);
+
   return;
 }
 
 void
 TdTbfqFfMacScheduler::DoSchedUlNoiseInterferenceReq (const struct FfMacSchedSapProvider::SchedUlNoiseInterferenceReqParameters& params)
 {
-  NS_FATAL_ERROR ("unimplemented");
+  NS_LOG_FUNCTION (this);
+  // TODO: Implementation of the API
   return;
 }
 
 void
 TdTbfqFfMacScheduler::DoSchedUlSrInfoReq (const struct FfMacSchedSapProvider::SchedUlSrInfoReqParameters& params)
 {
-  NS_FATAL_ERROR ("unimplemented");
+  NS_LOG_FUNCTION (this);
+  // TODO: Implementation of the API
   return;
 }
 
@@ -962,31 +1713,41 @@
   NS_LOG_FUNCTION (this);
 
   std::map <uint16_t,uint32_t>::iterator it;
-  
+
   for (unsigned int i = 0; i < params.m_macCeList.size (); i++)
-  {
-    if ( params.m_macCeList.at (i).m_macCeType == MacCeListElement_s::BSR )
     {
-      // buffer status report
-      // note that we only consider LCG 0, the other three LCGs are neglected
-      // this is consistent with the assumption in LteUeMac that the first LCG gathers all LCs
-      uint16_t rnti = params.m_macCeList.at (i).m_rnti;
-      it = m_ceBsrRxed.find (rnti);
-      if (it == m_ceBsrRxed.end ())
-      {
-        // create the new entry
-        uint8_t bsrId = params.m_macCeList.at (i).m_macCeValue.m_bufferStatus.at (0);
-        int buffer = BufferSizeLevelBsr::BsrId2BufferSize (bsrId);
-        m_ceBsrRxed.insert ( std::pair<uint16_t, uint32_t > (rnti, buffer)); 
-      }
-      else
-      {
-        // update the buffer size value
-        (*it).second = BufferSizeLevelBsr::BsrId2BufferSize (params.m_macCeList.at (i).m_macCeValue.m_bufferStatus.at (0));
-      }
+      if ( params.m_macCeList.at (i).m_macCeType == MacCeListElement_s::BSR )
+        {
+          // buffer status report
+          // note that this scheduler does not differentiate the
+          // allocation according to which LCGs have more/less bytes
+          // to send.
+          // Hence the BSR of different LCGs are just summed up to get
+          // a total queue size that is used for allocation purposes.
+
+          uint32_t buffer = 0;
+          for (uint8_t lcg = 0; lcg < 4; ++lcg)
+            {
+              uint8_t bsrId = params.m_macCeList.at (i).m_macCeValue.m_bufferStatus.at (lcg);
+              buffer += BufferSizeLevelBsr::BsrId2BufferSize (bsrId);
+            }
+          
+          uint16_t rnti = params.m_macCeList.at (i).m_rnti;
+          NS_LOG_LOGIC (this << "RNTI=" << rnti << " buffer=" << buffer);
+          it = m_ceBsrRxed.find (rnti);
+          if (it == m_ceBsrRxed.end ())
+            {
+              // create the new entry
+              m_ceBsrRxed.insert ( std::pair<uint16_t, uint32_t > (rnti, buffer));
+            }
+          else
+            {
+              // update the buffer size value
+              (*it).second = buffer;
+            }
+        }
     }
-  }
-  
+
   return;
 }
 
@@ -994,49 +1755,48 @@
 TdTbfqFfMacScheduler::DoSchedUlCqiInfoReq (const struct FfMacSchedSapProvider::SchedUlCqiInfoReqParameters& params)
 {
   NS_LOG_FUNCTION (this);
-  // retrieve the allocation for this subframe
+  // retrieve the allocation for this subframe
   switch (m_ulCqiFilter)
     {
-      case FfMacScheduler::SRS_UL_CQI:
-        {
-          // filter all the CQIs that are not SRS based
-          if (params.m_ulCqi.m_type!=UlCqi_s::SRS)
-              {
-                return;
-              }
-        }
+    case FfMacScheduler::SRS_UL_CQI:
+      {
+        // filter all the CQIs that are not SRS based
+        if (params.m_ulCqi.m_type != UlCqi_s::SRS)
+          {
+            return;
+          }
+      }
       break;
-      case FfMacScheduler::PUSCH_UL_CQI:
-        {
-          // filter all the CQIs that are not SRS based
-          if (params.m_ulCqi.m_type!=UlCqi_s::PUSCH)
-            {
-              return;
-            }
-        }
-      case FfMacScheduler::ALL_UL_CQI:
+    case FfMacScheduler::PUSCH_UL_CQI:
+      {
+        // filter all the CQIs that are not SRS based
+        if (params.m_ulCqi.m_type != UlCqi_s::PUSCH)
+          {
+            return;
+          }
+      }
+    case FfMacScheduler::ALL_UL_CQI:
       break;
-      
-      default:
-        NS_FATAL_ERROR ("Unknown UL CQI type");
+
+    default:
+      NS_FATAL_ERROR ("Unknown UL CQI type");
     }
 
   switch (params.m_ulCqi.m_type)
-  {
+    {
     case UlCqi_s::PUSCH:
       {
         std::map <uint16_t, std::vector <uint16_t> >::iterator itMap;
         std::map <uint16_t, std::vector <double> >::iterator itCqi;
+        NS_LOG_DEBUG (this << " Collect PUSCH CQIs of Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf));
         itMap = m_allocationMaps.find (params.m_sfnSf);
         if (itMap == m_allocationMaps.end ())
           {
-            NS_LOG_DEBUG (this << " Does not find info on allocation, size : " << m_allocationMaps.size ());
             return;
           }
         for (uint32_t i = 0; i < (*itMap).second.size (); i++)
           {
             // convert from fixed point notation Sxxxxxxxxxxx.xxx to double
-      //       NS_LOG_INFO (this << " i " << i << " size " << params.m_ulCqi.m_sinr.size () << " mapSIze " << (*itMap).second.size ());
             double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (i));
             itCqi = m_ueCqi.find ((*itMap).second.at (i));
             if (itCqi == m_ueCqi.end ())
@@ -1064,11 +1824,12 @@
               {
                 // update the value
                 (*itCqi).second.at (i) = sinr;
+                NS_LOG_DEBUG (this << " RNTI " << (*itMap).second.at (i) << " RB " << i << " SINR " << sinr);
                 // update correspondent timer
                 std::map <uint16_t, uint32_t>::iterator itTimers;
                 itTimers = m_ueCqiTimers.find ((*itMap).second.at (i));
                 (*itTimers).second = m_cqiTimersThreshold;
-                
+
               }
 
           }
@@ -1096,33 +1857,33 @@
             // create a new entry
             std::vector <double> newCqi;
             for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
-            {
-              double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
-              newCqi.push_back (sinr);
-              NS_LOG_DEBUG (this << " RNTI " << rnti << " new SRS-CQI for RB  " << j << " value " << sinr);
-              
-            }
+              {
+                double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
+                newCqi.push_back (sinr);
+                NS_LOG_INFO (this << " RNTI " << rnti << " new SRS-CQI for RB  " << j << " value " << sinr);
+
+              }
             m_ueCqi.insert (std::pair <uint16_t, std::vector <double> > (rnti, newCqi));
             // generate correspondent timer
             m_ueCqiTimers.insert (std::pair <uint16_t, uint32_t > (rnti, m_cqiTimersThreshold));
           }
         else
-        {
-          // update the values
-          for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
-            {
-              double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
-              (*itCqi).second.at (j) = sinr;
-              NS_LOG_DEBUG (this << " RNTI " << rnti << " update SRS-CQI for RB  " << j << " value " << sinr);
-            }
-          // update correspondent timer
-          std::map <uint16_t, uint32_t>::iterator itTimers;
-          itTimers = m_ueCqiTimers.find (rnti);
-          (*itTimers).second = m_cqiTimersThreshold;
-          
-        }
-        
-        
+          {
+            // update the values
+            for (uint32_t j = 0; j < m_cschedCellConfig.m_ulBandwidth; j++)
+              {
+                double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (j));
+                (*itCqi).second.at (j) = sinr;
+                NS_LOG_INFO (this << " RNTI " << rnti << " update SRS-CQI for RB  " << j << " value " << sinr);
+              }
+            // update correspondent timer
+            std::map <uint16_t, uint32_t>::iterator itTimers;
+            itTimers = m_ueCqiTimers.find (rnti);
+            (*itTimers).second = m_cqiTimersThreshold;
+
+          }
+
+
       }
       break;
     case UlCqi_s::PUCCH_1:
@@ -1134,24 +1895,24 @@
       break;
     default:
       NS_FATAL_ERROR ("Unknown type of UL-CQI");
-  }
+    }
   return;
 }
 
 void
-TdTbfqFfMacScheduler::RefreshDlCqiMaps(void)
+TdTbfqFfMacScheduler::RefreshDlCqiMaps (void)
 {
   // refresh DL CQI P01 Map
   std::map <uint16_t,uint32_t>::iterator itP10 = m_p10CqiTimers.begin ();
-  while (itP10!=m_p10CqiTimers.end ())
+  while (itP10 != m_p10CqiTimers.end ())
     {
-//       NS_LOG_INFO (this << " P10-CQI for user " << (*itP10).first << " is " << (uint32_t)(*itP10).second << " thr " << (uint32_t)m_cqiTimersThreshold);
+      NS_LOG_INFO (this << " P10-CQI for user " << (*itP10).first << " is " << (uint32_t)(*itP10).second << " thr " << (uint32_t)m_cqiTimersThreshold);
       if ((*itP10).second == 0)
         {
           // delete correspondent entries
           std::map <uint16_t,uint8_t>::iterator itMap = m_p10CqiRxed.find ((*itP10).first);
           NS_ASSERT_MSG (itMap != m_p10CqiRxed.end (), " Does not find CQI report for user " << (*itP10).first);
-          NS_LOG_INFO (this << " P10-CQI exired for user " << (*itP10).first);
+          NS_LOG_INFO (this << " P10-CQI expired for user " << (*itP10).first);
           m_p10CqiRxed.erase (itMap);
           std::map <uint16_t,uint32_t>::iterator temp = itP10;
           itP10++;
@@ -1163,18 +1924,18 @@
           itP10++;
         }
     }
-  
+
   // refresh DL CQI A30 Map
   std::map <uint16_t,uint32_t>::iterator itA30 = m_a30CqiTimers.begin ();
-  while (itA30!=m_a30CqiTimers.end ())
+  while (itA30 != m_a30CqiTimers.end ())
     {
-//       NS_LOG_INFO (this << " A30-CQI for user " << (*itA30).first << " is " << (uint32_t)(*itA30).second << " thr " << (uint32_t)m_cqiTimersThreshold);
+      NS_LOG_INFO (this << " A30-CQI for user " << (*itA30).first << " is " << (uint32_t)(*itA30).second << " thr " << (uint32_t)m_cqiTimersThreshold);
       if ((*itA30).second == 0)
         {
           // delete correspondent entries
           std::map <uint16_t,SbMeasResult_s>::iterator itMap = m_a30CqiRxed.find ((*itA30).first);
           NS_ASSERT_MSG (itMap != m_a30CqiRxed.end (), " Does not find CQI report for user " << (*itA30).first);
-          NS_LOG_INFO (this << " A30-CQI exired for user " << (*itA30).first);
+          NS_LOG_INFO (this << " A30-CQI expired for user " << (*itA30).first);
           m_a30CqiRxed.erase (itMap);
           std::map <uint16_t,uint32_t>::iterator temp = itA30;
           itA30++;
@@ -1186,19 +1947,19 @@
           itA30++;
         }
     }
-    
-    return;
+
+  return;
 }
 
 
 void
-TdTbfqFfMacScheduler::RefreshUlCqiMaps(void)
+TdTbfqFfMacScheduler::RefreshUlCqiMaps (void)
 {
   // refresh UL CQI  Map
   std::map <uint16_t,uint32_t>::iterator itUl = m_ueCqiTimers.begin ();
-  while (itUl!=m_ueCqiTimers.end ())
+  while (itUl != m_ueCqiTimers.end ())
     {
-//       NS_LOG_INFO (this << " UL-CQI for user " << (*itUl).first << " is " << (uint32_t)(*itUl).second << " thr " << (uint32_t)m_cqiTimersThreshold);
+      NS_LOG_INFO (this << " UL-CQI for user " << (*itUl).first << " is " << (uint32_t)(*itUl).second << " thr " << (uint32_t)m_cqiTimersThreshold);
       if ((*itUl).second == 0)
         {
           // delete correspondent entries
@@ -1217,52 +1978,41 @@
           itUl++;
         }
     }
-    
-    return;
+
+  return;
 }
 
 void
 TdTbfqFfMacScheduler::UpdateDlRlcBufferInfo (uint16_t rnti, uint8_t lcid, uint16_t size)
 {
-  size = size - 2; // remove the minimum RLC overhead
   std::map<LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator it;
   LteFlowId_t flow (rnti, lcid);
   it = m_rlcBufferReq.find (flow);
-  if (it!=m_rlcBufferReq.end ())
+  if (it != m_rlcBufferReq.end ())
     {
+      NS_LOG_INFO (this << " UE " << rnti << " LC " << (uint16_t)lcid << " txqueue " << (*it).second.m_rlcTransmissionQueueSize << " retxqueue " << (*it).second.m_rlcRetransmissionQueueSize << " status " << (*it).second.m_rlcStatusPduSize << " decrease " << size);
       // Update queues: RLC tx order Status, ReTx, Tx
       // Update status queue
-      if ((*it).second.m_rlcStatusPduSize <= size)
-        {
-          size -= (*it).second.m_rlcStatusPduSize;
-          (*it).second.m_rlcStatusPduSize = 0;
-        }
-      else
+      if (((*it).second.m_rlcStatusPduSize > 0) && (size >= (*it).second.m_rlcStatusPduSize))
         {
-          (*it).second.m_rlcStatusPduSize -= size;
-          return;
+           (*it).second.m_rlcStatusPduSize = 0;
         }
-      // update retransmission queue  
-      if ((*it).second.m_rlcRetransmissionQueueSize <= size)
+      else if (((*it).second.m_rlcRetransmissionQueueSize > 0) && (size >= (*it).second.m_rlcRetransmissionQueueSize))
         {
-          size -= (*it).second.m_rlcRetransmissionQueueSize;
           (*it).second.m_rlcRetransmissionQueueSize = 0;
         }
-      else
-        {
-          (*it).second.m_rlcRetransmissionQueueSize -= size;
-          return;
-        }
-      // update transmission queue
-      if ((*it).second.m_rlcTransmissionQueueSize <= size)
+      else if ((*it).second.m_rlcTransmissionQueueSize > 0)
         {
-          size -= (*it).second.m_rlcTransmissionQueueSize;
-          (*it).second.m_rlcTransmissionQueueSize = 0;
-        }
-      else
-        {
-          (*it).second.m_rlcTransmissionQueueSize -= size;
-          return;
+          // update transmission queue
+          if ((*it).second.m_rlcTransmissionQueueSize <= size)
+            {
+              (*it).second.m_rlcTransmissionQueueSize = 0;
+            }
+          else
+            {
+              size -= 2; // remove minimum RLC overhead due to header
+              (*it).second.m_rlcTransmissionQueueSize -= size;
+            }
         }
     }
   else
@@ -1274,11 +2024,12 @@
 void
 TdTbfqFfMacScheduler::UpdateUlRlcBufferInfo (uint16_t rnti, uint16_t size)
 {
-  
+
   size = size - 2; // remove the minimum RLC overhead
   std::map <uint16_t,uint32_t>::iterator it = m_ceBsrRxed.find (rnti);
-  if (it!=m_ceBsrRxed.end ())
+  if (it != m_ceBsrRxed.end ())
     {
+      NS_LOG_INFO (this << " UE " << rnti << " size " << size << " BSR " << (*it).second);
       if ((*it).second >= size)
         {
           (*it).second -= size;
@@ -1292,7 +2043,7 @@
     {
       NS_LOG_ERROR (this << " Does not find BSR report info of UE " << rnti);
     }
-  
+
 }
 
 void