422 return (lcActive); |
514 return (lcActive); |
423 |
515 |
424 } |
516 } |
425 |
517 |
426 |
518 |
|
519 uint8_t |
|
520 FdMtFfMacScheduler::HarqProcessAvailability (uint16_t rnti) |
|
521 { |
|
522 NS_LOG_FUNCTION (this << rnti); |
|
523 |
|
524 std::map <uint16_t, uint8_t>::iterator it = m_dlHarqCurrentProcessId.find (rnti); |
|
525 if (it == m_dlHarqCurrentProcessId.end ()) |
|
526 { |
|
527 NS_FATAL_ERROR ("No Process Id found for this RNTI " << rnti); |
|
528 } |
|
529 std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find (rnti); |
|
530 if (itStat == m_dlHarqProcessesStatus.end ()) |
|
531 { |
|
532 NS_FATAL_ERROR ("No Process Id Status found for this RNTI " << rnti); |
|
533 } |
|
534 uint8_t i = (*it).second; |
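// search the HARQ processes in a circular fashion, starting from the one
// after the current process id, until a free process (status == 0) is found
// or the search wraps back to the starting point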
|
535 do |
|
536 { |
|
537 i = (i + 1) % HARQ_PROC_NUM; |
|
538 } |
|
539 while ( ((*itStat).second.at (i) != 0)&&(i != (*it).second)); |
|
540 if ((*itStat).second.at (i) == 0) |
|
541 { |
|
542 return (true); |
|
543 } |
|
544 else |
|
545 { |
|
546 return (false); // no HARQ process available |
|
547 } |
|
548 } |
|
549 |
|
550 |
|
551 |
|
552 uint8_t |
|
553 FdMtFfMacScheduler::UpdateHarqProcessId (uint16_t rnti) |
|
554 { |
|
555 NS_LOG_FUNCTION (this << rnti); |
|
556 |
|
557 if (m_harqOn == false) |
|
558 { |
|
559 return (0); |
|
560 } |
|
561 |
|
562 |
|
563 std::map <uint16_t, uint8_t>::iterator it = m_dlHarqCurrentProcessId.find (rnti); |
|
564 if (it == m_dlHarqCurrentProcessId.end ()) |
|
565 { |
|
566 NS_FATAL_ERROR ("No Process Id found for this RNTI " << rnti); |
|
567 } |
|
568 std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find (rnti); |
|
569 if (itStat == m_dlHarqProcessesStatus.end ()) |
|
570 { |
|
571 NS_FATAL_ERROR ("No Process Id Status found for this RNTI " << rnti); |
|
572 } |
|
573 uint8_t i = (*it).second; |
|
574 do |
|
575 { |
|
576 i = (i + 1) % HARQ_PROC_NUM; |
|
577 } |
|
578 while ( ((*itStat).second.at (i) != 0)&&(i != (*it).second)); |
|
579 if ((*itStat).second.at (i) == 0) |
|
580 { |
|
581 (*it).second = i; |
|
582 (*itStat).second.at (i) = 1; |
|
583 } |
|
584 else |
|
585 { |
|
586 NS_FATAL_ERROR ("No HARQ process available for RNTI " << rnti << "; check before update with HarqProcessAvailability"); |
|
587 } |
|
588 |
|
589 return ((*it).second); |
|
590 } |
|
591 |
|
592 |
|
593 void |
|
594 FdMtFfMacScheduler::RefreshHarqProcesses () |
|
595 { |
|
596 NS_LOG_FUNCTION (this); |
|
597 |
|
598 std::map <uint16_t, DlHarqProcessesTimer_t>::iterator itTimers; |
|
599 for (itTimers = m_dlHarqProcessesTimer.begin (); itTimers != m_dlHarqProcessesTimer.end (); itTimers ++) |
|
600 { |
|
601 for (uint16_t i = 0; i < HARQ_PROC_NUM; i++) |
|
602 { |
|
603 if ((*itTimers).second.at (i) == HARQ_DL_TIMEOUT) |
|
604 { |
|
605 // reset HARQ process |
|
606 |
|
607 NS_LOG_DEBUG (this << " Reset HARQ proc " << i << " for RNTI " << (*itTimers).first); |
|
608 std::map <uint16_t, DlHarqProcessesStatus_t>::iterator itStat = m_dlHarqProcessesStatus.find ((*itTimers).first); |
|
609 if (itStat == m_dlHarqProcessesStatus.end ()) |
|
610 { |
|
611 NS_FATAL_ERROR ("No Process Id Status found for this RNTI " << (*itTimers).first); |
|
612 } |
|
613 (*itStat).second.at (i) = 0; |
|
614 (*itTimers).second.at (i) = 0; |
|
615 } |
|
616 else |
|
617 { |
|
618 (*itTimers).second.at (i)++; |
|
619 } |
|
620 } |
|
621 } |
|
622 |
|
623 } |
|
624 |
|
625 |
427 void |
626 void |
428 FdMtFfMacScheduler::DoSchedDlTriggerReq (const struct FfMacSchedSapProvider::SchedDlTriggerReqParameters& params) |
627 FdMtFfMacScheduler::DoSchedDlTriggerReq (const struct FfMacSchedSapProvider::SchedDlTriggerReqParameters& params) |
429 { |
628 { |
430 NS_LOG_FUNCTION (this << " Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf)); |
629 NS_LOG_FUNCTION (this << " Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf)); |
431 // API generated by RLC for triggering the scheduling of a DL subframe |
630 // API generated by RLC for triggering the scheduling of a DL subframe |
432 |
631 |
433 |
632 |
434 // evaluate the relative channel quality indicator for each UE per each RBG |
633 // evaluate the relative channel quality indicator for each UE per each RBG |
435 // (since we are using allocation type 0 the smallest unit of allocation is the RBG) |
634 // (since we are using allocation type 0 the smallest unit of allocation is the RBG) |
436 // Resource allocation type 0 (see sec 7.1.6.1 of 36.213) |
635 // Resource allocation type 0 (see sec 7.1.6.1 of 36.213) |
437 |
636 |
438 RefreshDlCqiMaps (); |
637 RefreshDlCqiMaps (); |
439 |
638 |
440 int rbgSize = GetRbgSize (m_cschedCellConfig.m_dlBandwidth); |
639 int rbgSize = GetRbgSize (m_cschedCellConfig.m_dlBandwidth); |
441 int rbgNum = m_cschedCellConfig.m_dlBandwidth / rbgSize; |
640 int rbgNum = m_cschedCellConfig.m_dlBandwidth / rbgSize; |
442 std::map <uint16_t, std::vector <uint16_t> > allocationMap; |
641 std::map <uint16_t, std::vector <uint16_t> > allocationMap; // RBs map per RNTI |
|
642 std::vector <bool> rbgMap; // global RBGs map |
|
643 uint16_t rbgAllocatedNum = 0; |
|
644 std::set <uint16_t> rntiAllocated; |
|
645 rbgMap.resize (m_cschedCellConfig.m_dlBandwidth / rbgSize, false); |
|
646 FfMacSchedSapUser::SchedDlConfigIndParameters ret; |
|
647 |
|
648 // RACH Allocation |
|
649 m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0); |
|
650 uint16_t rbStart = 0; |
|
651 std::vector <struct RachListElement_s>::iterator itRach; |
|
652 for (itRach = m_rachList.begin (); itRach != m_rachList.end (); itRach++) |
|
653 { |
|
654 NS_ASSERT_MSG (m_amc->GetTbSizeFromMcs (m_ulGrantMcs, m_cschedCellConfig.m_ulBandwidth) > (*itRach).m_estimatedSize, " Default UL Grant MCS does not allow sending RACH messages"); |
|
655 BuildRarListElement_s newRar; |
|
656 newRar.m_rnti = (*itRach).m_rnti; |
|
657 // DL-RACH Allocation |
|
658 // Ideal: no need to configure m_dci |
|
659 // UL-RACH Allocation |
|
660 newRar.m_grant.m_rnti = newRar.m_rnti; |
|
661 newRar.m_grant.m_mcs = m_ulGrantMcs; |
|
662 uint16_t rbLen = 1; |
|
663 uint16_t tbSizeBits = 0; |
|
664 // find the lowest TB size that fits the UL grant estimated size |
|
665 while ((tbSizeBits < (*itRach).m_estimatedSize) && (rbStart + rbLen < m_cschedCellConfig.m_ulBandwidth)) |
|
666 { |
|
667 rbLen++; |
|
668 tbSizeBits = m_amc->GetTbSizeFromMcs (m_ulGrantMcs, rbLen); |
|
669 } |
|
670 if (tbSizeBits < (*itRach).m_estimatedSize) |
|
671 { |
|
672 // no more allocation space: finish allocation |
|
673 break; |
|
674 } |
|
675 newRar.m_grant.m_rbStart = rbStart; |
|
676 newRar.m_grant.m_rbLen = rbLen; |
|
677 newRar.m_grant.m_tbSize = tbSizeBits / 8; |
|
678 newRar.m_grant.m_hopping = false; |
|
679 newRar.m_grant.m_tpc = 0; |
|
680 newRar.m_grant.m_cqiRequest = false; |
|
681 newRar.m_grant.m_ulDelay = false; |
|
682 NS_LOG_INFO (this << " UL grant allocated to RNTI " << (*itRach).m_rnti << " rbStart " << rbStart << " rbLen " << rbLen << " MCS " << m_ulGrantMcs << " tbSize " << newRar.m_grant.m_tbSize); |
|
683 for (uint16_t i = rbStart; i < rbStart + rbLen; i++) |
|
684 { |
|
685 m_rachAllocationMap.at (i) = (*itRach).m_rnti; |
|
686 } |
|
687 rbStart = rbStart + rbLen; |
|
688 |
|
689 ret.m_buildRarList.push_back (newRar); |
|
690 } |
|
691 m_rachList.clear (); |
|
692 |
|
693 |
|
694 // Process DL HARQ feedback |
|
695 RefreshHarqProcesses (); |
|
696 // retrieve past HARQ retx buffered |
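// HARQ feedback that could not be served in previous TTIs is kept in
// m_dlInfoListBuffered and merged with the feedback received in this TTI;
// whatever cannot be served now is stored back at the end of this block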
|
697 if (m_dlInfoListBuffered.size () > 0) |
|
698 { |
|
699 if (params.m_dlInfoList.size () > 0) |
|
700 { |
|
701 NS_LOG_INFO (this << " Received DL-HARQ feedback"); |
|
702 m_dlInfoListBuffered.insert (m_dlInfoListBuffered.end (), params.m_dlInfoList.begin (), params.m_dlInfoList.end ()); |
|
703 } |
|
704 } |
|
705 else |
|
706 { |
|
707 if (params.m_dlInfoList.size () > 0) |
|
708 { |
|
709 m_dlInfoListBuffered = params.m_dlInfoList; |
|
710 } |
|
711 } |
|
712 if (m_harqOn == false) |
|
713 { |
|
714 // Ignore HARQ feedback |
|
715 m_dlInfoListBuffered.clear (); |
|
716 } |
|
717 std::vector <struct DlInfoListElement_s> dlInfoListUntxed; |
|
718 for (uint16_t i = 0; i < m_dlInfoListBuffered.size (); i++) |
|
719 { |
|
720 std::set <uint16_t>::iterator itRnti = rntiAllocated.find (m_dlInfoListBuffered.at (i).m_rnti); |
|
721 if (itRnti != rntiAllocated.end ()) |
|
722 { |
|
723 // RNTI already allocated for retx |
|
724 continue; |
|
725 } |
|
726 uint8_t nLayers = m_dlInfoListBuffered.at (i).m_harqStatus.size (); |
|
727 std::vector <bool> retx; |
|
728 NS_LOG_INFO (this << " Processing DL HARQ feedback"); |
|
729 if (nLayers == 1) |
|
730 { |
|
731 retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlInfoListElement_s::NACK); |
|
732 retx.push_back (false); |
|
733 } |
|
734 else |
|
735 { |
|
736 retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlInfoListElement_s::NACK); |
|
737 retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (1) == DlInfoListElement_s::NACK); |
|
738 } |
|
739 if (retx.at (0) || retx.at (1)) |
|
740 { |
|
741 // retrieve HARQ process information |
|
742 uint16_t rnti = m_dlInfoListBuffered.at (i).m_rnti; |
|
743 uint8_t harqId = m_dlInfoListBuffered.at (i).m_harqProcessId; |
|
744 NS_LOG_INFO (this << " HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId); |
|
745 std::map <uint16_t, DlHarqProcessesDciBuffer_t>::iterator itHarq = m_dlHarqProcessesDciBuffer.find (rnti); |
|
746 if (itHarq == m_dlHarqProcessesDciBuffer.end ()) |
|
747 { |
|
748 NS_FATAL_ERROR ("No info found in HARQ buffer for UE " << rnti); |
|
749 } |
|
750 |
|
751 DlDciListElement_s dci = (*itHarq).second.at (harqId); |
|
752 int rv = 0; |
|
753 if (dci.m_rv.size () == 1) |
|
754 { |
|
755 rv = dci.m_rv.at (0); |
|
756 } |
|
757 else |
|
758 { |
|
759 rv = (dci.m_rv.at (0) > dci.m_rv.at (1) ? dci.m_rv.at (0) : dci.m_rv.at (1)); |
|
760 } |
|
761 |
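// the redundancy version ranges from 0 to 3; reaching rv == 3 means the TB
// has already been (re)transmitted the maximum number of times, so the
// HARQ process is dropped and its RLC PDU buffers are cleared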
|
762 if (rv == 3) |
|
763 { |
|
764 // maximum number of retx reached -> drop process |
|
765 NS_LOG_INFO ("Maximum number of retransmissions reached -> drop process"); |
|
766 std::map <uint16_t, DlHarqProcessesStatus_t>::iterator it = m_dlHarqProcessesStatus.find (rnti); |
|
767 if (it == m_dlHarqProcessesStatus.end ()) |
|
768 { |
|
769 NS_LOG_ERROR ("No info found in HARQ buffer for UE (might change eNB) " << m_dlInfoListBuffered.at (i).m_rnti); |
|
770 } |
|
771 (*it).second.at (harqId) = 0; |
|
772 std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (rnti); |
|
773 if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ()) |
|
774 { |
|
775 NS_FATAL_ERROR ("Unable to find RlcPduList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti); |
|
776 } |
|
777 for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++) |
|
778 { |
|
779 (*itRlcPdu).second.at (k).at (harqId).clear (); |
|
780 } |
|
781 continue; |
|
782 } |
|
783 // check the feasibility of retransmitting on the same RBGs |
|
784 // translate the DCI to Spectrum framework |
|
785 std::vector <int> dciRbg; |
|
786 uint32_t mask = 0x1; |
|
787 NS_LOG_INFO ("Original RBGs " << dci.m_rbBitmap << " rnti " << dci.m_rnti); |
|
788 for (int j = 0; j < 32; j++) |
|
789 { |
|
790 if (((dci.m_rbBitmap & mask) >> j) == 1) |
|
791 { |
|
792 dciRbg.push_back (j); |
|
793 NS_LOG_INFO ("\t" << j); |
|
794 } |
|
795 mask = (mask << 1); |
|
796 } |
|
797 bool free = true; |
|
798 for (uint8_t j = 0; j < dciRbg.size (); j++) |
|
799 { |
|
800 if (rbgMap.at (dciRbg.at (j)) == true) |
|
801 { |
|
802 free = false; |
|
803 break; |
|
804 } |
|
805 } |
|
806 if (free) |
|
807 { |
|
808 // use the same RBGs for the retx |
|
809 // reserve RBGs |
|
810 for (uint8_t j = 0; j < dciRbg.size (); j++) |
|
811 { |
|
812 rbgMap.at (dciRbg.at (j)) = true; |
|
813 NS_LOG_INFO ("RBG " << dciRbg.at (j) << " assigned"); |
|
814 rbgAllocatedNum++; |
|
815 } |
|
816 |
|
817 NS_LOG_INFO (this << " Send retx in the same RBGs"); |
|
818 } |
|
819 else |
|
820 { |
|
821 // find RBGs for sending HARQ retx |
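// (scan for free RBGs starting right after the last RBG used by the original
// transmission; if enough free RBGs cannot be found, the retx is buffered
// and attempted again in a later TTI)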
|
822 uint8_t j = 0; |
|
823 uint8_t rbgId = (dciRbg.at (dciRbg.size () - 1) + 1) % rbgNum; |
|
824 uint8_t startRbg = dciRbg.at (dciRbg.size () - 1); |
|
825 std::vector <bool> rbgMapCopy = rbgMap; |
|
826 while ((j < dciRbg.size ())&&(startRbg != rbgId)) |
|
827 { |
|
828 if (rbgMapCopy.at (rbgId) == false) |
|
829 { |
|
830 rbgMapCopy.at (rbgId) = true; |
|
831 dciRbg.at (j) = rbgId; |
|
832 j++; |
|
833 } |
|
834 rbgId = (rbgId + 1) % rbgNum; // wrap around so the search covers all RBGs |
|
835 } |
|
836 if (j == dciRbg.size ()) |
|
837 { |
|
838 // find new RBGs -> update DCI map |
|
839 uint32_t rbgMask = 0; |
|
840 for (uint16_t k = 0; k < dciRbg.size (); k++) |
|
841 { |
|
842 rbgMask = rbgMask + (0x1 << dciRbg.at (k)); |
|
843 rbgAllocatedNum++; |
|
844 } |
|
845 dci.m_rbBitmap = rbgMask; |
|
846 rbgMap = rbgMapCopy; |
|
847 NS_LOG_INFO (this << " Move retx in RBGs " << dciRbg.size ()); |
|
848 } |
|
849 else |
|
850 { |
|
851 // HARQ retx cannot be performed on this TTI -> store it |
|
852 dlInfoListUntxed.push_back (m_dlInfoListBuffered.at (i)); |
|
853 NS_LOG_INFO (this << " No resource for this retx -> buffer it"); |
|
854 } |
|
855 } |
|
856 // retrieve RLC PDU list for retx TBsize and update DCI |
|
857 BuildDataListElement_s newEl; |
|
858 std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (rnti); |
|
859 if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ()) |
|
860 { |
|
861 NS_FATAL_ERROR ("Unable to find RlcPduList in HARQ buffer for RNTI " << rnti); |
|
862 } |
|
863 for (uint8_t j = 0; j < nLayers; j++) |
|
864 { |
|
865 if (retx.at (j)) |
|
866 { |
|
867 if (j >= dci.m_ndi.size ()) |
|
868 { |
|
869 // for avoiding errors in MIMO transient phases |
|
870 dci.m_ndi.push_back (0); |
|
871 dci.m_rv.push_back (0); |
|
872 dci.m_mcs.push_back (0); |
|
873 dci.m_tbsSize.push_back (0); |
|
874 NS_LOG_INFO (this << " layer " << (uint16_t)j << " not transmitted (MIMO transition)"); |
|
875 } |
|
876 else |
|
877 { |
|
878 dci.m_ndi.at (j) = 0; |
|
879 dci.m_rv.at (j)++; |
|
880 (*itHarq).second.at (harqId).m_rv.at (j)++; |
|
881 NS_LOG_INFO (this << " layer " << (uint16_t)j << " RV " << (uint16_t)dci.m_rv.at (j)); |
|
882 } |
|
883 } |
|
884 else |
|
885 { |
|
886 // empty TB of layer j |
|
887 dci.m_ndi.at (j) = 0; |
|
888 dci.m_rv.at (j) = 0; |
|
889 dci.m_mcs.at (j) = 0; |
|
890 dci.m_tbsSize.at (j) = 0; |
|
891 NS_LOG_INFO (this << " layer " << (uint16_t)j << " no retx"); |
|
892 } |
|
893 } |
|
894 for (uint16_t k = 0; k < (*itRlcPdu).second.at (0).at (dci.m_harqProcess).size (); k++) |
|
895 { |
|
896 std::vector <struct RlcPduListElement_s> rlcPduListPerLc; |
|
897 for (uint8_t j = 0; j < nLayers; j++) |
|
898 { |
|
899 if (retx.at (j)) |
|
900 { |
|
901 if (j < dci.m_ndi.size ()) |
|
902 { |
|
903 rlcPduListPerLc.push_back ((*itRlcPdu).second.at (j).at (dci.m_harqProcess).at (k)); |
|
904 } |
|
905 } |
|
906 } |
|
907 |
|
908 if (rlcPduListPerLc.size () > 0) |
|
909 { |
|
910 newEl.m_rlcPduList.push_back (rlcPduListPerLc); |
|
911 } |
|
912 } |
|
913 newEl.m_rnti = rnti; |
|
914 newEl.m_dci = dci; |
|
915 (*itHarq).second.at (harqId).m_rv = dci.m_rv; |
|
916 // refresh timer |
|
917 std::map <uint16_t, DlHarqProcessesTimer_t>::iterator itHarqTimer = m_dlHarqProcessesTimer.find (rnti); |
|
918 if (itHarqTimer== m_dlHarqProcessesTimer.end ()) |
|
919 { |
|
920 NS_FATAL_ERROR ("Unable to find HARQ timer for RNTI " << (uint16_t)rnti); |
|
921 } |
|
922 (*itHarqTimer).second.at (harqId) = 0; |
|
923 ret.m_buildDataList.push_back (newEl); |
|
924 rntiAllocated.insert (rnti); |
|
925 } |
|
926 else |
|
927 { |
|
928 // update HARQ process status |
|
929 NS_LOG_INFO (this << " HARQ received ACK for UE " << m_dlInfoListBuffered.at (i).m_rnti); |
|
930 std::map <uint16_t, DlHarqProcessesStatus_t>::iterator it = m_dlHarqProcessesStatus.find (m_dlInfoListBuffered.at (i).m_rnti); |
|
931 if (it == m_dlHarqProcessesStatus.end ()) |
|
932 { |
|
933 NS_FATAL_ERROR ("No info found in HARQ buffer for UE " << m_dlInfoListBuffered.at (i).m_rnti); |
|
934 } |
|
935 (*it).second.at (m_dlInfoListBuffered.at (i).m_harqProcessId) = 0; |
|
936 std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find (m_dlInfoListBuffered.at (i).m_rnti); |
|
937 if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ()) |
|
938 { |
|
939 NS_FATAL_ERROR ("Unable to find RlcPduList in HARQ buffer for RNTI " << m_dlInfoListBuffered.at (i).m_rnti); |
|
940 } |
|
941 for (uint16_t k = 0; k < (*itRlcPdu).second.size (); k++) |
|
942 { |
|
943 (*itRlcPdu).second.at (k).at (m_dlInfoListBuffered.at (i).m_harqProcessId).clear (); |
|
944 } |
|
945 } |
|
946 } |
|
947 m_dlInfoListBuffered.clear (); |
|
948 m_dlInfoListBuffered = dlInfoListUntxed; |
|
949 |
|
950 |
|
951 |
443 for (int i = 0; i < rbgNum; i++) |
952 for (int i = 0; i < rbgNum; i++) |
444 { |
953 { |
445 std::set <uint16_t>::iterator it; |
954 NS_LOG_INFO (this << " ALLOCATION for RBG " << i << " of " << rbgNum); |
446 std::set <uint16_t>::iterator itMax = m_flowStatsDl.end (); |
955 if (rbgMap.at (i) == false) |
447 double rcqiMax = 0.0; |
956 { |
448 for (it = m_flowStatsDl.begin (); it != m_flowStatsDl.end (); it++) |
957 std::set <uint16_t>::iterator it; |
449 { |
958 std::set <uint16_t>::iterator itMax = m_flowStatsDl.end (); |
450 std::map <uint16_t,SbMeasResult_s>::iterator itCqi; |
959 double rcqiMax = 0.0; |
451 itCqi = m_a30CqiRxed.find ((*it)); |
960 for (it = m_flowStatsDl.begin (); it != m_flowStatsDl.end (); it++) |
452 std::map <uint16_t,uint8_t>::iterator itTxMode; |
961 { |
453 itTxMode = m_uesTxMode.find ((*it)); |
962 std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it)); |
454 if (itTxMode == m_uesTxMode.end ()) |
963 if ((itRnti != rntiAllocated.end ())||(!HarqProcessAvailability ((*it)))) |
455 { |
964 { |
456 NS_FATAL_ERROR ("No Transmission Mode info on user " << (*it)); |
965 // UE already allocated for HARQ retx or with no HARQ process available -> skip it |
457 } |
966 if (itRnti != rntiAllocated.end ()) |
458 int nLayer = TransmissionModesLayers::TxMode2LayerNum ((*itTxMode).second); |
967 { |
459 std::vector <uint8_t> sbCqi; |
968 NS_LOG_DEBUG (this << " RNTI discarded for HARQ tx " << (uint16_t)(*it)); |
460 if (itCqi == m_a30CqiRxed.end ()) |
969 } |
461 { |
970 if (!HarqProcessAvailability ((*it))) |
462 for (uint8_t k = 0; k < nLayer; k++) |
971 { |
463 { |
972 NS_LOG_DEBUG (this << " RNTI discarded for HARQ id " << (uint16_t)(*it)); |
464 sbCqi.push_back (1); // start with lowest value |
973 } |
465 } |
974 continue; |
|
975 } |
|
976 |
|
977 std::map <uint16_t,SbMeasResult_s>::iterator itCqi; |
|
978 itCqi = m_a30CqiRxed.find ((*it)); |
|
979 std::map <uint16_t,uint8_t>::iterator itTxMode; |
|
980 itTxMode = m_uesTxMode.find ((*it)); |
|
981 if (itTxMode == m_uesTxMode.end ()) |
|
982 { |
|
983 NS_FATAL_ERROR ("No Transmission Mode info on user " << (*it)); |
|
984 } |
|
985 int nLayer = TransmissionModesLayers::TxMode2LayerNum ((*itTxMode).second); |
|
986 std::vector <uint8_t> sbCqi; |
|
987 if (itCqi == m_a30CqiRxed.end ()) |
|
988 { |
|
989 for (uint8_t k = 0; k < nLayer; k++) |
|
990 { |
|
991 sbCqi.push_back (1); // start with lowest value |
|
992 } |
|
993 } |
|
994 else |
|
995 { |
|
996 sbCqi = (*itCqi).second.m_higherLayerSelected.at (i).m_sbCqi; |
|
997 } |
|
998 uint8_t cqi1 = sbCqi.at (0); |
|
999 uint8_t cqi2 = 1; |
|
1000 if (sbCqi.size () > 1) |
|
1001 { |
|
1002 cqi2 = sbCqi.at (1); |
|
1003 } |
|
1004 if ((cqi1 > 0)||(cqi2 > 0)) // CQI == 0 means "out of range" (see table 7.2.3-1 of 36.213) |
|
1005 { |
|
1006 if (LcActivePerFlow ((*it)) > 0) |
|
1007 { |
|
1008 // this UE has data to transmit |
|
1009 double achievableRate = 0.0; |
|
1010 uint8_t mcs = 0; |
|
1011 for (uint8_t k = 0; k < nLayer; k++) |
|
1012 { |
|
1013 if (sbCqi.size () > k) |
|
1014 { |
|
1015 mcs = m_amc->GetMcsFromCqi (sbCqi.at (k)); |
|
1016 } |
|
1017 else |
|
1018 { |
|
1019 // no info on this subband -> worst MCS |
|
1020 mcs = 0; |
|
1021 } |
|
1022 achievableRate += ((m_amc->GetTbSizeFromMcs (mcs, rbgSize) / 8) / 0.001); // = TB size / TTI |
|
1023 } |
|
1024 |
|
1025 double rcqi = achievableRate; |
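// FD-MT metric: on each RBG the UE with the highest achievable rate wins;
// no fairness term is applied (pure maximum-throughput scheduling)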
|
1026 NS_LOG_INFO (this << " RNTI " << (*it) << " MCS " << (uint32_t)mcs << " achievableRate " << achievableRate << " RCQI " << rcqi); |
|
1027 |
|
1028 if (rcqi > rcqiMax) |
|
1029 { |
|
1030 rcqiMax = rcqi; |
|
1031 itMax = it; |
|
1032 } |
|
1033 } |
|
1034 } // end if cqi |
|
1035 |
|
1036 } // end for m_flowStatsDl |
|
1037 |
|
1038 if (itMax == m_flowStatsDl.end ()) |
|
1039 { |
|
1040 // no UE available for this RB |
|
1041 NS_LOG_INFO (this << " no UE found"); |
466 } |
1042 } |
467 else |
1043 else |
468 { |
1044 { |
469 sbCqi = (*itCqi).second.m_higherLayerSelected.at (i).m_sbCqi; |
1045 rbgMap.at (i) = true; |
470 } |
1046 std::map <uint16_t, std::vector <uint16_t> >::iterator itMap; |
471 uint8_t cqi1 = sbCqi.at (0); |
1047 itMap = allocationMap.find ((*itMax)); |
472 uint8_t cqi2 = 1; |
1048 if (itMap == allocationMap.end ()) |
473 if (sbCqi.size () > 1) |
1049 { |
474 { |
1050 // insert new element |
475 cqi2 = sbCqi.at (1); |
1051 std::vector <uint16_t> tempMap; |
476 } |
1052 tempMap.push_back (i); |
477 |
1053 allocationMap.insert (std::pair <uint16_t, std::vector <uint16_t> > ((*itMax), tempMap)); |
478 if ((cqi1 > 0)||(cqi2 > 0)) // CQI == 0 means "out of range" (see table 7.2.3-1 of 36.213) |
1054 } |
479 { |
1055 else |
480 if (LcActivePerFlow ((*it)) > 0) |
1056 { |
481 { |
1057 (*itMap).second.push_back (i); |
482 // this UE has data to transmit |
1058 } |
483 double achievableRate = 0.0; |
1059 NS_LOG_INFO (this << " UE assigned " << (*itMax)); |
484 for (uint8_t k = 0; k < nLayer; k++) |
1060 } |
485 { |
1061 } // end if RBG free |
486 uint8_t mcs = 0; |
|
487 if (sbCqi.size () > k) |
|
488 { |
|
489 mcs = m_amc->GetMcsFromCqi (sbCqi.at (k)); |
|
490 } |
|
491 else |
|
492 { |
|
493 // no info on this subband -> worst MCS |
|
494 mcs = 0; |
|
495 } |
|
496 achievableRate += ((m_amc->GetTbSizeFromMcs (mcs, rbgSize) / 8) / 0.001); // = TB size / TTI |
|
497 } |
|
498 |
|
499 double rcqi = achievableRate; |
|
500 |
|
501 if (rcqi > rcqiMax) |
|
502 { |
|
503 rcqiMax = rcqi; |
|
504 itMax = it; |
|
505 } |
|
506 } |
|
507 } // end if cqi |
|
508 } // end for m_flowStatsDl |
|
509 |
|
510 if (itMax == m_flowStatsDl.end ()) |
|
511 { |
|
512 // no UE available for this RB |
|
513 NS_LOG_DEBUG (this << " no UE found"); |
|
514 } |
|
515 else |
|
516 { |
|
517 std::map <uint16_t, std::vector <uint16_t> >::iterator itMap; |
|
518 itMap = allocationMap.find ((*itMax)); |
|
519 if (itMap == allocationMap.end ()) |
|
520 { |
|
521 // insert new element |
|
522 std::vector <uint16_t> tempMap; |
|
523 tempMap.push_back (i); |
|
524 allocationMap.insert (std::pair <uint16_t, std::vector <uint16_t> > ((*itMax), tempMap)); |
|
525 } |
|
526 else |
|
527 { |
|
528 (*itMap).second.push_back (i); |
|
529 } |
|
530 } |
|
531 } // end for RBGs |
1062 } // end for RBGs |
532 |
1063 |
533 // generate the transmission opportunities by grouping the RBGs of the same RNTI and |
1064 // generate the transmission opportunities by grouping the RBGs of the same RNTI and |
534 // creating the correspondent DCIs |
1065 // creating the correspondent DCIs |
535 FfMacSchedSapUser::SchedDlConfigIndParameters ret; |
|
536 std::map <uint16_t, std::vector <uint16_t> >::iterator itMap = allocationMap.begin (); |
1066 std::map <uint16_t, std::vector <uint16_t> >::iterator itMap = allocationMap.begin (); |
537 while (itMap != allocationMap.end ()) |
1067 while (itMap != allocationMap.end ()) |
538 { |
1068 { |
539 // create new BuildDataListElement_s for this LC |
1069 // create new BuildDataListElement_s for this LC |
540 BuildDataListElement_s newEl; |
1070 BuildDataListElement_s newEl; |
541 newEl.m_rnti = (*itMap).first; |
1071 newEl.m_rnti = (*itMap).first; |
542 // create the DlDciListElement_s |
1072 // create the DlDciListElement_s |
543 DlDciListElement_s newDci; |
1073 DlDciListElement_s newDci; |
544 std::vector <struct RlcPduListElement_s> newRlcPduLe; |
|
545 newDci.m_rnti = (*itMap).first; |
1074 newDci.m_rnti = (*itMap).first; |
|
1075 newDci.m_harqProcess = UpdateHarqProcessId ((*itMap).first); |
546 |
1076 |
547 uint16_t lcActives = LcActivePerFlow ((*itMap).first); |
1077 uint16_t lcActives = LcActivePerFlow ((*itMap).first); |
548 uint16_t rbgPerRnti = (*itMap).second.size (); |
1078 NS_LOG_INFO (this << " Allocate user " << newEl.m_rnti << " active LCs " << lcActives); |
|
1079 uint16_t rbgPerRnti = (*itMap).second.size (); |
549 std::map <uint16_t,SbMeasResult_s>::iterator itCqi; |
1080 std::map <uint16_t,SbMeasResult_s>::iterator itCqi; |
550 itCqi = m_a30CqiRxed.find ((*itMap).first); |
1081 itCqi = m_a30CqiRxed.find ((*itMap).first); |
551 std::map <uint16_t,uint8_t>::iterator itTxMode; |
1082 std::map <uint16_t,uint8_t>::iterator itTxMode; |
552 itTxMode = m_uesTxMode.find ((*itMap).first); |
1083 itTxMode = m_uesTxMode.find ((*itMap).first); |
553 if (itTxMode == m_uesTxMode.end ()) |
1084 if (itTxMode == m_uesTxMode.end ()) |
592 for (uint8_t j = 0; j < nLayer; j++) |
1124 for (uint8_t j = 0; j < nLayer; j++) |
593 { |
1125 { |
594 worstCqi.at (j) = 1; // try with lowest MCS in RBG with no info on channel |
1126 worstCqi.at (j) = 1; // try with lowest MCS in RBG with no info on channel |
595 } |
1127 } |
596 } |
1128 } |
|
1129 for (uint8_t j = 0; j < nLayer; j++) |
|
1130 { |
|
1131 NS_LOG_INFO (this << " Layer " << (uint16_t)j << " CQI selected " << (uint16_t)worstCqi.at (j)); |
|
1132 } |
597 uint32_t bytesTxed = 0; |
1133 uint32_t bytesTxed = 0; |
598 for (uint8_t j = 0; j < nLayer; j++) |
1134 for (uint8_t j = 0; j < nLayer; j++) |
599 { |
1135 { |
600 newDci.m_mcs.push_back (m_amc->GetMcsFromCqi (worstCqi.at (j))); |
1136 newDci.m_mcs.push_back (m_amc->GetMcsFromCqi (worstCqi.at (j))); |
601 int tbSize = (m_amc->GetTbSizeFromMcs (newDci.m_mcs.at (j), rbgPerRnti * rbgSize) / 8); // (size of TB in bytes according to table 7.1.7.2.1-1 of 36.213) |
1137 int tbSize = (m_amc->GetTbSizeFromMcs (newDci.m_mcs.at (j), rbgPerRnti * rbgSize) / 8); // (size of TB in bytes according to table 7.1.7.2.1-1 of 36.213) |
602 newDci.m_tbsSize.push_back (tbSize); |
1138 newDci.m_tbsSize.push_back (tbSize); |
|
1139 NS_LOG_INFO (this << " Layer " << (uint16_t)j << " MCS selected " << m_amc->GetMcsFromCqi (worstCqi.at (j))); |
603 bytesTxed += tbSize; |
1140 bytesTxed += tbSize; |
604 } |
1141 } |
605 |
1142 |
606 newDci.m_resAlloc = 0; // only allocation type 0 at this stage |
1143 newDci.m_resAlloc = 0; // only allocation type 0 at this stage |
607 newDci.m_rbBitmap = 0; // TBD (32 bit bitmap see 7.1.6 of 36.213) |
1144 newDci.m_rbBitmap = 0; // TBD (32 bit bitmap see 7.1.6 of 36.213) |
608 uint32_t rbgMask = 0; |
1145 uint32_t rbgMask = 0; |
609 for (uint16_t k = 0; k < (*itMap).second.size (); k++) |
1146 for (uint16_t k = 0; k < (*itMap).second.size (); k++) |
610 { |
1147 { |
611 rbgMask = rbgMask + (0x1 << (*itMap).second.at (k)); |
1148 rbgMask = rbgMask + (0x1 << (*itMap).second.at (k)); |
|
1149 NS_LOG_INFO (this << " Allocated RBG " << (*itMap).second.at (k)); |
612 } |
1150 } |
613 newDci.m_rbBitmap = rbgMask; // (32 bit bitmap see 7.1.6 of 36.213) |
1151 newDci.m_rbBitmap = rbgMask; // (32 bit bitmap see 7.1.6 of 36.213) |
614 |
1152 |
615 // create the RLC PDUs -> equally divide resources among active LCs |
1153 // create the RLC PDUs -> equally divide resources among active LCs |
616 std::map <LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator itBufReq; |
1154 std::map <LteFlowId_t, FfMacSchedSapProvider::SchedDlRlcBufferReqParameters>::iterator itBufReq; |
617 for (itBufReq = m_rlcBufferReq.begin (); itBufReq != m_rlcBufferReq.end (); itBufReq++) |
1155 for (itBufReq = m_rlcBufferReq.begin (); itBufReq != m_rlcBufferReq.end (); itBufReq++) |
618 { |
1156 { |
619 if (((*itBufReq).first.m_rnti == (*itMap).first) && |
1157 if (((*itBufReq).first.m_rnti == (*itMap).first) |
620 (((*itBufReq).second.m_rlcTransmissionQueueSize > 0) |
1158 && (((*itBufReq).second.m_rlcTransmissionQueueSize > 0) |
621 || ((*itBufReq).second.m_rlcRetransmissionQueueSize > 0) |
1159 || ((*itBufReq).second.m_rlcRetransmissionQueueSize > 0) |
622 || ((*itBufReq).second.m_rlcStatusPduSize > 0) )) |
1160 || ((*itBufReq).second.m_rlcStatusPduSize > 0) )) |
623 { |
1161 { |
|
1162 std::vector <struct RlcPduListElement_s> newRlcPduLe; |
624 for (uint8_t j = 0; j < nLayer; j++) |
1163 for (uint8_t j = 0; j < nLayer; j++) |
625 { |
1164 { |
626 RlcPduListElement_s newRlcEl; |
1165 RlcPduListElement_s newRlcEl; |
627 newRlcEl.m_logicalChannelIdentity = (*itBufReq).first.m_lcId; |
1166 newRlcEl.m_logicalChannelIdentity = (*itBufReq).first.m_lcId; |
628 newRlcEl.m_size = newDci.m_tbsSize.at (j) / lcActives; |
1167 newRlcEl.m_size = newDci.m_tbsSize.at (j) / lcActives; |
|
1168 NS_LOG_INFO (this << " LCID " << (uint32_t) newRlcEl.m_logicalChannelIdentity << " size " << newRlcEl.m_size << " layer " << (uint16_t)j); |
629 newRlcPduLe.push_back (newRlcEl); |
1169 newRlcPduLe.push_back (newRlcEl); |
630 UpdateDlRlcBufferInfo (newDci.m_rnti, newRlcEl.m_logicalChannelIdentity, newRlcEl.m_size); |
1170 UpdateDlRlcBufferInfo (newDci.m_rnti, newRlcEl.m_logicalChannelIdentity, newRlcEl.m_size); |
631 } |
1171 if (m_harqOn == true) |
|
1172 { |
|
1173 // store RLC PDU list for HARQ |
|
1174 std::map <uint16_t, DlHarqRlcPduListBuffer_t>::iterator itRlcPdu = m_dlHarqProcessesRlcPduListBuffer.find ((*itMap).first); |
|
1175 if (itRlcPdu == m_dlHarqProcessesRlcPduListBuffer.end ()) |
|
1176 { |
|
1177 NS_FATAL_ERROR ("Unable to find RlcPduList in HARQ buffer for RNTI " << (*itMap).first); |
|
1178 } |
|
1179 (*itRlcPdu).second.at (j).at (newDci.m_harqProcess).push_back (newRlcEl); |
|
1180 } |
|
1181 } |
|
1182 newEl.m_rlcPduList.push_back (newRlcPduLe); |
632 } |
1183 } |
633 if ((*itBufReq).first.m_rnti > (*itMap).first) |
1184 if ((*itBufReq).first.m_rnti > (*itMap).first) |
634 { |
1185 { |
635 break; |
1186 break; |
636 } |
1187 } |
637 } |
1188 } |
638 newDci.m_ndi.push_back (1); // TBD (new data indicator) |
1189 for (uint8_t j = 0; j < nLayer; j++) |
639 newDci.m_rv.push_back (0); // TBD (redundancy version) |
1190 { |
|
1191 newDci.m_ndi.push_back (1); |
|
1192 newDci.m_rv.push_back (0); |
|
1193 } |
640 |
1194 |
641 newEl.m_dci = newDci; |
1195 newEl.m_dci = newDci; |
|
1196 |
|
1197 if (m_harqOn == true) |
|
1198 { |
|
1199 // store DCI for HARQ |
|
1200 std::map <uint16_t, DlHarqProcessesDciBuffer_t>::iterator itDci = m_dlHarqProcessesDciBuffer.find (newEl.m_rnti); |
|
1201 if (itDci == m_dlHarqProcessesDciBuffer.end ()) |
|
1202 { |
|
1203 NS_FATAL_ERROR ("Unable to find RNTI entry in DCI HARQ buffer for RNTI " << newEl.m_rnti); |
|
1204 } |
|
1205 (*itDci).second.at (newDci.m_harqProcess) = newDci; |
|
1206 // refresh timer |
|
1207 std::map <uint16_t, DlHarqProcessesTimer_t>::iterator itHarqTimer = m_dlHarqProcessesTimer.find (newEl.m_rnti); |
|
1208 if (itHarqTimer== m_dlHarqProcessesTimer.end ()) |
|
1209 { |
|
1210 NS_FATAL_ERROR ("Unable to find HARQ timer for RNTI " << (uint16_t)newEl.m_rnti); |
|
1211 } |
|
1212 (*itHarqTimer).second.at (newDci.m_harqProcess) = 0; |
|
1213 } |
|
1214 |
642 // ...more parameters -> ignored in this version |
1215 // ...more parameters -> ignored in this version |
643 |
1216 |
644 newEl.m_rlcPduList.push_back (newRlcPduLe); |
|
645 ret.m_buildDataList.push_back (newEl); |
1217 ret.m_buildDataList.push_back (newEl); |
646 |
|
647 |
1218 |
648 itMap++; |
1219 itMap++; |
649 } // end while allocation |
1220 } // end while allocation |
650 ret.m_nrOfPdcchOfdmSymbols = 1; // TODO: check the correct value according to the DCIs transmitted |
1221 ret.m_nrOfPdcchOfdmSymbols = 1; // TODO: check the correct value according to the DCIs transmitted |
651 |
1222 |
652 m_schedSapUser->SchedDlConfigInd (ret); |
1223 m_schedSapUser->SchedDlConfigInd (ret); |
653 |
1224 |
|
1225 |
654 return; |
1226 return; |
655 } |
1227 } |
656 |
1228 |
657 void |
1229 void |
658 FdMtFfMacScheduler::DoSchedDlRachInfoReq (const struct FfMacSchedSapProvider::SchedDlRachInfoReqParameters& params) |
1230 FdMtFfMacScheduler::DoSchedDlRachInfoReq (const struct FfMacSchedSapProvider::SchedDlRachInfoReqParameters& params) |
659 { |
1231 { |
660 NS_FATAL_ERROR ("unimplemented"); |
1232 NS_LOG_FUNCTION (this); |
|
1233 |
|
1234 m_rachList = params.m_rachList; |
|
1235 |
661 return; |
1236 return; |
662 } |
1237 } |
663 |
1238 |
664 void |
1239 void |
665 FdMtFfMacScheduler::DoSchedDlCqiInfoReq (const struct FfMacSchedSapProvider::SchedDlCqiInfoReqParameters& params) |
1240 FdMtFfMacScheduler::DoSchedDlCqiInfoReq (const struct FfMacSchedSapProvider::SchedDlCqiInfoReqParameters& params) |
754 } |
1329 } |
755 |
1330 |
756 void |
1331 void |
757 FdMtFfMacScheduler::DoSchedUlTriggerReq (const struct FfMacSchedSapProvider::SchedUlTriggerReqParameters& params) |
1332 FdMtFfMacScheduler::DoSchedUlTriggerReq (const struct FfMacSchedSapProvider::SchedUlTriggerReqParameters& params) |
758 { |
1333 { |
759 NS_LOG_FUNCTION (this << " UL - Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf)); |
1334 NS_LOG_FUNCTION (this << " UL - Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf) << " size " << params.m_ulInfoList.size ()); |
760 |
1335 |
761 RefreshUlCqiMaps (); |
1336 RefreshUlCqiMaps (); |
762 |
1337 |
763 std::map <uint16_t,uint32_t>::iterator it; |
1338 // Generate RBs map |
|
1339 FfMacSchedSapUser::SchedUlConfigIndParameters ret; |
|
1340 std::vector <bool> rbMap; |
|
1341 uint16_t rbAllocatedNum = 0; |
|
1342 std::set <uint16_t> rntiAllocated; |
|
1343 std::vector <uint16_t> rbgAllocationMap; |
|
1344 // update with RACH allocation map |
|
1345 rbgAllocationMap = m_rachAllocationMap; |
|
1346 //rbgAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0); |
|
1347 m_rachAllocationMap.clear (); |
|
1348 m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0); |
|
1349 |
|
1350 rbMap.resize (m_cschedCellConfig.m_ulBandwidth, false); |
|
1351 // remove RACH allocation |
|
1352 for (uint16_t i = 0; i < m_cschedCellConfig.m_ulBandwidth; i++) |
|
1353 { |
|
1354 if (rbgAllocationMap.at (i) != 0) |
|
1355 { |
|
1356 rbMap.at (i) = true; |
|
1357 NS_LOG_DEBUG (this << " Allocated for RACH " << i); |
|
1358 } |
|
1359 } |
|
1360 |
|
1361 |
|
1362 if (m_harqOn == true) |
|
1363 { |
|
1364 // Process UL HARQ feedback |
|
1365 // update UL HARQ proc id |
|
1366 std::map <uint16_t, uint8_t>::iterator itProcId; |
|
1367 for (itProcId = m_ulHarqCurrentProcessId.begin (); itProcId != m_ulHarqCurrentProcessId.end (); itProcId++) |
|
1368 { |
|
1369 (*itProcId).second = ((*itProcId).second + 1) % HARQ_PROC_NUM; |
|
1370 } |
|
1371 |
|
1372 for (uint16_t i = 0; i < params.m_ulInfoList.size (); i++) |
|
1373 { |
|
1374 if (params.m_ulInfoList.at (i).m_receptionStatus == UlInfoListElement_s::NotOk) |
|
1375 { |
|
1376 // retransmit the corresponding block: retrieve the UL-DCI |
|
1377 uint16_t rnti = params.m_ulInfoList.at (i).m_rnti; |
|
1378 itProcId = m_ulHarqCurrentProcessId.find (rnti); |
|
1379 if (itProcId == m_ulHarqCurrentProcessId.end ()) |
|
1380 { |
|
1381 NS_LOG_ERROR ("No info found in HARQ buffer for UE (might change eNB) " << rnti); |
|
1382 } |
|
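// the feedback received now refers to the UL transmission performed
// HARQ_PERIOD subframes earlier, so step back from the current process id
// (modulo the number of HARQ processes) to locate the acknowledged process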
1383 uint8_t harqId = (uint8_t)((*itProcId).second - HARQ_PERIOD) % HARQ_PROC_NUM; |
|
1384 NS_LOG_INFO (this << " UL-HARQ retx RNTI " << rnti << " harqId " << (uint16_t)harqId << " i " << i << " size " << params.m_ulInfoList.size ()); |
|
1385 std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itHarq = m_ulHarqProcessesDciBuffer.find (rnti); |
|
1386 if (itHarq == m_ulHarqProcessesDciBuffer.end ()) |
|
1387 { |
|
1388 NS_LOG_ERROR ("No info found in HARQ buffer for UE (might change eNB) " << rnti); |
|
1389 continue; |
|
1390 } |
|
1391 UlDciListElement_s dci = (*itHarq).second.at (harqId); |
|
1392 std::map <uint16_t, UlHarqProcessesStatus_t>::iterator itStat = m_ulHarqProcessesStatus.find (rnti); |
|
1393 if (itStat == m_ulHarqProcessesStatus.end ()) |
|
1394 { |
|
1395 NS_LOG_ERROR ("No info found in HARQ buffer for UE (might change eNB) " << rnti); |
|
1396 } |
|
1397 if ((*itStat).second.at (harqId) >= 3) |
|
1398 { |
|
1399 NS_LOG_INFO ("Max number of retransmissions reached (UL)-> drop process"); |
|
1400 continue; |
|
1401 } |
|
1402 bool free = true; |
|
1403 for (int j = dci.m_rbStart; j < dci.m_rbStart + dci.m_rbLen; j++) |
|
1404 { |
|
1405 if (rbMap.at (j) == true) |
|
1406 { |
|
1407 free = false; |
|
1408 NS_LOG_INFO (this << " BUSY " << j); |
|
1409 } |
|
1410 } |
|
1411 if (free) |
|
1412 { |
|
1413 // retx on the same RBs |
|
1414 for (int j = dci.m_rbStart; j < dci.m_rbStart + dci.m_rbLen; j++) |
|
1415 { |
|
1416 rbMap.at (j) = true; |
|
1417 rbgAllocationMap.at (j) = dci.m_rnti; |
|
1418 NS_LOG_INFO ("\tRB " << j); |
|
1419 rbAllocatedNum++; |
|
1420 } |
|
1421 NS_LOG_INFO (this << " Send retx in the same RBs " << (uint16_t)dci.m_rbStart << " to " << dci.m_rbStart + dci.m_rbLen << " RV " << (*itStat).second.at (harqId) + 1); |
|
1422 } |
|
1423 else |
|
1424 { |
|
1425 NS_LOG_INFO ("Cannot allocate retx due to RACH allocations for UE " << rnti); |
|
1426 continue; |
|
1427 } |
|
1428 dci.m_ndi = 0; |
|
1429 // Update HARQ buffers with new HarqId |
|
1430 (*itStat).second.at ((*itProcId).second) = (*itStat).second.at (harqId) + 1; |
|
1431 (*itStat).second.at (harqId) = 0; |
|
1432 (*itHarq).second.at ((*itProcId).second) = dci; |
|
1433 ret.m_dciList.push_back (dci); |
|
1434 rntiAllocated.insert (dci.m_rnti); |
|
1435 } |
|
1436 else |
|
1437 { |
|
1438 NS_LOG_INFO (this << " HARQ-ACK feedback from RNTI " << params.m_ulInfoList.at (i).m_rnti); |
|
1439 } |
|
1440 } |
|
1441 } |
|
1442 |
|
1443 std::map <uint16_t,uint32_t>::iterator it; |
764 int nflows = 0; |
1444 int nflows = 0; |
765 |
1445 |
766 for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++) |
1446 for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++) |
767 { |
1447 { |
768 // remove old entries of this UE-LC |
1448 std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first); |
769 if ((*it).second > 0) |
1449 // select UEs with queues not empty and not yet allocated for HARQ |
|
1450 if (((*it).second > 0)&&(itRnti == rntiAllocated.end ())) |
770 { |
1451 { |
771 nflows++; |
1452 nflows++; |
772 } |
1453 } |
773 } |
1454 } |
774 |
1455 |
775 if (nflows == 0) |
1456 if (nflows == 0) |
776 { |
1457 { |
777 return ; // no flows to be scheduled |
1458 if (ret.m_dciList.size () > 0) |
778 } |
1459 { |
779 |
1460 m_schedSapUser->SchedUlConfigInd (ret); |
780 |
1461 } |
781 // Divide the resources equally among the active users |
1462 |
782 int rbPerFlow = m_cschedCellConfig.m_ulBandwidth / nflows; |
1463 return; // no flows to be scheduled |
783 if (rbPerFlow == 0) |
1464 } |
784 { |
1465 |
785 rbPerFlow = 1; // at least 1 RB per flow (while resources are available) |
1466 |
|
1467 // Divide the remaining resources equally among the active users, starting from the user that follows the one served in the last scheduling trigger |
|
1468 uint16_t rbPerFlow = (m_cschedCellConfig.m_ulBandwidth) / (nflows + rntiAllocated.size ()); |
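// note: the denominator also counts the UEs already allocated for HARQ retx
// in this TTI, so the per-flow share accounts for the resources taken by
// those retransmissions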
|
1469 if (rbPerFlow < 3) |
|
1470 { |
|
1471 rbPerFlow = 3; // at least 3 RBs per flow (while resources are available) to ensure a TxOpportunity >= 7 bytes |
786 } |
1472 } |
787 int rbAllocated = 0; |
1473 int rbAllocated = 0; |
788 |
1474 |
789 FfMacSchedSapUser::SchedUlConfigIndParameters ret; |
|
790 std::vector <uint16_t> rbgAllocationMap; |
|
791 if (m_nextRntiUl != 0) |
1475 if (m_nextRntiUl != 0) |
792 { |
1476 { |
793 for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++) |
1477 for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++) |
794 { |
1478 { |
795 if ((*it).first == m_nextRntiUl) |
1479 if ((*it).first == m_nextRntiUl) |
885 uldci.m_ulIndex = 0; // TDD parameter |
1634 uldci.m_ulIndex = 0; // TDD parameter |
886 uldci.m_dai = 1; // TDD parameter |
1635 uldci.m_dai = 1; // TDD parameter |
887 uldci.m_freqHopping = 0; |
1636 uldci.m_freqHopping = 0; |
888 uldci.m_pdcchPowerOffset = 0; // not used |
1637 uldci.m_pdcchPowerOffset = 0; // not used |
889 ret.m_dciList.push_back (uldci); |
1638 ret.m_dciList.push_back (uldci); |
|
1639 // store DCI for HARQ_PERIOD |
|
1640 uint8_t harqId = 0; |
|
1641 if (m_harqOn == true) |
|
1642 { |
|
1643 std::map <uint16_t, uint8_t>::iterator itProcId; |
|
1644 itProcId = m_ulHarqCurrentProcessId.find (uldci.m_rnti); |
|
1645 if (itProcId == m_ulHarqCurrentProcessId.end ()) |
|
1646 { |
|
1647 NS_FATAL_ERROR ("No info found in HARQ buffer for UE " << uldci.m_rnti); |
|
1648 } |
|
1649 harqId = (*itProcId).second; |
|
1650 std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itDci = m_ulHarqProcessesDciBuffer.find (uldci.m_rnti); |
|
1651 if (itDci == m_ulHarqProcessesDciBuffer.end ()) |
|
1652 { |
|
1653 NS_FATAL_ERROR ("Unable to find RNTI entry in UL DCI HARQ buffer for RNTI " << uldci.m_rnti); |
|
1654 } |
|
1655 (*itDci).second.at (harqId) = uldci; |
|
1656 } |
|
1657 |
|
1658 NS_LOG_INFO (this << " UE Allocation RNTI " << (*it).first << " startPRB " << (uint32_t)uldci.m_rbStart << " nPRB " << (uint32_t)uldci.m_rbLen << " CQI " << cqi << " MCS " << (uint32_t)uldci.m_mcs << " TBsize " << uldci.m_tbSize << " RbAlloc " << rbAllocated << " harqId " << (uint16_t)harqId); |
890 |
1659 |
891 |
1660 |
892 it++; |
1661 it++; |
893 if (it == m_ceBsrRxed.end ()) |
1662 if (it == m_ceBsrRxed.end ()) |
894 { |
1663 { |
895 // restart from the first |
1664 // restart from the first |
896 it = m_ceBsrRxed.begin (); |
1665 it = m_ceBsrRxed.begin (); |
897 } |
1666 } |
898 if (rbAllocated == m_cschedCellConfig.m_ulBandwidth) |
1667 if ((rbAllocated == m_cschedCellConfig.m_ulBandwidth) || (rbPerFlow == 0)) |
899 { |
1668 { |
900 // Stop allocation: no more PRBs |
1669 // Stop allocation: no more PRBs |
901 m_nextRntiUl = (*it).first; |
1670 m_nextRntiUl = (*it).first; |
902 break; |
1671 break; |
903 } |
1672 } |
904 } |
1673 } |
905 while ((*it).first != m_nextRntiUl); |
1674 while (((*it).first != m_nextRntiUl)&&(rbPerFlow!=0)); |
906 |
1675 |
907 |
1676 |
908 m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> > (params.m_sfnSf, rbgAllocationMap)); |
1677 m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> > (params.m_sfnSf, rbgAllocationMap)); |
909 m_schedSapUser->SchedUlConfigInd (ret); |
1678 m_schedSapUser->SchedUlConfigInd (ret); |
|
1679 |
910 return; |
1680 return; |
911 } |
1681 } |
912 |
1682 |
913 void |
1683 void |
914 FdMtFfMacScheduler::DoSchedUlNoiseInterferenceReq (const struct FfMacSchedSapProvider::SchedUlNoiseInterferenceReqParameters& params) |
1684 FdMtFfMacScheduler::DoSchedUlNoiseInterferenceReq (const struct FfMacSchedSapProvider::SchedUlNoiseInterferenceReqParameters& params) |
915 { |
1685 { |
916 NS_FATAL_ERROR ("unimplemented"); |
1686 NS_LOG_FUNCTION (this); |
|
1687 // TODO: Implementation of the API |
917 return; |
1688 return; |
918 } |
1689 } |
919 |
1690 |
920 void |
1691 void |
921 FdMtFfMacScheduler::DoSchedUlSrInfoReq (const struct FfMacSchedSapProvider::SchedUlSrInfoReqParameters& params) |
1692 FdMtFfMacScheduler::DoSchedUlSrInfoReq (const struct FfMacSchedSapProvider::SchedUlSrInfoReqParameters& params) |
922 { |
1693 { |
923 NS_FATAL_ERROR ("unimplemented"); |
1694 NS_LOG_FUNCTION (this); |
|
1695 // TODO: Implementation of the API |
924 return; |
1696 return; |
925 } |
1697 } |
926 |
1698 |
927 void |
1699 void |
928 FdMtFfMacScheduler::DoSchedUlMacCtrlInfoReq (const struct FfMacSchedSapProvider::SchedUlMacCtrlInfoReqParameters& params) |
1700 FdMtFfMacScheduler::DoSchedUlMacCtrlInfoReq (const struct FfMacSchedSapProvider::SchedUlMacCtrlInfoReqParameters& params) |
929 { |
1701 { |
930 NS_LOG_FUNCTION (this); |
1702 NS_LOG_FUNCTION (this); |
931 |
1703 |
932 std::map <uint16_t,uint32_t>::iterator it; |
1704 std::map <uint16_t,uint32_t>::iterator it; |
933 |
1705 |
934 for (unsigned int i = 0; i < params.m_macCeList.size (); i++) |
1706 for (unsigned int i = 0; i < params.m_macCeList.size (); i++) |
935 { |
1707 { |
936 if ( params.m_macCeList.at (i).m_macCeType == MacCeListElement_s::BSR ) |
1708 if ( params.m_macCeList.at (i).m_macCeType == MacCeListElement_s::BSR ) |
937 { |
1709 { |
938 // buffer status report |
1710 // buffer status report |
939 // note that we only consider LCG 0, the other three LCGs are neglected |
1711 // note that this scheduler does not differentiate the |
940 // this is consistent with the assumption in LteUeMac that the first LCG gathers all LCs |
1712 // allocation according to which LCGs have more/less bytes |
941 uint16_t rnti = params.m_macCeList.at (i).m_rnti; |
1713 // to send. |
942 it = m_ceBsrRxed.find (rnti); |
1714 // Hence the BSR of different LCGs are just summed up to get |
943 if (it == m_ceBsrRxed.end ()) |
1715 // a total queue size that is used for allocation purposes. |
|
1716 |
|
1717 uint32_t buffer = 0; |
|
1718 for (uint8_t lcg = 0; lcg < 4; ++lcg) |
|
1719 { |
|
1720 uint8_t bsrId = params.m_macCeList.at (i).m_macCeValue.m_bufferStatus.at (lcg); |
|
1721 buffer += BufferSizeLevelBsr::BsrId2BufferSize (bsrId); |
|
1722 } |
|
1723 |
|
1724 uint16_t rnti = params.m_macCeList.at (i).m_rnti; |
|
1725 NS_LOG_LOGIC (this << "RNTI=" << rnti << " buffer=" << buffer); |
|
1726 it = m_ceBsrRxed.find (rnti); |
|
1727 if (it == m_ceBsrRxed.end ()) |
|
1728 { |
|
1729 // create the new entry |
|
1730 m_ceBsrRxed.insert ( std::pair<uint16_t, uint32_t > (rnti, buffer)); |
|
1731 } |
|
1732 else |
|
1733 { |
|
1734 // update the buffer size value |
|
1735 (*it).second = buffer; |
|
1736 } |
|
1737 } |
|
1738 } |
|
1739 |
|
1740 return; |
|
1741 } |
|
1742 |
|
1743 void |
|
1744 FdMtFfMacScheduler::DoSchedUlCqiInfoReq (const struct FfMacSchedSapProvider::SchedUlCqiInfoReqParameters& params) |
|
1745 { |
|
1746 NS_LOG_FUNCTION (this); |
|
1747 // retrieve the allocation for this subframe |
|
1748 switch (m_ulCqiFilter) |
|
1749 { |
|
1750 case FfMacScheduler::SRS_UL_CQI: |
944 { |
1751 { |
945 // create the new entry |
1752 // filter all the CQIs that are not SRS based |
946 uint8_t bsrId = params.m_macCeList.at (i).m_macCeValue.m_bufferStatus.at (0); |
1753 if (params.m_ulCqi.m_type != UlCqi_s::SRS) |
947 int buffer = BufferSizeLevelBsr::BsrId2BufferSize (bsrId); |
1754 { |
948 m_ceBsrRxed.insert ( std::pair<uint16_t, uint32_t > (rnti, buffer)); |
1755 return; |
|
1756 } |
949 } |
1757 } |
950 else |
1758 break; |
|
1759 case FfMacScheduler::PUSCH_UL_CQI: |
951 { |
1760 { |
952 // update the buffer size value |
1761 // filter all the CQIs that are not PUSCH based |
953 (*it).second = BufferSizeLevelBsr::BsrId2BufferSize (params.m_macCeList.at (i).m_macCeValue.m_bufferStatus.at (0)); |
1762 if (params.m_ulCqi.m_type != UlCqi_s::PUSCH) |
|
1763 { |
|
1764 return; |
|
1765 } |
954 } |
1766 } |
955 } |
1767 case FfMacScheduler::ALL_UL_CQI: |
956 } |
|
957 |
|
958 return; |
|
959 } |
|
960 |
|
961 void |
|
962 FdMtFfMacScheduler::DoSchedUlCqiInfoReq (const struct FfMacSchedSapProvider::SchedUlCqiInfoReqParameters& params) |
|
963 { |
|
964 NS_LOG_FUNCTION (this); |
|
965 // retrieve the allocation for this subframe |
|
966 switch (m_ulCqiFilter) |
|
967 { |
|
968 case FfMacScheduler::SRS_UL_CQI: |
|
969 { |
|
970 // filter all the CQIs that are not SRS based |
|
971 if (params.m_ulCqi.m_type!=UlCqi_s::SRS) |
|
972 { |
|
973 return; |
|
974 } |
|
975 } |
|
976 break; |
1768 break; |
977 case FfMacScheduler::PUSCH_UL_CQI: |
1769 |
978 { |
1770 default: |
979 // filter all the CQIs that are not PUSCH based |
1771 NS_FATAL_ERROR ("Unknown UL CQI type"); |
980 if (params.m_ulCqi.m_type!=UlCqi_s::PUSCH) |
|
981 { |
|
982 return; |
|
983 } |
|
984 } |
|
985 case FfMacScheduler::ALL_UL_CQI: |
|
986 break; |
|
987 |
|
988 default: |
|
989 NS_FATAL_ERROR ("Unknown UL CQI type"); |
|
990 } |
1772 } |
991 |
1773 |
992 switch (params.m_ulCqi.m_type) |
1774 switch (params.m_ulCqi.m_type) |
993 { |
1775 { |
994 case UlCqi_s::PUSCH: |
1776 case UlCqi_s::PUSCH: |
995 { |
1777 { |
996 std::map <uint16_t, std::vector <uint16_t> >::iterator itMap; |
1778 std::map <uint16_t, std::vector <uint16_t> >::iterator itMap; |
997 std::map <uint16_t, std::vector <double> >::iterator itCqi; |
1779 std::map <uint16_t, std::vector <double> >::iterator itCqi; |
|
1780 NS_LOG_DEBUG (this << " Collect PUSCH CQIs of Frame no. " << (params.m_sfnSf >> 4) << " subframe no. " << (0xF & params.m_sfnSf)); |
998 itMap = m_allocationMaps.find (params.m_sfnSf); |
1781 itMap = m_allocationMaps.find (params.m_sfnSf); |
999 if (itMap == m_allocationMaps.end ()) |
1782 if (itMap == m_allocationMaps.end ()) |
1000 { |
1783 { |
1001 NS_LOG_DEBUG (this << " Does not find info on allocation, size : " << m_allocationMaps.size ()); |
|
1002 return; |
1784 return; |
1003 } |
1785 } |
1004 for (uint32_t i = 0; i < (*itMap).second.size (); i++) |
1786 for (uint32_t i = 0; i < (*itMap).second.size (); i++) |
1005 { |
1787 { |
1006 // convert from fixed point notation Sxxxxxxxxxxx.xxx to double |
1788 // convert from fixed point notation Sxxxxxxxxxxx.xxx to double |
1007 // NS_LOG_INFO (this << " i " << i << " size " << params.m_ulCqi.m_sinr.size () << " mapSIze " << (*itMap).second.size ()); |
|
1008 double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (i)); |
1789 double sinr = LteFfConverter::fpS11dot3toDouble (params.m_ulCqi.m_sinr.at (i)); |
1009 itCqi = m_ueCqi.find ((*itMap).second.at (i)); |
1790 itCqi = m_ueCqi.find ((*itMap).second.at (i)); |
1010 if (itCqi == m_ueCqi.end ()) |
1791 if (itCqi == m_ueCqi.end ()) |
1011 { |
1792 { |
1012 // create a new entry |
1793 // create a new entry |