implement RTP audio pacing by timestamp sent by source;

r05a04_dev
Bryan Biedenkapp 3 weeks ago
parent cad8ac7481
commit fa86412a3c

@@ -84,6 +84,10 @@ network:
# Flag indicating UDP audio should be RTP framed.
udpRTPFrames: true
# Flag indicating UDP audio RTP timing should be ignored.
# (This allows the sending source to send audio as fast as it wants. This should not be used in combination
# with 'udpFrameTiming', and is intended for diagnostic purposes only.)
udpIgnoreRTPTiming: false
# Flag indicating UDP audio should be encoded using G.711 uLaw.
# NOTE: This flag is only applicable when sending audio via RTP.
udpUseULaw: true

@@ -173,6 +173,7 @@ HostBridge::HostBridge(const std::string& confFile) :
m_udpReceivePort(32001),
m_udpReceiveAddress("127.0.0.1"),
m_udpRTPFrames(false),
m_udpIgnoreRTPTiming(false),
m_udpUseULaw(false),
m_udpUsrp(false),
m_udpFrameTiming(false),
@@ -259,6 +260,8 @@ HostBridge::HostBridge(const std::string& confFile) :
m_ctsCorHoldoffMs(250U),
m_rtpSeqNo(0U),
m_rtpTimestamp(INVALID_TS),
m_udpNetPktSeq(0U),
m_udpNetLastPktSeq(0U),
m_usrpSeqNo(0U)
#if defined(_WIN32)
,
@@ -1051,6 +1054,7 @@ bool HostBridge::createNetwork()
}
m_udpRTPFrames = networkConf["udpRTPFrames"].as<bool>(false);
m_udpIgnoreRTPTiming = networkConf["udpIgnoreRTPTiming"].as<bool>(false);
if (m_udpRTPFrames) {
m_udpUsrp = false; // RTP disabled USRP
m_udpFrameTiming = false;
@@ -1061,6 +1065,9 @@ bool HostBridge::createNetwork()
}
}
if (m_udpIgnoreRTPTiming)
::LogWarning(LOG_HOST, "Ignoring RTP timing, audio frames will be processed as they arrive.");
yaml::Node tekConf = networkConf["tek"];
bool tekEnable = tekConf["enable"].as<bool>(false);
std::string tekAlgo = tekConf["tekAlgo"].as<std::string>();
@@ -1202,6 +1209,7 @@ bool HostBridge::createNetwork()
LogInfo(" UDP Audio RTP Framed: %s", m_udpRTPFrames ? "yes" : "no");
if (m_udpRTPFrames) {
LogInfo(" UDP Audio Use uLaw Encoding: %s", m_udpUseULaw ? "yes" : "no");
LogInfo(" UDP Audio Ignore RTP Timing: %s", m_udpIgnoreRTPTiming ? "yes" : "no");
}
LogInfo(" UDP Audio USRP: %s", m_udpUsrp ? "yes" : "no");
LogInfo(" UDP Frame Timing: %s", m_udpFrameTiming ? "yes" : "no");
@@ -1318,9 +1326,10 @@ void HostBridge::processUDPAudio()
pcmLength = AUDIO_SAMPLES_LENGTH_BYTES;
DECLARE_UINT8_ARRAY(pcm, pcmLength);
RTPHeader rtpHeader = RTPHeader();
// are we setup for receiving RTP frames?
if (m_udpRTPFrames) {
RTPHeader rtpHeader = RTPHeader();
rtpHeader.decode(buffer);
if (rtpHeader.getPayloadType() != RTP_G711_PAYLOAD_TYPE) {
@@ -1328,6 +1337,35 @@ void HostBridge::processUDPAudio()
return;
}
m_udpNetPktSeq = rtpHeader.getSequence();
if (m_udpNetPktSeq == RTP_END_OF_CALL_SEQ) {
// reset the received sequence back to 0
m_udpNetLastPktSeq = 0U;
}
else {
uint16_t lastRxSeq = m_udpNetLastPktSeq;
if ((m_udpNetPktSeq >= m_udpNetLastPktSeq) || (m_udpNetPktSeq == 0U)) {
// if the sequence isn't 0, and is greater than the last received sequence + 1 frame
// assume a packet was lost
if ((m_udpNetPktSeq != 0U) && m_udpNetPktSeq > m_udpNetLastPktSeq + 1U) {
LogWarning(LOG_NET, "audio possible lost frames; got %u, expected %u",
m_udpNetPktSeq, lastRxSeq);
}
}
else {
if (m_udpNetPktSeq < m_udpNetLastPktSeq) {
LogWarning(LOG_NET, "audio out-of-order; got %u, expected %u",
m_udpNetPktSeq, lastRxSeq);
}
}
}
m_udpNetLastPktSeq = m_udpNetPktSeq;
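The gap and out-of-order checks above compare the 16-bit RTP sequence numbers directly, which works as long as the counter is reset via RTP_END_OF_CALL_SEQ before it can wrap. For reference only, a wrap-aware comparison in the style of RFC 3550 could look like the sketch below; the helper names are hypothetical and are not part of this change:

#include <cstdint>

// Hypothetical helpers, not project code: wrap-safe RTP sequence comparison.
// 'a' is newer than 'b' when the forward distance from b to a is non-zero and
// less than half of the 16-bit sequence space, so the 65535 -> 0 wrap is handled.
static bool rtpSeqNewer(uint16_t a, uint16_t b)
{
    uint16_t delta = static_cast<uint16_t>(a - b);
    return delta != 0U && delta < 0x8000U;
}

// Wrap-safe "lost frames" test: newer than the last sequence by more than one frame.
static bool rtpSeqGap(uint16_t rxSeq, uint16_t lastSeq)
{
    return rtpSeqNewer(rxSeq, lastSeq) && static_cast<uint16_t>(rxSeq - lastSeq) > 1U;
}

Whether the wrap case matters in practice depends on how reliably the sender emits RTP_END_OF_CALL_SEQ between calls.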
::memcpy(pcm, buffer + RTP_HEADER_LENGTH_BYTES, AUDIO_SAMPLES_LENGTH_BYTES);
} else {
if (m_udpUsrp) {
@@ -1350,6 +1388,7 @@ void HostBridge::processUDPAudio()
::memset(req->pcm, 0x00U, pcmLength);
::memcpy(req->pcm, pcm, pcmLength);
req->rtpHeader = rtpHeader;
req->pcmLength = pcmLength;
if (m_udpMetadata) {
@@ -1400,6 +1439,8 @@ void HostBridge::writeUDPAudio(uint32_t srcId, uint32_t dstId, uint8_t* pcm, uin
}
m_rtpSeqNo++;
if (m_rtpSeqNo == RTP_END_OF_CALL_SEQ)
m_rtpSeqNo = 0U;
}
else {
// are we sending USRP formatted audio frames?
@@ -1602,6 +1643,7 @@ void HostBridge::processDMRNetwork(uint8_t* buffer, uint32_t length)
m_callAlgoId = lc.getAlgId();
}
// process call termination
if (dataSync && (dataType == DataType::TERMINATOR_WITH_LC)) {
m_callInProgress = false;
m_ignoreCall = false;
@@ -1652,6 +1694,7 @@ void HostBridge::processDMRNetwork(uint8_t* buffer, uint32_t length)
return;
}
// process audio frames
if (dataType == DataType::VOICE_SYNC || dataType == DataType::VOICE) {
uint8_t ambe[27U];
::memcpy(ambe, data.get(), 14U);
@@ -2024,6 +2067,7 @@ void HostBridge::processP25Network(uint8_t* buffer, uint32_t length)
generatePreambleTone();
}
// process call termination
if ((duid == DUID::TDU) || (duid == DUID::TDULC)) {
m_callInProgress = false;
m_ignoreCall = false;
@@ -2073,6 +2117,7 @@ void HostBridge::processP25Network(uint8_t* buffer, uint32_t length)
return;
}
// unsupported change of encryption parameters during call
if (m_callAlgoId != ALGO_UNENCRYPT && m_callAlgoId != m_tekAlgoId && callKID != m_tekKeyId) {
if (m_callInProgress) {
m_callInProgress = false;
@@ -2593,6 +2638,7 @@ void HostBridge::processAnalogNetwork(uint8_t* buffer, uint32_t length)
generatePreambleTone();
}
// process call termination
if (frameType == AudioFrameType::TERMINATOR) {
m_callInProgress = false;
m_ignoreCall = false;
@@ -2619,6 +2665,7 @@ void HostBridge::processAnalogNetwork(uint8_t* buffer, uint32_t length)
if (m_ignoreCall)
return;
// decode audio frames
if (frameType == AudioFrameType::VOICE_START || frameType == AudioFrameType::VOICE) {
LogInfoEx(LOG_NET, ANO_VOICE ", audio, srcId = %u, dstId = %u, seqNo = %u", srcId, dstId, analogData.getSeqNo());
@@ -2787,7 +2834,7 @@ uint8_t* HostBridge::generateRTPHeaders(uint8_t msgLen, uint16_t& rtpSeq)
{
uint32_t timestamp = m_rtpTimestamp;
if (timestamp != INVALID_TS) {
timestamp += (RTP_GENERIC_CLOCK_RATE / 50);
timestamp += (RTP_GENERIC_CLOCK_RATE / AUDIO_SAMPLES_LENGTH);
if (m_debug)
LogDebugEx(LOG_NET, "HostBridge::generateRTPHeaders()", "RTP, previous TS = %u, TS = %u, rtpSeq = %u", m_rtpTimestamp, timestamp, rtpSeq);
m_rtpTimestamp = timestamp;
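For reference on the increment above: RTP timestamps advance by the number of clock ticks covered by each packet. With the 8 kHz clock defined for G.711 in RFC 3551 and 20 ms of audio per packet, that works out to 160 ticks per frame. A small illustrative computation follows; the constant names are assumptions for this sketch, not the project's definitions:

#include <cstdint>

// Illustrative only: per-packet RTP timestamp increment for a fixed packet
// duration. For an 8000 Hz clock and 20 ms packets this evaluates to 160.
constexpr uint32_t kClockRateHz = 8000U;  // assumed RTP clock rate (G.711)
constexpr uint32_t kFrameMs = 20U;        // assumed audio duration per packet

constexpr uint32_t rtpTimestampIncrement()
{
    return (kClockRateHz * kFrameMs) / 1000U;
}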
@@ -3378,17 +3425,45 @@ void* HostBridge::threadUDPAudioProcess(void* arg)
}
}
uint16_t pktSeq = 0U;
if (bridge->m_udpRTPFrames) {
pktSeq = req->rtpHeader.getSequence();
// are we timing based on RTP timestamps?
if (!bridge->m_udpIgnoreRTPTiming) {
if (lastFrameTime == 0U)
lastFrameTime = req->rtpHeader.getTimestamp();
else {
if (lastFrameTime + (RTP_GENERIC_CLOCK_RATE / AUDIO_SAMPLES_LENGTH) >= req->rtpHeader.getTimestamp()) {
// already time to send next frame
}
else {
if (bridge->m_debug)
LogDebugEx(LOG_HOST, "HostBridge::threadUDPAudioProcess()", "RTP frame timing, delaying packet, now = %llu, lastUdpFrameTime = %llu, pktSeq = %u",
now, lastFrameTime, pktSeq);
continue;
}
}
lastFrameTime = now;
}
}
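The block above is the pacing named in the commit message: when RTP timing is honored, a queued frame whose timestamp is more than one frame increment ahead of the last release marker is held back, and the loop simply re-checks the queue on its next pass. As a general illustration of timestamp-based pacing only (a sketch; the clock-rate constant and the sleeping approach are assumptions and not how this thread is structured), the sender's timestamp delta can also be converted directly into a wall-clock delay:

#include <chrono>
#include <cstdint>
#include <thread>

// Illustrative sketch: release frames at the rate implied by the sender's
// RTP timestamps, assuming an 8000 Hz RTP clock.
constexpr uint32_t kRtpClockRateHz = 8000U;

static void paceByRtpTimestamp(uint32_t prevTs, uint32_t curTs)
{
    // Forward timestamp delta; unsigned arithmetic keeps this wrap-safe.
    uint32_t deltaTicks = curTs - prevTs;

    // Convert RTP clock ticks to microseconds of wall-clock time.
    uint64_t deltaUs = (static_cast<uint64_t>(deltaTicks) * 1000000ULL) / kRtpClockRateHz;

    // Wait until the next frame is due; a fuller version would subtract the
    // time already spent since the previous frame was released.
    std::this_thread::sleep_for(std::chrono::microseconds(deltaUs));
}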
if (bridge->m_debug)
LogDebugEx(LOG_HOST, "HostBridge::threadUDPAudioProcess()", "now = %llu, lastUdpFrameTime = %llu, audioDetect = %u, callInProgress = %u, p25N = %u, dmrN = %u, analogN = %u, frameCnt = %u",
now, lastFrameTime, bridge->m_audioDetect, bridge->m_callInProgress, bridge->m_p25N, bridge->m_dmrN, bridge->m_analogN, bridge->m_udpFrameCnt);
LogDebugEx(LOG_HOST, "HostBridge::threadUDPAudioProcess()", "now = %llu, lastUdpFrameTime = %llu, audioDetect = %u, callInProgress = %u, p25N = %u, dmrN = %u, analogN = %u, frameCnt = %u, pktSeq = %u",
now, lastFrameTime, bridge->m_audioDetect, bridge->m_callInProgress, bridge->m_p25N, bridge->m_dmrN, bridge->m_analogN, bridge->m_udpFrameCnt, pktSeq);
bridge->m_udpPackets.pop_front();
bridge->m_udpDropTime.start();
frameTimeout.start();
// handle source ID management
bool forceCallStart = false;
uint32_t txStreamId = bridge->m_txStreamId;
// determine source ID to use for this UDP audio frame
if (bridge->m_udpMetadata) {
// use source ID from UDP metadata if available and override is enabled
if (bridge->m_overrideSrcIdFromUDP) {
if (req->srcId != 0U && bridge->m_udpSrcId != 0U) {
// if the UDP source ID now doesn't match the current call ID, reset call states
@@ -3460,6 +3535,7 @@ void* HostBridge::threadUDPAudioProcess(void* arg)
bridge->m_udpDropTime.start();
}
// process the received audio frame
std::lock_guard<std::mutex> lock(s_audioMutex);
uint8_t pcm[AUDIO_SAMPLES_LENGTH_BYTES];
::memset(pcm, 0x00U, AUDIO_SAMPLES_LENGTH_BYTES);

@@ -23,6 +23,7 @@
#include "common/dmr/lc/PrivacyLC.h"
#include "common/p25/Crypto.h"
#include "common/network/udp/Socket.h"
#include "common/network/RTPHeader.h"
#include "common/yaml/Yaml.h" #include "common/yaml/Yaml.h"
#include "common/RingBuffer.h" #include "common/RingBuffer.h"
#include "common/Timer.h" #include "common/Timer.h"
@ -105,11 +106,13 @@ void mdcPacketDetected(int frameCount, mdc_u8_t op, mdc_u8_t arg, mdc_u16_t unit
* @ingroup bridge * @ingroup bridge
*/ */
struct NetPacketRequest { struct NetPacketRequest {
uint32_t srcId; //!< Source Address uint32_t srcId; //!< Source Address
uint32_t dstId; //!< Destination Address uint32_t dstId; //!< Destination Address
network::frame::RTPHeader rtpHeader; //!< RTP Header
int pcmLength = 0U; //!< Length of PCM data buffer
uint8_t* pcm = nullptr; //!< Raw PCM buffer
};
// ---------------------------------------------------------------------------
@@ -157,6 +160,7 @@
std::string m_udpReceiveAddress;
bool m_udpRTPFrames;
bool m_udpIgnoreRTPTiming;
bool m_udpUseULaw;
bool m_udpUsrp;
bool m_udpFrameTiming;
@@ -275,6 +279,9 @@
uint16_t m_rtpSeqNo;
uint32_t m_rtpTimestamp;
uint16_t m_udpNetPktSeq;
uint16_t m_udpNetLastPktSeq;
uint32_t m_usrpSeqNo;
static std::mutex s_audioMutex;
