author    Roxie Linden <roxie@lindenlab.com>  2024-01-29 21:25:13 -0800
committer Roxie Linden <roxie@lindenlab.com>  2024-02-08 18:35:21 -0800
commit    02423047646cfc7d410e223611033d488f1c26b0 (patch)
tree      b92da89347cdb51163cf6660d47ca915646f8a29 /indra/newview/llvoicewebrtc.cpp
parent    0e6103e3a943c7f7726a93535048c634eb85eefc (diff)
Treat adhoc/p2p as primary connections
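The first hunk widens the condition that adds a newly joined peer to the participant list: previously only the primary (spatial) connection did so, while after this change a non-spatial connection (ad-hoc or p2p, which has a single connection per channel) also does. The sketch below is not viewer code; shouldAddParticipant() is a hypothetical helper, and its parameters merely mirror the participant/joined/primary flags and isSpatial() seen in OnDataReceived().

    // Minimal sketch of the gating rule introduced by this commit (assumed
    // simplification; names other than the flags from the diff are made up).
    #include <iostream>

    bool shouldAddParticipant(bool already_known, bool joined, bool primary, bool is_spatial)
    {
        // Before: only the primary connection added new participants.
        // After: ad-hoc/p2p connections are non-spatial and have exactly one
        // connection per channel, so they are treated as primary as well.
        return !already_known && joined && (primary || !is_spatial);
    }

    int main()
    {
        // A peer joining over a p2p (non-spatial, non-primary) connection is
        // now added, where previously it would have been skipped.
        std::cout << std::boolalpha
                  << shouldAddParticipant(false, true, /*primary=*/false, /*is_spatial=*/false)
                  << '\n';  // prints: true
    }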
Diffstat (limited to 'indra/newview/llvoicewebrtc.cpp')
-rw-r--r--  indra/newview/llvoicewebrtc.cpp | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp
index fcdd818757..a5c647c675 100644
--- a/indra/newview/llvoicewebrtc.cpp
+++ b/indra/newview/llvoicewebrtc.cpp
@@ -2853,7 +2853,7 @@ void LLVoiceWebRTCConnection::OnDataReceived(const std::string &data, bool binar
}
new_participant |= joined;
- if (!participant && joined && primary)
+ if (!participant && joined && (primary || !isSpatial()))
{
participant = LLWebRTCVoiceClient::getInstance()->addParticipantByID(mChannelID, agent_id);
}
@@ -2861,19 +2861,19 @@ void LLVoiceWebRTCConnection::OnDataReceived(const std::string &data, bool binar
{
if (voice_data[participant_id].get("l", Json::Value(false)).asBool())
{
- if (agent_id != gAgentID)
- {
+ if (agent_id != gAgentID)
+ {
LLWebRTCVoiceClient::getInstance()->removeParticipantByID(mChannelID, agent_id);
- }
+ }
}
else
{
- F32 level = (F32) (voice_data[participant_id].get("p", Json::Value(participant->mLevel)).asInt()) / 128;
- // convert to decibles
- participant->mLevel = level;
- /* WebRTC appears to have deprecated VAD, but it's still in the Audio Processing Module so maybe we
- can use it at some point when we actually process frames. */
- participant->mIsSpeaking = participant->mLevel > SPEAKING_AUDIO_LEVEL;
+ F32 level = (F32) (voice_data[participant_id].get("p", Json::Value(participant->mLevel)).asInt()) / 128;
+ // convert to decibles
+ participant->mLevel = level;
+ /* WebRTC appears to have deprecated VAD, but it's still in the Audio Processing Module so maybe we
+ can use it at some point when we actually process frames. */
+ participant->mIsSpeaking = participant->mLevel > SPEAKING_AUDIO_LEVEL;
}
}
}
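The second hunk only re-indents the level-handling code, but it shows how the "p" field from the data channel is consumed: the integer is scaled by 1/128 into a float stored in mLevel, then compared against SPEAKING_AUDIO_LEVEL to set mIsSpeaking. Below is a minimal standalone sketch of that arithmetic, not viewer code; the Participant struct, updateLevel(), and the 0.30f threshold are placeholders, since SPEAKING_AUDIO_LEVEL's actual value is defined elsewhere in llvoicewebrtc.cpp.

    // Minimal sketch (assumed names and threshold) of the level update logic.
    #include <cstdio>

    constexpr float SPEAKING_AUDIO_LEVEL = 0.30f;  // placeholder threshold

    struct Participant
    {
        float mLevel      = 0.f;
        bool  mIsSpeaking = false;
    };

    // 'p' arrives as an integer audio level; scale it into a float and
    // threshold it to decide whether the peer counts as speaking.
    void updateLevel(Participant &participant, int p)
    {
        float level = static_cast<float>(p) / 128.0f;
        participant.mLevel      = level;
        participant.mIsSpeaking = participant.mLevel > SPEAKING_AUDIO_LEVEL;
    }

    int main()
    {
        Participant peer;
        updateLevel(peer, 64);  // level = 0.5, above the placeholder threshold
        std::printf("%f %d\n", peer.mLevel, peer.mIsSpeaking);  // 0.500000 1
    }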