Diffstat (limited to 'indra/newview/llvoicewebrtc.cpp')
-rw-r--r--  indra/newview/llvoicewebrtc.cpp  301
1 file changed, 229 insertions(+), 72 deletions(-)
diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp
index facda1d876..447f1b652a 100644
--- a/indra/newview/llvoicewebrtc.cpp
+++ b/indra/newview/llvoicewebrtc.cpp
@@ -52,6 +52,7 @@
#include "llcachename.h"
#include "llimview.h" // for LLIMMgr
#include "llworld.h"
+#include "llviewerregion.h"
#include "llparcel.h"
#include "llviewerparcelmgr.h"
#include "llfirstuse.h"
@@ -81,9 +82,15 @@ const std::string WEBRTC_VOICE_SERVER_TYPE = "webrtc";
namespace {
- const F32 MAX_AUDIO_DIST = 50.0f;
- const F32 VOLUME_SCALE_WEBRTC = 0.01f;
- const F32 LEVEL_SCALE_WEBRTC = 0.008f;
+ const F32 MAX_AUDIO_DIST = 50.0f;
+ const F32 VOLUME_SCALE_WEBRTC = 0.01f;
+ const F32 TUNING_LEVEL_SCALE = 0.01f;
+ const F32 TUNING_LEVEL_START_POINT = 0.8f;
+ const F32 LEVEL_SCALE = 0.005f;
+ const F32 LEVEL_START_POINT = 0.18f;
+ const uint32_t SET_HIDDEN_RESTORE_DELAY_MS = 200; // 200 ms to unmute again after hiding during teleport
+ const uint32_t MUTE_FADE_DELAY_MS = 500; // a 20 ms fade followed by 480 ms of silence gets rid of the click just after unmuting,
+ // because the silence clears the buffers and processing.
const F32 SPEAKING_AUDIO_LEVEL = 0.30;
@@ -200,7 +207,6 @@ bool LLWebRTCVoiceClient::sShuttingDown = false;
LLWebRTCVoiceClient::LLWebRTCVoiceClient() :
mHidden(false),
- mTuningMode(false),
mTuningMicGain(0.0),
mTuningSpeakerVolume(50), // Set to 50 so users can hear themselves while adjusting their mic volume
mDevicesListUpdated(false),
@@ -267,6 +273,11 @@ void LLWebRTCVoiceClient::cleanupSingleton()
void LLWebRTCVoiceClient::init(LLPumpIO* pump)
{
// constructor will set up LLVoiceClient::getInstance()
+ initWebRTC();
+}
+
+void LLWebRTCVoiceClient::initWebRTC()
+{
llwebrtc::init(this);
mWebRTCDeviceInterface = llwebrtc::getDeviceInterface();
@@ -282,10 +293,13 @@ void LLWebRTCVoiceClient::terminate()
return;
}
+ LL_INFOS("Voice") << "Terminating WebRTC" << LL_ENDL;
+
mVoiceEnabled = false;
+ sShuttingDown = true; // so that coroutines won't post more work.
llwebrtc::terminate();
- sShuttingDown = true;
+ mWebRTCDeviceInterface = nullptr;
}
//---------------------------------------------------
@@ -336,35 +350,57 @@ void LLWebRTCVoiceClient::updateSettings()
LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE;
setVoiceEnabled(LLVoiceClient::getInstance()->voiceEnabled());
- static LLCachedControl<S32> sVoiceEarLocation(gSavedSettings, "VoiceEarLocation");
- setEarLocation(sVoiceEarLocation);
-
- static LLCachedControl<std::string> sInputDevice(gSavedSettings, "VoiceInputAudioDevice");
- setCaptureDevice(sInputDevice);
+ if (mVoiceEnabled)
+ {
+ static LLCachedControl<S32> sVoiceEarLocation(gSavedSettings, "VoiceEarLocation");
+ setEarLocation(sVoiceEarLocation);
- static LLCachedControl<std::string> sOutputDevice(gSavedSettings, "VoiceOutputAudioDevice");
- setRenderDevice(sOutputDevice);
+ static LLCachedControl<std::string> sInputDevice(gSavedSettings, "VoiceInputAudioDevice");
+ setCaptureDevice(sInputDevice);
- LL_INFOS("Voice") << "Input device: " << std::quoted(sInputDevice()) << ", output device: " << std::quoted(sOutputDevice()) << LL_ENDL;
+ static LLCachedControl<std::string> sOutputDevice(gSavedSettings, "VoiceOutputAudioDevice");
+ setRenderDevice(sOutputDevice);
- static LLCachedControl<F32> sMicLevel(gSavedSettings, "AudioLevelMic");
- setMicGain(sMicLevel);
+ LL_INFOS("Voice") << "Input device: " << std::quoted(sInputDevice()) << ", output device: " << std::quoted(sOutputDevice())
+ << LL_ENDL;
- llwebrtc::LLWebRTCDeviceInterface::AudioConfig config;
+ static LLCachedControl<F32> sMicLevel(gSavedSettings, "AudioLevelMic");
+ setMicGain(sMicLevel);
- static LLCachedControl<bool> sEchoCancellation(gSavedSettings, "VoiceEchoCancellation", true);
- config.mEchoCancellation = sEchoCancellation;
+ llwebrtc::LLWebRTCDeviceInterface::AudioConfig config;
- static LLCachedControl<bool> sAGC(gSavedSettings, "VoiceAutomaticGainControl", true);
- config.mAGC = sAGC;
+ bool audioConfigChanged = false;
- static LLCachedControl<U32> sNoiseSuppressionLevel(gSavedSettings,
- "VoiceNoiseSuppressionLevel",
- llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel::NOISE_SUPPRESSION_LEVEL_VERY_HIGH);
- config.mNoiseSuppressionLevel = (llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel) (U32)sNoiseSuppressionLevel;
+ static LLCachedControl<bool> sEchoCancellation(gSavedSettings, "VoiceEchoCancellation", true);
+ if (sEchoCancellation != config.mEchoCancellation)
+ {
+ config.mEchoCancellation = sEchoCancellation;
+ audioConfigChanged = true;
+ }
- mWebRTCDeviceInterface->setAudioConfig(config);
+ static LLCachedControl<bool> sAGC(gSavedSettings, "VoiceAutomaticGainControl", true);
+ if (sAGC != config.mAGC)
+ {
+ config.mAGC = sAGC;
+ audioConfigChanged = true;
+ }
+ static LLCachedControl<U32> sNoiseSuppressionLevel(
+ gSavedSettings,
+ "VoiceNoiseSuppressionLevel",
+ llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel::NOISE_SUPPRESSION_LEVEL_VERY_HIGH);
+ auto noiseSuppressionLevel =
+ (llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel)(U32)sNoiseSuppressionLevel;
+ if (noiseSuppressionLevel != config.mNoiseSuppressionLevel)
+ {
+ config.mNoiseSuppressionLevel = noiseSuppressionLevel;
+ audioConfigChanged = true;
+ }
+ if (audioConfigChanged && mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setAudioConfig(config);
+ }
+ }
}
// Observers
@@ -661,7 +697,10 @@ LLVoiceDeviceList& LLWebRTCVoiceClient::getCaptureDevices()
void LLWebRTCVoiceClient::setCaptureDevice(const std::string& name)
{
- mWebRTCDeviceInterface->setCaptureDevice(name);
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setCaptureDevice(name);
+ }
}
void LLWebRTCVoiceClient::setDevicesListUpdated(bool state)
{
@@ -692,21 +731,38 @@ void LLWebRTCVoiceClient::OnDevicesChangedImpl(const llwebrtc::LLWebRTCVoiceDevi
std::string outputDevice = gSavedSettings.getString("VoiceOutputAudioDevice");
LL_DEBUGS("Voice") << "Setting devices to-input: '" << inputDevice << "' output: '" << outputDevice << "'" << LL_ENDL;
- clearRenderDevices();
- for (auto &device : render_devices)
+
+ // only set the render device if the device list has changed.
+ if (mRenderDevices.size() != render_devices.size() || !std::equal(mRenderDevices.begin(),
+ mRenderDevices.end(),
+ render_devices.begin(),
+ [](const LLVoiceDevice& a, const llwebrtc::LLWebRTCVoiceDevice& b) {
+ return a.display_name == b.mDisplayName && a.full_name == b.mID; }))
{
- addRenderDevice(LLVoiceDevice(device.mDisplayName, device.mID));
+ clearRenderDevices();
+ for (auto& device : render_devices)
+ {
+ addRenderDevice(LLVoiceDevice(device.mDisplayName, device.mID));
+ }
+ setRenderDevice(outputDevice);
}
- setRenderDevice(outputDevice);
- clearCaptureDevices();
- for (auto &device : capture_devices)
+ // only set the capture device if the device list has changed.
+ if (mCaptureDevices.size() != capture_devices.size() || !std::equal(mCaptureDevices.begin(),
+ mCaptureDevices.end(),
+ capture_devices.begin(),
+ [](const LLVoiceDevice& a, const llwebrtc::LLWebRTCVoiceDevice& b)
+ { return a.display_name == b.mDisplayName && a.full_name == b.mID; }))
{
- LL_DEBUGS("Voice") << "Checking capture device:'" << device.mID << "'" << LL_ENDL;
+ clearCaptureDevices();
+ for (auto& device : capture_devices)
+ {
+ LL_DEBUGS("Voice") << "Checking capture device:'" << device.mID << "'" << LL_ENDL;
- addCaptureDevice(LLVoiceDevice(device.mDisplayName, device.mID));
+ addCaptureDevice(LLVoiceDevice(device.mDisplayName, device.mID));
+ }
+ setCaptureDevice(inputDevice);
}
- setCaptureDevice(inputDevice);
setDevicesListUpdated(true);
}
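
The two checks above rebuild a device list only when it actually differs from what WebRTC reports. A minimal standalone sketch of that comparison (illustration only, not part of the patch), with plain structs standing in for LLVoiceDevice and llwebrtc::LLWebRTCVoiceDevice:

#include <algorithm>
#include <string>
#include <vector>

struct ViewerDevice { std::string display_name, full_name; };  // stand-in for LLVoiceDevice
struct WebRTCDevice { std::string mDisplayName, mID; };        // stand-in for llwebrtc::LLWebRTCVoiceDevice

bool deviceListsMatch(const std::vector<ViewerDevice>& current,
                      const std::vector<WebRTCDevice>& incoming)
{
    // The three-iterator form of std::equal assumes equal lengths,
    // so the size check has to come first, exactly as in the patch.
    return current.size() == incoming.size() &&
           std::equal(current.begin(), current.end(), incoming.begin(),
                      [](const ViewerDevice& a, const WebRTCDevice& b)
                      { return a.display_name == b.mDisplayName && a.full_name == b.mID; });
}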
@@ -731,14 +787,20 @@ LLVoiceDeviceList& LLWebRTCVoiceClient::getRenderDevices()
void LLWebRTCVoiceClient::setRenderDevice(const std::string& name)
{
- mWebRTCDeviceInterface->setRenderDevice(name);
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setRenderDevice(name);
+ }
}
void LLWebRTCVoiceClient::tuningStart()
{
if (!mIsInTuningMode)
{
- mWebRTCDeviceInterface->setTuningMode(true);
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setTuningMode(true);
+ }
mIsInTuningMode = true;
}
}
@@ -747,7 +809,10 @@ void LLWebRTCVoiceClient::tuningStop()
{
if (mIsInTuningMode)
{
- mWebRTCDeviceInterface->setTuningMode(false);
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setTuningMode(false);
+ }
mIsInTuningMode = false;
}
}
@@ -759,7 +824,14 @@ bool LLWebRTCVoiceClient::inTuningMode()
void LLWebRTCVoiceClient::tuningSetMicVolume(float volume)
{
- mTuningMicGain = volume;
+ if (volume != mTuningMicGain)
+ {
+ mTuningMicGain = volume;
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setTuningMicGain(volume);
+ }
+ }
}
void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume)
@@ -771,21 +843,14 @@ void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume)
}
}
-float LLWebRTCVoiceClient::getAudioLevel()
+float LLWebRTCVoiceClient::tuningGetEnergy(void)
{
- if (mIsInTuningMode)
+ if (!mWebRTCDeviceInterface)
{
- return (1.0f - mWebRTCDeviceInterface->getTuningAudioLevel() * LEVEL_SCALE_WEBRTC) * mTuningMicGain / 2.1f;
- }
- else
- {
- return (1.0f - mWebRTCDeviceInterface->getPeerConnectionAudioLevel() * LEVEL_SCALE_WEBRTC) * mMicGain / 2.1f;
+ return 0.f;
}
-}
-
-float LLWebRTCVoiceClient::tuningGetEnergy(void)
-{
- return getAudioLevel();
+ float rms = mWebRTCDeviceInterface->getTuningAudioLevel();
+ return TUNING_LEVEL_START_POINT - TUNING_LEVEL_SCALE * rms;
}
bool LLWebRTCVoiceClient::deviceSettingsAvailable()
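
tuningGetEnergy() now derives the meter value directly from the device-level reading instead of the removed getAudioLevel() helper; updateOwnVolume() below applies the same shape with LEVEL_START_POINT and LEVEL_SCALE. A standalone sketch of the mapping (not viewer code), assuming, as the subtraction suggests, that a louder signal produces a smaller raw reading and that callers expect a value in [0, 1]:

#include <algorithm>

namespace {
    const float TUNING_LEVEL_SCALE       = 0.01f; // constants from the patch
    const float TUNING_LEVEL_START_POINT = 0.8f;
}

// Louder input (smaller raw reading) pushes the result toward
// TUNING_LEVEL_START_POINT; quieter input pulls it toward zero.
float tuningEnergyFromLevel(float raw_level)
{
    float energy = TUNING_LEVEL_START_POINT - TUNING_LEVEL_SCALE * raw_level;
    return std::clamp(energy, 0.0f, 1.0f); // clamp is an assumption about the consumer
}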
@@ -811,7 +876,10 @@ void LLWebRTCVoiceClient::refreshDeviceLists(bool clearCurrentList)
clearCaptureDevices();
clearRenderDevices();
}
- mWebRTCDeviceInterface->refreshDevices();
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->refreshDevices();
+ }
}
@@ -821,6 +889,11 @@ void LLWebRTCVoiceClient::setHidden(bool hidden)
if (inSpatialChannel())
{
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setMute(mHidden || mMuteMic,
+ mHidden ? 0 : SET_HIDDEN_RESTORE_DELAY_MS); // delay 200ms so as to not pile up mutes/unmutes.
+ }
if (mHidden)
{
// get out of the channel entirely
@@ -987,7 +1060,6 @@ void LLWebRTCVoiceClient::updatePosition(void)
{
if (participant->mRegion != region->getRegionID()) {
participant->mRegion = region->getRegionID();
- setMuteMic(mMuteMic);
}
}
}
@@ -1112,13 +1184,14 @@ void LLWebRTCVoiceClient::sendPositionUpdate(bool force)
// Update our own volume on our participant, so it'll show up
// in the UI. This is done on all sessions, so switching
// sessions retains consistent volume levels.
-void LLWebRTCVoiceClient::updateOwnVolume() {
- F32 audio_level = 0.0;
- if (!mMuteMic && !mTuningMode)
+void LLWebRTCVoiceClient::updateOwnVolume()
+{
+ F32 audio_level = 0.0f;
+ if (!mMuteMic && mWebRTCDeviceInterface)
{
- audio_level = getAudioLevel();
+ float rms = mWebRTCDeviceInterface->getPeerConnectionAudioLevel();
+ audio_level = LEVEL_START_POINT - LEVEL_SCALE * rms;
}
-
sessionState::for_each(boost::bind(predUpdateOwnVolume, _1, audio_level));
}
@@ -1515,6 +1588,17 @@ void LLWebRTCVoiceClient::setMuteMic(bool muted)
}
mMuteMic = muted;
+
+ if (mIsInTuningMode)
+ {
+ return;
+ }
+
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setMute(muted, muted ? MUTE_FADE_DELAY_MS : 0); // fade and brief silence on mute (MUTE_FADE_DELAY_MS) so the buffers can empty
+ }
+
// when you're hidden, your mic is always muted.
if (!mHidden)
{
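
setMuteMic() now hands the fade/silence delay to the device layer instead of muting instantly. A conceptual sketch (not llwebrtc code) of why ramping to silence avoids the click that the MUTE_FADE_DELAY_MS comment describes:

#include <cstddef>
#include <cstdint>

// Ramp the first fade_ms of audio down to zero instead of cutting it off,
// then hold silence so downstream buffers and processing drain cleanly.
void fadeThenSilence(float* samples, size_t count, uint32_t sample_rate_hz, uint32_t fade_ms)
{
    size_t fade_samples = static_cast<size_t>(sample_rate_hz) * fade_ms / 1000;
    for (size_t i = 0; i < count; ++i)
    {
        float gain = (i < fade_samples)
                         ? 1.0f - static_cast<float>(i) / static_cast<float>(fade_samples)
                         : 0.0f;
        samples[i] *= gain;
    }
}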
@@ -1553,7 +1637,10 @@ void LLWebRTCVoiceClient::setMicGain(F32 gain)
if (gain != mMicGain)
{
mMicGain = gain;
- mWebRTCDeviceInterface->setPeerConnectionGain(gain);
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setMicGain(gain);
+ }
}
}
@@ -1737,6 +1824,15 @@ void LLWebRTCVoiceClient::onChangeDetailed(const LLMute& mute)
}
}
+void LLWebRTCVoiceClient::userAuthorized(const std::string& user_id, const LLUUID& agentID)
+{
+ if (sShuttingDown)
+ {
+ sShuttingDown = false; // was terminated, restart
+ initWebRTC();
+ }
+}
+
void LLWebRTCVoiceClient::predSetUserMute(const LLWebRTCVoiceClient::sessionStatePtr_t &session, const LLUUID &id, bool mute)
{
session->setUserMute(id, mute);
@@ -2002,6 +2098,33 @@ bool LLWebRTCVoiceClient::sessionState::processConnectionStates()
return !mWebRTCConnections.empty();
}
+// Helper function to check if a region supports WebRTC voice
+bool LLWebRTCVoiceClient::estateSessionState::isRegionWebRTCEnabled(const LLUUID& regionID)
+{
+ LLViewerRegion* region = LLWorld::getInstance()->getRegionFromID(regionID);
+ if (!region)
+ {
+ LL_WARNS("Voice") << "Could not find region " << regionID
+ << " for voice server type validation" << LL_ENDL;
+ return false;
+ }
+
+ LLSD simulatorFeatures;
+ region->getSimulatorFeatures(simulatorFeatures);
+
+ bool isWebRTCEnabled = simulatorFeatures.has("VoiceServerType") &&
+ simulatorFeatures["VoiceServerType"].asString() == "webrtc";
+
+ if (!isWebRTCEnabled)
+ {
+ LL_DEBUGS("Voice") << "Region " << regionID << " VoiceServerType is not 'webrtc' (got: "
+ << (simulatorFeatures.has("VoiceServerType") ? simulatorFeatures["VoiceServerType"].asString() : "none") << ")"
+ << LL_ENDL;
+ }
+
+ return isWebRTCEnabled;
+}
+
// processing of spatial voice connection states requires special handling.
// as neighboring regions need to be started up or shut down depending
// on our location.
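
The new isRegionWebRTCEnabled() helper boils down to a single feature lookup. A minimal sketch of the same check (illustration only), with a plain map standing in for the region's LLSD simulator features; the key "VoiceServerType" and the value "webrtc" come from the patch:

#include <map>
#include <string>

bool regionSupportsWebRTCVoice(const std::map<std::string, std::string>& simulator_features)
{
    auto it = simulator_features.find("VoiceServerType");
    return it != simulator_features.end() && it->second == "webrtc";
}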
@@ -2026,6 +2149,13 @@ bool LLWebRTCVoiceClient::estateSessionState::processConnectionStates()
// shut down connections to neighbors that are too far away.
spatialConnection.get()->shutDown();
}
+ else if (!isRegionWebRTCEnabled(regionID))
+ {
+ // shut down connections to neighbors that no longer support WebRTC voice.
+ LL_DEBUGS("Voice") << "Shutting down connection to neighbor region " << regionID
+ << " - no longer supports WebRTC voice" << LL_ENDL;
+ spatialConnection.get()->shutDown();
+ }
if (!spatialConnection.get()->isShuttingDown())
{
neighbor_ids.erase(regionID);
@@ -2035,11 +2165,20 @@ bool LLWebRTCVoiceClient::estateSessionState::processConnectionStates()
// add new connections for new neighbors
for (auto &neighbor : neighbor_ids)
{
- connectionPtr_t connection(new LLVoiceWebRTCSpatialConnection(neighbor, INVALID_PARCEL_ID, mChannelID));
+ // Only connect if the region supports the WebRTC voice server type
+ if (isRegionWebRTCEnabled(neighbor))
+ {
+ connectionPtr_t connection(new LLVoiceWebRTCSpatialConnection(neighbor, INVALID_PARCEL_ID, mChannelID));
- mWebRTCConnections.push_back(connection);
- connection->setMuteMic(mMuted);
- connection->setSpeakerVolume(mSpeakerVolume);
+ mWebRTCConnections.push_back(connection);
+ connection->setMuteMic(mMuted); // mute will be set for primary connection when that connection comes up
+ connection->setSpeakerVolume(mSpeakerVolume);
+ }
+ else
+ {
+ LL_DEBUGS("Voice") << "Skipping neighbor region " << neighbor
+ << " - does not support WebRTC voice" << LL_ENDL;
+ }
}
}
return LLWebRTCVoiceClient::sessionState::processConnectionStates();
@@ -2262,7 +2401,6 @@ void LLVoiceWebRTCConnection::processIceUpdatesCoro(connectionPtr_t connection)
return;
}
- bool iceCompleted = false;
LLSD body;
if (!connection->mIceCandidates.empty() || connection->mIceCompleted)
{
@@ -2301,7 +2439,6 @@ void LLVoiceWebRTCConnection::processIceUpdatesCoro(connectionPtr_t connection)
LLSD body_candidate;
body_candidate["completed"] = true;
body["candidate"] = body_candidate;
- iceCompleted = connection->mIceCompleted;
connection->mIceCompleted = false;
}
@@ -2389,6 +2526,7 @@ void LLVoiceWebRTCConnection::OnAudioEstablished(llwebrtc::LLWebRTCAudioInterfac
}
LL_DEBUGS("Voice") << "On AudioEstablished." << LL_ENDL;
mWebRTCAudioInterface = audio_interface;
+ mWebRTCAudioInterface->setMute(true); // mute will be set appropriately later when we finish setting up.
setVoiceConnectionState(VOICE_STATE_SESSION_ESTABLISHED);
});
}
@@ -2549,6 +2687,11 @@ void LLVoiceWebRTCConnection::breakVoiceConnectionCoro(connectionPtr_t connectio
void LLVoiceWebRTCSpatialConnection::requestVoiceConnection()
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE;
+ if (LLWebRTCVoiceClient::isShuttingDown())
+ {
+ mOutstandingRequests--;
+ return;
+ }
LLViewerRegion *regionp = LLWorld::instance().getRegionFromID(mRegionID);
@@ -2748,7 +2891,12 @@ bool LLVoiceWebRTCConnection::connectionStateMachine()
}
// update the peer connection with the various characteristics of
// this connection.
- mWebRTCAudioInterface->setMute(mMuted);
+ // Spatial connections come up muted; the mute state is set to the appropriate
+ // value later, once we determine which regions we connect to.
+ if (!isSpatial())
+ {
+ mWebRTCAudioInterface->setMute(mMuted);
+ }
mWebRTCAudioInterface->setReceiveVolume(mSpeakerVolume);
LLWebRTCVoiceClient::getInstance()->OnConnectionEstablished(mChannelID, mRegionID);
setVoiceConnectionState(VOICE_STATE_WAIT_FOR_DATA_CHANNEL);
@@ -2793,6 +2941,10 @@ bool LLVoiceWebRTCConnection::connectionStateMachine()
if (primary != mPrimary)
{
mPrimary = primary;
+ if (mWebRTCAudioInterface)
+ {
+ mWebRTCAudioInterface->setMute(mMuted || !mPrimary);
+ }
sendJoin();
}
}
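
Taken together with the setMute(true) added in OnAudioEstablished and the isSpatial() guard above, the intended lifecycle is: every spatial connection comes up muted, and only the connection that becomes primary follows the user's mute state. An illustration with a stub interface (not viewer code):

#include <cstdio>

struct StubAudioInterface                       // stand-in for llwebrtc::LLWebRTCAudioInterface
{
    void setMute(bool mute) { std::printf("setMute(%s)\n", mute ? "true" : "false"); }
};

void onAudioEstablished(StubAudioInterface& audio)
{
    audio.setMute(true);                        // start muted; corrected once primary is known
}

void onPrimaryChanged(StubAudioInterface& audio, bool user_muted, bool is_primary)
{
    audio.setMute(user_muted || !is_primary);   // mirrors setMute(mMuted || !mPrimary)
}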
@@ -2846,9 +2998,13 @@ bool LLVoiceWebRTCConnection::connectionStateMachine()
}
// else was already posted by llwebrtc::terminate().
break;
+ }
+
case VOICE_STATE_WAIT_FOR_CLOSE:
break;
+
case VOICE_STATE_CLOSED:
+ {
if (!mShutDown)
{
mVoiceConnectionState = VOICE_STATE_START_SESSION;
@@ -2924,7 +3080,6 @@ void LLVoiceWebRTCConnection::OnDataReceivedImpl(const std::string &data, bool b
return;
}
boost::json::object voice_data = voice_data_parsed.as_object();
- bool new_participant = false;
boost::json::object mute;
boost::json::object user_gain;
for (auto &participant_elem : voice_data)
@@ -2977,7 +3132,6 @@ void LLVoiceWebRTCConnection::OnDataReceivedImpl(const std::string &data, bool b
}
}
- new_participant |= joined;
if (!participant && joined && (primary || !isSpatial()))
{
participant = LLWebRTCVoiceClient::getInstance()->addParticipantByID(mChannelID, agent_id, mRegionID);
@@ -3095,10 +3249,7 @@ LLVoiceWebRTCSpatialConnection::LLVoiceWebRTCSpatialConnection(const LLUUID &reg
LLVoiceWebRTCConnection(regionID, channelID),
mParcelLocalID(parcelLocalID)
{
- if (gAgent.getRegion())
- {
- mPrimary = (regionID == gAgent.getRegion()->getRegionID());
- }
+ mPrimary = false; // will be set to primary after the connection is established
}
LLVoiceWebRTCSpatialConnection::~LLVoiceWebRTCSpatialConnection()
@@ -3148,6 +3299,12 @@ void LLVoiceWebRTCAdHocConnection::requestVoiceConnection()
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE;
+ if (LLWebRTCVoiceClient::isShuttingDown())
+ {
+ mOutstandingRequests--;
+ return;
+ }
+
LLViewerRegion *regionp = LLWorld::instance().getRegionFromID(mRegionID);
LL_DEBUGS("Voice") << "Requesting voice connection." << LL_ENDL;