From a6d4c1d394eef2cea41f6c6bcd751fec746ec17d Mon Sep 17 00:00:00 2001
From: Roxanne Skelly
Date: Fri, 12 Sep 2025 17:07:51 -0700
Subject: [WebRTC] Rework device handling sequence so that we can handle unplugging/re-plugging devices (#4593)

* [WebRTC] Rework device handling sequence so that we can handle
  unplugging/re-plugging devices

  The device handling was not processing device updates in the proper
  sequence, as features like AEC use both input and output devices.
  Devices like headsets are both, so unplugging them resulted in various
  mute conditions and sometimes even a crash. Now, we update both capture
  and render devices at once, in the proper sequence.

  Test Guidance:
  * Bring two users to the same place in WebRTC regions.
  * The 'listening' one should have a headset or similar set as 'Default'.
  * Press 'talk' on one, and verify the other can hear.
  * Unplug the headset from the listening one.
  * Validate that audio changes from the headset to the speakers.
  * Plug the headset back in.
  * Validate that audio changes from the speakers back to the headset.
  * Do the same type of test with the headset viewer talking.
  * The microphone in use should switch from the headset to the
    computer's own microphone (it should have one).

  Do various other device tests, such as setting devices explicitly,
  messing with the device selector, etc.

* Fix race condition when multiple device-change requests come in at once

* Update to m137

  The primary feature of this commit is to update libwebrtc from m114 to
  m137. This is needed to make webrtc buildable, as m114 is not buildable
  by the current toolset.

  m137 had some changes to the API, which required renaming or changing
  the namespace of some of the calls.

  Additionally, this PR moves from a callback mechanism for gathering the
  energy levels for tuning to a wrapper AudioDeviceModule, which gives us
  more control over the audio stream.

  Finally, the new m137-based webrtc has been updated to allow for
  192 kHz audio streams.

* Properly pass the observer setting into the inner audio device module

* Update to m137 and get rid of some noise

  This change updates to m137 from m114, which required a few API changes.

  Additionally, this fixes the hiss that happens shortly after someone
  unmutes: https://github.com/secondlife/server/issues/2094

  There was also an issue with a slight amount of repeated audio after
  unmuting if there was audio right before unmuting. This is because the
  audio processing and buffering still held audio from the previous
  speaking session. Now, we inject nearly a half second of silence into
  the audio buffers/processor after unmuting to flush things.

* Install NSIS on Windows

* Use the newer digital AGC pipeline

  m137 improved the AGC pipeline, and the existing analog style is going
  away, so move to the new digital pipeline. Also, some tweaking of audio
  levels so that we don't see in-world bars when tuning, one's own bars
  seem a reasonable size, etc.

* Install NSIS during the Windows signing and package build step

* Try pinning the packaging to Windows 2022 to deal with missing NSIS

* Adjust gain calculation and audio level calculations for tuning and
  peer connections

* Update with Mac universal webrtc build

* Tuning of voice indicators for both tuning mode and in-world for self

* Redo device deployment to handle cases where multiple deploy requests
  pile up (see the sketch just below)

  Also, mute when leaving webrtc-enabled regions or parcels, and unmute
  when voice comes back.
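Aside: the "deploy requests pile up" handling in the last bullet reduces to an atomic counter that coalesces requests into serialized passes; the diff below implements it with mDevicesDeploying, deployDevices() and workerDeployDevices(). The following is a minimal single-threaded sketch of that pattern only, not code from the patch: DeviceDeployer, requestDeploy and deployPass are illustrative names, and the posting to the WebRTC worker thread is replaced by a direct call.

    #include <atomic>
    #include <iostream>

    class DeviceDeployer
    {
    public:
        // Called whenever a capture or render device selection changes.
        void requestDeploy()
        {
            // If a pass is already pending or running, just record the request;
            // the running pass schedules a follow-up pass when it finishes.
            if (mPending.fetch_add(1, std::memory_order_relaxed) > 0)
            {
                return;
            }
            // llwebrtc.cpp posts this to the worker thread; the sketch calls it
            // directly to stay single-threaded.
            deployPass();
        }

    private:
        void deployPass()
        {
            // Stop audio, apply recording and playout devices together, restart:
            // the single ordered sequence the commit message describes.
            std::cout << "applying capture and render devices\n";

            // If requests arrived while this pass ran, run one more pass per
            // outstanding request; passes are serialized, never concurrent.
            if (mPending.fetch_sub(1, std::memory_order_relaxed) > 1)
            {
                deployPass();
            }
        }

        std::atomic<int> mPending{0};
    };

    int main()
    {
        DeviceDeployer deployer;
        deployer.requestDeploy();  // first request runs a pass
        deployer.requestDeploy();  // runs another pass, since the first completed
        return 0;
    }

In the real code the counter matters because device-change requests can arrive from other threads while a pass is still running on the worker thread; the sketch only shows the counting logic.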
* pre commit issue --- indra/llwebrtc/llwebrtc.cpp | 740 ++++++++++++++++++++++++-------------------- 1 file changed, 398 insertions(+), 342 deletions(-) (limited to 'indra/llwebrtc/llwebrtc.cpp') diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 20951ff816..edba2bee9a 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -9,7 +9,7 @@ * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; - * version 2.1 of the License only. + * version 2.1 of the License only * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -32,41 +32,79 @@ #include "api/audio_codecs/audio_encoder_factory.h" #include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h" +#include "api/audio/builtin_audio_processing_builder.h" #include "api/media_stream_interface.h" #include "api/media_stream_track.h" #include "modules/audio_processing/audio_buffer.h" #include "modules/audio_mixer/audio_mixer_impl.h" +#include "api/environment/environment_factory.h" namespace llwebrtc { +#if WEBRTC_WIN +static int16_t PLAYOUT_DEVICE_DEFAULT = webrtc::AudioDeviceModule::kDefaultDevice; +static int16_t RECORD_DEVICE_DEFAULT = webrtc::AudioDeviceModule::kDefaultDevice; +#else +static int16_t PLAYOUT_DEVICE_DEFAULT = 0; +static int16_t RECORD_DEVICE_DEFAULT = 0; +#endif -static int16_t PLAYOUT_DEVICE_DEFAULT = -1; -static int16_t PLAYOUT_DEVICE_BAD = -2; -static int16_t RECORD_DEVICE_DEFAULT = -1; -static int16_t RECORD_DEVICE_BAD = -2; -LLAudioDeviceObserver::LLAudioDeviceObserver() : mSumVector {0}, mMicrophoneEnergy(0.0) {} +// +// LLWebRTCAudioTransport implementation +// -float LLAudioDeviceObserver::getMicrophoneEnergy() { return mMicrophoneEnergy; } +LLWebRTCAudioTransport::LLWebRTCAudioTransport() : mMicrophoneEnergy(0.0) +{ + memset(mSumVector, 0, sizeof(mSumVector)); +} -// TODO: Pull smoothing/filtering code into a common helper function -// for LLAudioDeviceObserver and LLCustomProcessor +void LLWebRTCAudioTransport::SetEngineTransport(webrtc::AudioTransport* t) +{ + engine_.store(t, std::memory_order_release); +} -void LLAudioDeviceObserver::OnCaptureData(const void *audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) +int32_t LLWebRTCAudioTransport::RecordedDataIsAvailable(const void* audio_data, + size_t number_of_frames, + size_t bytes_per_frame, + size_t number_of_channels, + uint32_t samples_per_sec, + uint32_t total_delay_ms, + int32_t clock_drift, + uint32_t current_mic_level, + bool key_pressed, + uint32_t& new_mic_level) { + auto* engine = engine_.load(std::memory_order_acquire); + + // 1) Deliver to engine (authoritative). 
+ int32_t ret = 0; + if (engine) + { + ret = engine->RecordedDataIsAvailable(audio_data, + number_of_frames, + bytes_per_frame, + number_of_channels, + samples_per_sec, + total_delay_ms, + clock_drift, + current_mic_level, + key_pressed, + new_mic_level); + } + + // 2) Calculate energy for microphone level monitoring // calculate the energy float energy = 0; - const short *samples = (const short *) audio_samples; - for (size_t index = 0; index < num_samples * num_channels; index++) + const short *samples = (const short *) audio_data; + + for (size_t index = 0; index < number_of_frames * number_of_channels; index++) { float sample = (static_cast(samples[index]) / (float) 32767); energy += sample * sample; } - + float gain = mGain.load(std::memory_order_relaxed); + energy = energy * gain * gain; // smooth it. size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]); float totalSum = 0; @@ -78,18 +116,59 @@ void LLAudioDeviceObserver::OnCaptureData(const void *audio_samples, } mSumVector[i] = energy; totalSum += energy; - mMicrophoneEnergy = std::sqrt(totalSum / (num_samples * buffer_size)); + mMicrophoneEnergy = std::sqrt(totalSum / (number_of_frames * number_of_channels * buffer_size)); + + return ret; } -void LLAudioDeviceObserver::OnRenderData(const void *audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) +int32_t LLWebRTCAudioTransport::NeedMorePlayData(size_t number_of_frames, + size_t bytes_per_frame, + size_t number_of_channels, + uint32_t samples_per_sec, + void* audio_data, + size_t& number_of_samples_out, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) { + auto* engine = engine_.load(std::memory_order_acquire); + if (!engine) + { + // No engine sink; output silence to be safe. + const size_t bytes = number_of_frames * bytes_per_frame * number_of_channels; + memset(audio_data, 0, bytes); + number_of_samples_out = bytes_per_frame; + return 0; + } + + // Only the engine should fill the buffer. 
+ return engine->NeedMorePlayData(number_of_frames, + bytes_per_frame, + number_of_channels, + samples_per_sec, + audio_data, + number_of_samples_out, + elapsed_time_ms, + ntp_time_ms); } -LLCustomProcessor::LLCustomProcessor() : mSampleRateHz(0), mNumChannels(0), mMicrophoneEnergy(0.0), mGain(1.0) +void LLWebRTCAudioTransport::PullRenderData(int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + void* audio_data, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) +{ + auto* engine = engine_.load(std::memory_order_acquire); + + if (engine) + { + engine + ->PullRenderData(bits_per_sample, sample_rate, number_of_channels, number_of_frames, audio_data, elapsed_time_ms, ntp_time_ms); + } +} + +LLCustomProcessor::LLCustomProcessor(LLCustomProcessorStatePtr state) : mSampleRateHz(0), mNumChannels(0), mState(state) { memset(mSumVector, 0, sizeof(mSumVector)); } @@ -101,40 +180,61 @@ void LLCustomProcessor::Initialize(int sample_rate_hz, int num_channels) memset(mSumVector, 0, sizeof(mSumVector)); } -void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) +void LLCustomProcessor::Process(webrtc::AudioBuffer *audio) { - webrtc::StreamConfig stream_config; - stream_config.set_sample_rate_hz(mSampleRateHz); - stream_config.set_num_channels(mNumChannels); - std::vector frame; - std::vector frame_samples; - - if (audio_in->num_channels() < 1 || audio_in->num_frames() < 480) + if (audio->num_channels() < 1 || audio->num_frames() < 480) { return; } - // grab the input audio - frame_samples.resize(stream_config.num_samples()); - frame.resize(stream_config.num_channels()); - for (size_t ch = 0; ch < stream_config.num_channels(); ++ch) + // calculate the energy + + float desired_gain = mState->getGain(); + if (mState->getDirty()) { - frame[ch] = &(frame_samples)[ch * stream_config.num_frames()]; + // We'll delay ramping by 30ms in order to clear out buffers that may + // have had content before muting. And for the last 20ms, we'll ramp + // down or up smoothly. + mRampFrames = 5; + + // we've changed our desired gain, so set the incremental + // gain change so that we smoothly step over 20ms + mGainStep = (desired_gain - mCurrentGain) / (mSampleRateHz / 50); } - audio_in->CopyTo(stream_config, &frame[0]); - - // calculate the energy - float energy = 0; - for (size_t index = 0; index < stream_config.num_samples(); index++) + if (mRampFrames) { - float sample = frame_samples[index]; - sample = sample * mGain; // apply gain - frame_samples[index] = sample; // write processed sample back to buffer. - energy += sample * sample; + if (mRampFrames-- > 2) + { + // don't change the gain if we're still in the 'don't move' phase + mGainStep = 0.0f; + } + } + else + { + // We've ramped all the way down, so don't step the gain any more and + // just maintaint he current gain. + mGainStep = 0.0f; + mCurrentGain = desired_gain; } - audio_in->CopyFrom(&frame[0], stream_config); + float energy = 0; + + auto chans = audio->channels(); + for (size_t ch = 0; ch < audio->num_channels(); ch++) + { + float* frame_samples = chans[ch]; + float gain = mCurrentGain; + for (size_t index = 0; index < audio->num_frames(); index++) + { + float sample = frame_samples[index]; + sample = sample * gain; // apply gain + frame_samples[index] = sample; // write processed sample back to buffer. + energy += sample * sample; + gain += mGainStep; + } + } + mCurrentGain += audio->num_frames() * mGainStep; // smooth it. 
size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]); @@ -147,7 +247,7 @@ void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) } mSumVector[i] = energy; totalSum += energy; - mMicrophoneEnergy = std::sqrt(totalSum / (stream_config.num_samples() * buffer_size)); + mState->setMicrophoneEnergy(std::sqrt(totalSum / (audio->num_channels() * audio->num_frames() * buffer_size))); } // @@ -159,89 +259,54 @@ LLWebRTCImpl::LLWebRTCImpl(LLWebRTCLogCallback* logCallback) : mPeerCustomProcessor(nullptr), mMute(true), mTuningMode(false), - mPlayoutDevice(0), - mRecordingDevice(0), - mTuningAudioDeviceObserver(nullptr) + mDevicesDeploying(0), + mGain(0.0f) { } void LLWebRTCImpl::init() { - mPlayoutDevice = 0; - mRecordingDevice = 0; - rtc::InitializeSSL(); + webrtc::InitializeSSL(); // Normal logging is rather spammy, so turn it off. - rtc::LogMessage::LogToDebug(rtc::LS_NONE); - rtc::LogMessage::SetLogToStderr(true); - rtc::LogMessage::AddLogToStream(mLogSink, rtc::LS_VERBOSE); + webrtc::LogMessage::LogToDebug(webrtc::LS_NONE); + webrtc::LogMessage::SetLogToStderr(true); + webrtc::LogMessage::AddLogToStream(mLogSink, webrtc::LS_VERBOSE); mTaskQueueFactory = webrtc::CreateDefaultTaskQueueFactory(); // Create the native threads. - mNetworkThread = rtc::Thread::CreateWithSocketServer(); + mNetworkThread = webrtc::Thread::CreateWithSocketServer(); mNetworkThread->SetName("WebRTCNetworkThread", nullptr); mNetworkThread->Start(); - mWorkerThread = rtc::Thread::Create(); + mWorkerThread = webrtc::Thread::Create(); mWorkerThread->SetName("WebRTCWorkerThread", nullptr); mWorkerThread->Start(); - mSignalingThread = rtc::Thread::Create(); + mSignalingThread = webrtc::Thread::Create(); mSignalingThread->SetName("WebRTCSignalingThread", nullptr); mSignalingThread->Start(); - mTuningAudioDeviceObserver = new LLAudioDeviceObserver; - mWorkerThread->PostTask( - [this]() - { - // Initialize the audio devices on the Worker Thread - mTuningDeviceModule = - webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, - mTaskQueueFactory.get(), - std::unique_ptr(mTuningAudioDeviceObserver)); - - mTuningDeviceModule->Init(); - mTuningDeviceModule->SetPlayoutDevice(mPlayoutDevice); - mTuningDeviceModule->SetRecordingDevice(mRecordingDevice); - mTuningDeviceModule->EnableBuiltInAEC(false); - mTuningDeviceModule->SetAudioDeviceSink(this); - mTuningDeviceModule->InitMicrophone(); - mTuningDeviceModule->InitSpeaker(); - mTuningDeviceModule->SetStereoRecording(false); - mTuningDeviceModule->SetStereoPlayout(true); - mTuningDeviceModule->InitRecording(); - mTuningDeviceModule->InitPlayout(); - updateDevices(); - }); - mWorkerThread->BlockingCall( [this]() { - // the peer device module doesn't need an observer - // as we pull peer data after audio processing. 
- mPeerDeviceModule = webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, - mTaskQueueFactory.get(), - nullptr); - mPeerDeviceModule->Init(); - mPeerDeviceModule->SetPlayoutDevice(mPlayoutDevice); - mPeerDeviceModule->SetRecordingDevice(mRecordingDevice); - mPeerDeviceModule->EnableBuiltInAEC(false); - mPeerDeviceModule->InitMicrophone(); - mPeerDeviceModule->InitSpeaker(); + webrtc::scoped_refptr realADM = + webrtc::AudioDeviceModule::Create(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, mTaskQueueFactory.get()); + mDeviceModule = webrtc::make_ref_counted(realADM); + mDeviceModule->SetObserver(this); }); // The custom processor allows us to retrieve audio data (and levels) // from after other audio processing such as AEC, AGC, etc. - mPeerCustomProcessor = new LLCustomProcessor; - webrtc::AudioProcessingBuilder apb; - apb.SetCapturePostProcessing(std::unique_ptr(mPeerCustomProcessor)); - mAudioProcessingModule = apb.Create(); + mPeerCustomProcessor = std::make_shared(); + webrtc::BuiltinAudioProcessingBuilder apb; + apb.SetCapturePostProcessing(std::make_unique(mPeerCustomProcessor)); + mAudioProcessingModule = apb.Build(webrtc::CreateEnvironment()); webrtc::AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = false; apm_config.echo_canceller.mobile_mode = false; apm_config.gain_controller1.enabled = false; - apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog; - apm_config.gain_controller2.enabled = false; + apm_config.gain_controller2.enabled = true; apm_config.high_pass_filter.enabled = true; apm_config.noise_suppression.enabled = true; apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh; @@ -252,6 +317,7 @@ void LLWebRTCImpl::init() mAudioProcessingModule->ApplyConfig(apm_config); webrtc::ProcessingConfig processing_config; + processing_config.input_stream().set_num_channels(2); processing_config.input_stream().set_sample_rate_hz(48000); processing_config.output_stream().set_num_channels(2); @@ -266,13 +332,19 @@ void LLWebRTCImpl::init() mPeerConnectionFactory = webrtc::CreatePeerConnectionFactory(mNetworkThread.get(), mWorkerThread.get(), mSignalingThread.get(), - mPeerDeviceModule, + mDeviceModule, webrtc::CreateBuiltinAudioEncoderFactory(), webrtc::CreateBuiltinAudioDecoderFactory(), nullptr /* video_encoder_factory */, nullptr /* video_decoder_factory */, nullptr /* audio_mixer */, mAudioProcessingModule); + mWorkerThread->PostTask( + [this]() + { + mDeviceModule->EnableBuiltInAEC(false); + updateDevices(); + }); } @@ -294,64 +366,16 @@ void LLWebRTCImpl::terminate() mWorkerThread->BlockingCall( [this]() { - if (mTuningDeviceModule) - { - mTuningDeviceModule->StopRecording(); - mTuningDeviceModule->Terminate(); - } - if (mPeerDeviceModule) + if (mDeviceModule) { - mPeerDeviceModule->StopRecording(); - mPeerDeviceModule->Terminate(); - } - mTuningDeviceModule = nullptr; - mPeerDeviceModule = nullptr; - mTaskQueueFactory = nullptr; - }); - rtc::LogMessage::RemoveLogToStream(mLogSink); -} - -// -// Devices functions -// -// Most device-related functionality needs to happen -// on the worker thread (the audio thread,) so those calls will be -// proxied over to that thread. 
-// -void LLWebRTCImpl::setRecording(bool recording) -{ - mWorkerThread->PostTask( - [this, recording]() - { - if (recording) - { - mPeerDeviceModule->SetStereoRecording(false); - mPeerDeviceModule->InitRecording(); - mPeerDeviceModule->StartRecording(); - } - else - { - mPeerDeviceModule->StopRecording(); - } - }); -} - -void LLWebRTCImpl::setPlayout(bool playing) -{ - mWorkerThread->PostTask( - [this, playing]() - { - if (playing) - { - mPeerDeviceModule->SetStereoPlayout(true); - mPeerDeviceModule->InitPlayout(); - mPeerDeviceModule->StartPlayout(); - } - else - { - mPeerDeviceModule->StopPlayout(); + mDeviceModule->StopRecording(); + mDeviceModule->StopPlayout(); + mDeviceModule->Terminate(); } + mDeviceModule = nullptr; + mTaskQueueFactory = nullptr; }); + webrtc::LogMessage::RemoveLogToStream(mLogSink); } void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config) @@ -359,9 +383,9 @@ void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config) webrtc::AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = config.mEchoCancellation; apm_config.echo_canceller.mobile_mode = false; - apm_config.gain_controller1.enabled = config.mAGC; - apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog; - apm_config.gain_controller2.enabled = false; + apm_config.gain_controller1.enabled = false; + apm_config.gain_controller2.enabled = config.mAGC; + apm_config.gain_controller2.adaptive_digital.enabled = true; // auto-level speech apm_config.high_pass_filter.enabled = true; apm_config.transient_suppression.enabled = true; apm_config.pipeline.multi_channel_render = true; @@ -414,142 +438,134 @@ void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer) } } -void ll_set_device_module_capture_device(rtc::scoped_refptr device_module, int16_t device) +// must be run in the worker thread. 
+void LLWebRTCImpl::workerDeployDevices() { + int16_t recordingDevice = RECORD_DEVICE_DEFAULT; #if WEBRTC_WIN - if (device < 0) - { - device_module->SetRecordingDevice(webrtc::AudioDeviceModule::kDefaultDevice); - } - else - { - device_module->SetRecordingDevice(device); - } + int16_t recording_device_start = 0; #else - // passed in default is -1, but the device list - // has it at 0 - device_module->SetRecordingDevice(device + 1); + int16_t recording_device_start = 1; #endif - device_module->InitMicrophone(); -} -void LLWebRTCImpl::setCaptureDevice(const std::string &id) -{ - int16_t recordingDevice = RECORD_DEVICE_DEFAULT; - if (id != "Default") + if (mRecordingDevice != "Default") { - for (int16_t i = 0; i < mRecordingDeviceList.size(); i++) + for (int16_t i = recording_device_start; i < mRecordingDeviceList.size(); i++) { - if (mRecordingDeviceList[i].mID == id) + if (mRecordingDeviceList[i].mID == mRecordingDevice) { recordingDevice = i; break; } } } - if (recordingDevice == mRecordingDevice) - { - return; - } - mRecordingDevice = recordingDevice; - if (mTuningMode) - { - mWorkerThread->PostTask([this, recordingDevice]() - { - ll_set_device_module_capture_device(mTuningDeviceModule, recordingDevice); - }); - } - else - { - mWorkerThread->PostTask([this, recordingDevice]() - { - bool recording = mPeerDeviceModule->Recording(); - if (recording) - { - mPeerDeviceModule->StopRecording(); - } - ll_set_device_module_capture_device(mPeerDeviceModule, recordingDevice); - if (recording) - { - mPeerDeviceModule->SetStereoRecording(false); - mPeerDeviceModule->InitRecording(); - mPeerDeviceModule->StartRecording(); - } - }); - } -} - -void ll_set_device_module_render_device(rtc::scoped_refptr device_module, int16_t device) -{ + mDeviceModule->StopPlayout(); + mDeviceModule->ForceStopRecording(); #if WEBRTC_WIN - if (device < 0) + if (recordingDevice < 0) { - device_module->SetPlayoutDevice(webrtc::AudioDeviceModule::kDefaultDevice); + mDeviceModule->SetRecordingDevice((webrtc::AudioDeviceModule::WindowsDeviceType)recordingDevice); } else { - device_module->SetPlayoutDevice(device); + mDeviceModule->SetRecordingDevice(recordingDevice); } #else - device_module->SetPlayoutDevice(device + 1); + mDeviceModule->SetRecordingDevice(recordingDevice); #endif - device_module->InitSpeaker(); -} + mDeviceModule->InitMicrophone(); + mDeviceModule->SetStereoRecording(false); + mDeviceModule->InitRecording(); -void LLWebRTCImpl::setRenderDevice(const std::string &id) -{ int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT; - if (id != "Default") +#if WEBRTC_WIN + int16_t playout_device_start = 0; +#else + int16_t playout_device_start = 1; +#endif + if (mPlayoutDevice != "Default") { - for (int16_t i = 0; i < mPlayoutDeviceList.size(); i++) + for (int16_t i = playout_device_start; i < mPlayoutDeviceList.size(); i++) { - if (mPlayoutDeviceList[i].mID == id) + if (mPlayoutDeviceList[i].mID == mPlayoutDevice) { playoutDevice = i; break; } } } - if (playoutDevice == mPlayoutDevice) + +#if WEBRTC_WIN + if (playoutDevice < 0) { - return; + mDeviceModule->SetPlayoutDevice((webrtc::AudioDeviceModule::WindowsDeviceType)playoutDevice); + } + else + { + mDeviceModule->SetPlayoutDevice(playoutDevice); } - mPlayoutDevice = playoutDevice; +#else + mDeviceModule->SetPlayoutDevice(playoutDevice); +#endif + mDeviceModule->InitSpeaker(); + mDeviceModule->SetStereoPlayout(true); + mDeviceModule->InitPlayout(); - if (mTuningMode) + if ((!mMute && mPeerConnections.size()) || mTuningMode) { - mWorkerThread->PostTask( - [this, 
playoutDevice]() - { - ll_set_device_module_render_device(mTuningDeviceModule, playoutDevice); - }); + mDeviceModule->ForceStartRecording(); } - else + + if (!mTuningMode) { - mWorkerThread->PostTask( - [this, playoutDevice]() + mDeviceModule->StartPlayout(); + } + mSignalingThread->PostTask( + [this] + { + for (auto& connection : mPeerConnections) { - bool playing = mPeerDeviceModule->Playing(); - if (playing) + if (mTuningMode) { - mPeerDeviceModule->StopPlayout(); + connection->enableSenderTracks(false); } - ll_set_device_module_render_device(mPeerDeviceModule, playoutDevice); - if (playing) + else { - mPeerDeviceModule->SetStereoPlayout(true); - mPeerDeviceModule->InitPlayout(); - mPeerDeviceModule->StartPlayout(); + connection->resetMute(); } - }); + connection->enableReceiverTracks(!mTuningMode); + } + if (1 < mDevicesDeploying.fetch_sub(1, std::memory_order_relaxed)) + { + mWorkerThread->PostTask([this] { workerDeployDevices(); }); + } + }); +} + +void LLWebRTCImpl::setCaptureDevice(const std::string &id) +{ + + if (mRecordingDevice != id) + { + mRecordingDevice = id; + deployDevices(); + } +} + +void LLWebRTCImpl::setRenderDevice(const std::string &id) +{ + if (mPlayoutDevice != id) + { + mPlayoutDevice = id; + deployDevices(); } } // updateDevices needs to happen on the worker thread. void LLWebRTCImpl::updateDevices() { - int16_t renderDeviceCount = mTuningDeviceModule->PlayoutDevices(); + int16_t renderDeviceCount = mDeviceModule->PlayoutDevices(); mPlayoutDeviceList.clear(); #if WEBRTC_WIN @@ -563,11 +579,11 @@ void LLWebRTCImpl::updateDevices() { char name[webrtc::kAdmMaxDeviceNameSize]; char guid[webrtc::kAdmMaxGuidSize]; - mTuningDeviceModule->PlayoutDeviceName(index, name, guid); + mDeviceModule->PlayoutDeviceName(index, name, guid); mPlayoutDeviceList.emplace_back(name, guid); } - int16_t captureDeviceCount = mTuningDeviceModule->RecordingDevices(); + int16_t captureDeviceCount = mDeviceModule->RecordingDevices(); mRecordingDeviceList.clear(); #if WEBRTC_WIN @@ -581,7 +597,7 @@ void LLWebRTCImpl::updateDevices() { char name[webrtc::kAdmMaxDeviceNameSize]; char guid[webrtc::kAdmMaxGuidSize]; - mTuningDeviceModule->RecordingDeviceName(index, name, guid); + mDeviceModule->RecordingDeviceName(index, name, guid); mRecordingDeviceList.emplace_back(name, guid); } @@ -593,11 +609,7 @@ void LLWebRTCImpl::updateDevices() void LLWebRTCImpl::OnDevicesUpdated() { - // reset these to a bad value so an update is forced - mRecordingDevice = RECORD_DEVICE_BAD; - mPlayoutDevice = PLAYOUT_DEVICE_BAD; - - updateDevices(); + deployDevices(); } @@ -605,60 +617,109 @@ void LLWebRTCImpl::setTuningMode(bool enable) { mTuningMode = enable; mWorkerThread->PostTask( - [this, enable] { - if (enable) - { - mPeerDeviceModule->StopRecording(); - mPeerDeviceModule->StopPlayout(); - ll_set_device_module_render_device(mTuningDeviceModule, mPlayoutDevice); - ll_set_device_module_capture_device(mTuningDeviceModule, mRecordingDevice); - mTuningDeviceModule->InitPlayout(); - mTuningDeviceModule->InitRecording(); - mTuningDeviceModule->StartRecording(); - // TODO: Starting Playout on the TDM appears to create an audio artifact (click) - // in this case, so disabling it for now. We may have to do something different - // if we enable 'echo playback' via the TDM when tuning. 
- //mTuningDeviceModule->StartPlayout(); - } - else - { - mTuningDeviceModule->StopRecording(); - //mTuningDeviceModule->StopPlayout(); - ll_set_device_module_render_device(mPeerDeviceModule, mPlayoutDevice); - ll_set_device_module_capture_device(mPeerDeviceModule, mRecordingDevice); - mPeerDeviceModule->SetStereoPlayout(true); - mPeerDeviceModule->SetStereoRecording(false); - mPeerDeviceModule->InitPlayout(); - mPeerDeviceModule->InitRecording(); - mPeerDeviceModule->StartPlayout(); - mPeerDeviceModule->StartRecording(); - } - } - ); - mSignalingThread->PostTask( - [this, enable] + [this] { - for (auto &connection : mPeerConnections) - { - if (enable) + mDeviceModule->SetTuning(mTuningMode, mMute); + mSignalingThread->PostTask( + [this] { - connection->enableSenderTracks(false); - } - else - { - connection->resetMute(); - } - connection->enableReceiverTracks(!enable); - } + for (auto& connection : mPeerConnections) + { + if (mTuningMode) + { + connection->enableSenderTracks(false); + } + else + { + connection->resetMute(); + } + connection->enableReceiverTracks(!mTuningMode); + } + }); + }); +} + +void LLWebRTCImpl::deployDevices() +{ + if (0 < mDevicesDeploying.fetch_add(1, std::memory_order_relaxed)) + { + return; + } + mWorkerThread->PostTask( + [this] { + workerDeployDevices(); }); } -float LLWebRTCImpl::getTuningAudioLevel() { return -20 * log10f(mTuningAudioDeviceObserver->getMicrophoneEnergy()); } +float LLWebRTCImpl::getTuningAudioLevel() +{ + return mDeviceModule ? -20 * log10f(mDeviceModule->GetMicrophoneEnergy()) : std::numeric_limits::infinity(); +} -float LLWebRTCImpl::getPeerConnectionAudioLevel() { return -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()); } +void LLWebRTCImpl::setTuningMicGain(float gain) +{ + if (mTuningMode && mDeviceModule) + { + mDeviceModule->SetTuningMicGain(gain); + } +} + +float LLWebRTCImpl::getPeerConnectionAudioLevel() +{ + return mTuningMode ? std::numeric_limits::infinity() + : (mPeerCustomProcessor ? -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()) + : std::numeric_limits::infinity()); +} -void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->setGain(gain); } +void LLWebRTCImpl::setMicGain(float gain) +{ + mGain = gain; + if (!mTuningMode && mPeerCustomProcessor) + { + mPeerCustomProcessor->setGain(gain); + } +} +void LLWebRTCImpl::setMute(bool mute, int delay_ms) +{ + if (mMute != mute) + { + mMute = mute; + intSetMute(mute, delay_ms); + } +} + +void LLWebRTCImpl::intSetMute(bool mute, int delay_ms) +{ + if (mPeerCustomProcessor) + { + mPeerCustomProcessor->setGain(mMute ? 0.0f : mGain); + } + if (mMute) + { + mWorkerThread->PostDelayedTask( + [this] + { + if (mDeviceModule) + { + mDeviceModule->ForceStopRecording(); + } + }, + webrtc::TimeDelta::Millis(delay_ms)); + } + else + { + mWorkerThread->PostTask( + [this] + { + if (mDeviceModule) + { + mDeviceModule->InitRecording(); + mDeviceModule->ForceStartRecording(); + } + }); + } +} // // Peer Connection Helpers @@ -666,34 +727,31 @@ void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->set LLWebRTCPeerConnectionInterface *LLWebRTCImpl::newPeerConnection() { - rtc::scoped_refptr peerConnection = rtc::scoped_refptr(new rtc::RefCountedObject()); + bool empty = mPeerConnections.empty(); + webrtc::scoped_refptr peerConnection = webrtc::scoped_refptr(new webrtc::RefCountedObject()); peerConnection->init(this); - - mPeerConnections.emplace_back(peerConnection); - // Should it really start disabled? 
- // Seems like something doesn't get the memo and senders need to be reset later - // to remove the voice indicator from taskbar - peerConnection->enableSenderTracks(false); if (mPeerConnections.empty()) { - setRecording(true); - setPlayout(true); + intSetMute(mMute); } + mPeerConnections.emplace_back(peerConnection); + + peerConnection->enableSenderTracks(false); + peerConnection->resetMute(); return peerConnection.get(); } void LLWebRTCImpl::freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection) { - std::vector>::iterator it = + std::vector>::iterator it = std::find(mPeerConnections.begin(), mPeerConnections.end(), peer_connection); if (it != mPeerConnections.end()) { mPeerConnections.erase(it); - } - if (mPeerConnections.empty()) - { - setRecording(false); - setPlayout(false); + if (mPeerConnections.empty()) + { + intSetMute(true); + } } } @@ -729,7 +787,7 @@ void LLWebRTCPeerConnectionImpl::init(LLWebRTCImpl * webrtc_impl) } void LLWebRTCPeerConnectionImpl::terminate() { - mWebRTCImpl->SignalingBlockingCall( + mWebRTCImpl->PostSignalingTask( [this]() { if (mPeerConnection) @@ -753,7 +811,6 @@ void LLWebRTCPeerConnectionImpl::terminate() track->set_enabled(false); } } - mPeerConnection->SetAudioRecording(false); mPeerConnection->Close(); if (mLocalStream) @@ -840,7 +897,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti mDataChannel->RegisterObserver(this); } - cricket::AudioOptions audioOptions; + webrtc::AudioOptions audioOptions; audioOptions.auto_gain_control = true; audioOptions.echo_cancellation = true; audioOptions.noise_suppression = true; @@ -848,7 +905,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti mLocalStream = mPeerConnectionFactory->CreateLocalMediaStream("SLStream"); - rtc::scoped_refptr audio_track( + webrtc::scoped_refptr audio_track( mPeerConnectionFactory->CreateAudioTrack("SLAudio", mPeerConnectionFactory->CreateAudioSource(audioOptions).get())); audio_track->set_enabled(false); mLocalStream->AddTrack(audio_track); @@ -862,7 +919,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti webrtc::RtpParameters params; webrtc::RtpCodecParameters codecparam; codecparam.name = "opus"; - codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.kind = webrtc::MediaType::AUDIO; codecparam.clock_rate = 48000; codecparam.num_channels = 2; codecparam.parameters["stereo"] = "1"; @@ -877,7 +934,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti webrtc::RtpParameters params; webrtc::RtpCodecParameters codecparam; codecparam.name = "opus"; - codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.kind = webrtc::MediaType::AUDIO; codecparam.clock_rate = 48000; codecparam.num_channels = 2; codecparam.parameters["stereo"] = "1"; @@ -904,7 +961,6 @@ void LLWebRTCPeerConnectionImpl::enableSenderTracks(bool enable) // set_enabled shouldn't be done on the worker thread. 
if (mPeerConnection) { - mPeerConnection->SetAudioRecording(enable); auto senders = mPeerConnection->GetSenders(); for (auto &sender : senders) { @@ -938,7 +994,7 @@ void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->peer_connection_state(); mPeerConnection->SetRemoteDescription(webrtc::CreateSessionDescription(webrtc::SdpType::kAnswer, sdp), - rtc::scoped_refptr(this)); + webrtc::scoped_refptr(this)); } }); } @@ -951,22 +1007,22 @@ void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp) void LLWebRTCPeerConnectionImpl::setMute(bool mute) { EMicMuteState new_state = mute ? MUTE_MUTED : MUTE_UNMUTED; - if (new_state == mMute) - { - return; // no change - } + + // even if mute hasn't changed, we still need to update the mute + // state on the connections to handle cases where the 'Default' device + // has changed in the OS (unplugged headset, etc.) which messes + // with the mute state. + bool force_reset = mMute == MUTE_INITIAL && mute; bool enable = !mute; mMute = new_state; + mWebRTCImpl->PostSignalingTask( [this, force_reset, enable]() { if (mPeerConnection) { - // SetAudioRecording must be called before enabling/disabling tracks. - mPeerConnection->SetAudioRecording(enable); - auto senders = mPeerConnection->GetSenders(); RTC_LOG(LS_INFO) << __FUNCTION__ << (mMute ? "disabling" : "enabling") << " streams count " << senders.size(); @@ -1046,14 +1102,14 @@ void LLWebRTCPeerConnectionImpl::setSendVolume(float volume) // PeerConnectionObserver implementation. // -void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptr receiver, - const std::vector> &streams) +void LLWebRTCPeerConnectionImpl::OnAddTrack(webrtc::scoped_refptr receiver, + const std::vector> &streams) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); webrtc::RtpParameters params; webrtc::RtpCodecParameters codecparam; codecparam.name = "opus"; - codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.kind = webrtc::MediaType::AUDIO; codecparam.clock_rate = 48000; codecparam.num_channels = 2; codecparam.parameters["stereo"] = "1"; @@ -1062,12 +1118,12 @@ void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptrSetParameters(params); } -void LLWebRTCPeerConnectionImpl::OnRemoveTrack(rtc::scoped_refptr receiver) +void LLWebRTCPeerConnectionImpl::OnRemoveTrack(webrtc::scoped_refptr receiver) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); } -void LLWebRTCPeerConnectionImpl::OnDataChannel(rtc::scoped_refptr channel) +void LLWebRTCPeerConnectionImpl::OnDataChannel(webrtc::scoped_refptr channel) { if (mDataChannel) { @@ -1154,23 +1210,23 @@ static std::string iceCandidateToTrickleString(const webrtc::IceCandidateInterfa candidate->candidate().address().ipaddr().ToString() << " " << candidate->candidate().address().PortAsString() << " typ "; - if (candidate->candidate().type() == cricket::LOCAL_PORT_TYPE) + if (candidate->candidate().type() == webrtc::IceCandidateType::kHost) { candidate_stream << "host"; } - else if (candidate->candidate().type() == cricket::STUN_PORT_TYPE) + else if (candidate->candidate().type() == webrtc::IceCandidateType::kSrflx) { candidate_stream << "srflx " << "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << "rport " << candidate->candidate().related_address().PortAsString(); } - else if (candidate->candidate().type() == cricket::RELAY_PORT_TYPE) + else if (candidate->candidate().type() == webrtc::IceCandidateType::kRelay) { candidate_stream 
<< "relay " << "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << "rport " << candidate->candidate().related_address().PortAsString(); } - else if (candidate->candidate().type() == cricket::PRFLX_PORT_TYPE) + else if (candidate->candidate().type() == webrtc::IceCandidateType::kPrflx) { candidate_stream << "prflx " << "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << @@ -1265,7 +1321,7 @@ void LLWebRTCPeerConnectionImpl::OnSuccess(webrtc::SessionDescriptionInterface * mPeerConnection->SetLocalDescription(std::unique_ptr( webrtc::CreateSessionDescription(webrtc::SdpType::kOffer, mangled_sdp)), - rtc::scoped_refptr(this)); + webrtc::scoped_refptr(this)); } @@ -1375,7 +1431,7 @@ void LLWebRTCPeerConnectionImpl::sendData(const std::string& data, bool binary) { if (mDataChannel) { - rtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length()); + webrtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length()); webrtc::DataBuffer buffer(cowBuffer, binary); mWebRTCImpl->PostNetworkTask([this, buffer]() { if (mDataChannel) -- cgit v1.2.3 From c743ea2b6dc60312b29f2fb5972171ead26ee448 Mon Sep 17 00:00:00 2001 From: Roxanne Skelly Date: Tue, 16 Sep 2025 10:36:26 -0700 Subject: Fix indexing problem with mac devices (#4676) * Fix indexing problem with mac devices This resulted in the wrong device being selected. Also, fix a shutdown crash where recording was not being stopped, hence the recording thread was still running on shutdown and crashed because it lost access to resources. Fix an issue with p2p calls where they're coming up muted even though the button indicates they are unmuted. * Always refresh device list on notification of device changes Even when the selected device doesn't change, we need to re-deploy it as it might have had characteristics (sampling rate, etc.) changed. Also, we need to redeploy when the Default device has changed --- indra/llwebrtc/llwebrtc.cpp | 48 +++++++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 21 deletions(-) (limited to 'indra/llwebrtc/llwebrtc.cpp') diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index edba2bee9a..23e1076765 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -350,6 +350,16 @@ void LLWebRTCImpl::init() void LLWebRTCImpl::terminate() { + mWorkerThread->BlockingCall( + [this]() + { + if (mDeviceModule) + { + mDeviceModule->ForceStopRecording(); + mDeviceModule->StopPlayout(); + } + }); + for (auto &connection : mPeerConnections) { connection->terminate(); @@ -368,8 +378,6 @@ void LLWebRTCImpl::terminate() { if (mDeviceModule) { - mDeviceModule->StopRecording(); - mDeviceModule->StopPlayout(); mDeviceModule->Terminate(); } mDeviceModule = nullptr; @@ -442,11 +450,7 @@ void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer) void LLWebRTCImpl::workerDeployDevices() { int16_t recordingDevice = RECORD_DEVICE_DEFAULT; -#if WEBRTC_WIN int16_t recording_device_start = 0; -#else - int16_t recording_device_start = 1; -#endif if (mRecordingDevice != "Default") { @@ -455,6 +459,12 @@ void LLWebRTCImpl::workerDeployDevices() if (mRecordingDeviceList[i].mID == mRecordingDevice) { recordingDevice = i; +#if !WEBRTC_WIN + // linux and mac devices range from 1 to the end of the list, with the index 0 being the + // 'default' device. 
Windows has a special 'default' device and other devices are indexed + // from 0 + recordingDevice++; +#endif break; } } @@ -479,11 +489,7 @@ void LLWebRTCImpl::workerDeployDevices() mDeviceModule->InitRecording(); int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT; -#if WEBRTC_WIN int16_t playout_device_start = 0; -#else - int16_t playout_device_start = 1; -#endif if (mPlayoutDevice != "Default") { for (int16_t i = playout_device_start; i < mPlayoutDeviceList.size(); i++) @@ -491,6 +497,12 @@ void LLWebRTCImpl::workerDeployDevices() if (mPlayoutDeviceList[i].mID == mPlayoutDevice) { playoutDevice = i; +#if !WEBRTC_WIN + // linux and mac devices range from 1 to the end of the list, with the index 0 being the + // 'default' device. Windows has a special 'default' device and other devices are indexed + // from 0 + playoutDevice++; +#endif break; } } @@ -546,20 +558,14 @@ void LLWebRTCImpl::workerDeployDevices() void LLWebRTCImpl::setCaptureDevice(const std::string &id) { - if (mRecordingDevice != id) - { - mRecordingDevice = id; - deployDevices(); - } + mRecordingDevice = id; + deployDevices(); } void LLWebRTCImpl::setRenderDevice(const std::string &id) { - if (mPlayoutDevice != id) - { - mPlayoutDevice = id; - deployDevices(); - } + mPlayoutDevice = id; + deployDevices(); } // updateDevices needs to happen on the worker thread. @@ -609,7 +615,7 @@ void LLWebRTCImpl::updateDevices() void LLWebRTCImpl::OnDevicesUpdated() { - deployDevices(); + updateDevices(); } -- cgit v1.2.3 From 3ff163887d49363e9d2d48ea343fa1c8da19c061 Mon Sep 17 00:00:00 2001 From: Andrey Kleshchev <117672381+akleshchev@users.noreply.github.com> Date: Fri, 19 Sep 2025 21:27:01 +0300 Subject: #4695 Fix missing voice cleanup Cleanup is in LLVoiceClient::terminate() gWebRTCImpl was never deleted Added mDeviceModule security --- indra/llwebrtc/llwebrtc.cpp | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'indra/llwebrtc/llwebrtc.cpp') diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 23e1076765..828896f620 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -342,8 +342,11 @@ void LLWebRTCImpl::init() mWorkerThread->PostTask( [this]() { - mDeviceModule->EnableBuiltInAEC(false); - updateDevices(); + if (mDeviceModule) + { + mDeviceModule->EnableBuiltInAEC(false); + updateDevices(); + } }); } @@ -449,6 +452,11 @@ void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer) // must be run in the worker thread. void LLWebRTCImpl::workerDeployDevices() { + if (!mDeviceModule) + { + return; + } + int16_t recordingDevice = RECORD_DEVICE_DEFAULT; int16_t recording_device_start = 0; @@ -571,6 +579,11 @@ void LLWebRTCImpl::setRenderDevice(const std::string &id) // updateDevices needs to happen on the worker thread. 
void LLWebRTCImpl::updateDevices() { + if (!mDeviceModule) + { + return; + } + int16_t renderDeviceCount = mDeviceModule->PlayoutDevices(); mPlayoutDeviceList.clear(); @@ -1491,6 +1504,7 @@ void terminate() if (gWebRTCImpl) { gWebRTCImpl->terminate(); + delete gWebRTCImpl; gWebRTCImpl = nullptr; } } -- cgit v1.2.3 From 57a9e51360aebf142bbbdc2663f68ebacfb7d8f5 Mon Sep 17 00:00:00 2001 From: Andrey Kleshchev <117672381+akleshchev@users.noreply.github.com> Date: Mon, 13 Oct 2025 20:16:52 +0300 Subject: #4819 WebRTC crashes after a failed login --- indra/llwebrtc/llwebrtc.cpp | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'indra/llwebrtc/llwebrtc.cpp') diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 828896f620..d5182b16e7 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -1495,6 +1495,10 @@ void freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection) void init(LLWebRTCLogCallback* logCallback) { + if (gWebRTCImpl) + { + return; + } gWebRTCImpl = new LLWebRTCImpl(logCallback); gWebRTCImpl->init(); } -- cgit v1.2.3
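Aside: the indexing fix in #4676 above comes down to the platform difference called out in the comments that patch adds. Windows has a dedicated 'default' device id and enumerates real devices from 0, while on Mac and Linux the enumeration itself puts the default device at index 0 and real devices from 1, so explicit selections must be shifted by one. The helper below is an invented illustration of that mapping, not code from the patches; only SetRecordingDevice()/SetPlayoutDevice() and kDefaultDevice correspond to the actual AudioDeviceModule API used in the diff.

    #include <cstdint>
    #include <optional>
    #include <string>
    #include <vector>

    // Maps a device id chosen in the UI onto the index handed to
    // SetRecordingDevice()/SetPlayoutDevice().  std::nullopt means "use the
    // platform default" (kDefaultDevice on Windows, enumeration index 0 elsewhere).
    std::optional<int16_t> admDeviceIndex(const std::string& wantedId,
                                          const std::vector<std::string>& deviceIds,
                                          bool isWindows)
    {
        if (wantedId != "Default")
        {
            for (int16_t i = 0; i < static_cast<int16_t>(deviceIds.size()); ++i)
            {
                if (deviceIds[i] == wantedId)
                {
                    // Mac/Linux list the default device at slot 0, so explicit
                    // selections shift up by one; Windows indexes them from 0.
                    return isWindows ? i : static_cast<int16_t>(i + 1);
                }
            }
        }
        // "Default" selected, or the device is gone (e.g. headset unplugged):
        // fall back to the platform default device.
        return std::nullopt;
    }

The fallback branch is what makes unplugging a selected headset degrade to the default device instead of a dead index, which is the behaviour the test guidance in the first patch exercises.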