path: root/indra/llwebrtc
Diffstat (limited to 'indra/llwebrtc')
-rw-r--r--  indra/llwebrtc/CMakeLists.txt  |   61
-rw-r--r--  indra/llwebrtc/llwebrtc.cpp    | 1345
-rw-r--r--  indra/llwebrtc/llwebrtc.h      |  278
-rw-r--r--  indra/llwebrtc/llwebrtc_impl.h |  382
4 files changed, 2066 insertions, 0 deletions
diff --git a/indra/llwebrtc/CMakeLists.txt b/indra/llwebrtc/CMakeLists.txt
new file mode 100644
index 0000000000..fd1788c5d4
--- /dev/null
+++ b/indra/llwebrtc/CMakeLists.txt
@@ -0,0 +1,61 @@
+# -*- cmake -*-
+
+# some webrtc headers require C++ 20
+set(CMAKE_CXX_STANDARD 20)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+include(00-Common)
+include(Linking)
+include(WebRTC)
+
+project(llwebrtc)
+
+if (LINUX)
+ add_compile_options(-Wno-deprecated-declarations) # webrtc::CreateAudioDeviceWithDataObserver is deprecated
+endif (LINUX)
+
+set(llwebrtc_SOURCE_FILES
+ llwebrtc.cpp
+ )
+
+set(llwebrtc_HEADER_FILES
+ CMakeLists.txt
+ llwebrtc.h
+ llwebrtc_impl.h
+ )
+
+list(APPEND llwebrtc_SOURCE_FILES ${llwebrtc_HEADER_FILES})
+
+add_library (llwebrtc SHARED ${llwebrtc_SOURCE_FILES})
+
+set_target_properties(llwebrtc PROPERTIES PUBLIC_HEADER llwebrtc.h)
+
+if (WINDOWS)
+ target_link_libraries(llwebrtc PRIVATE ll::webrtc
+ secur32
+ winmm
+ dmoguids
+ wmcodecdspuuid
+ msdmo
+ strmiids
+ iphlpapi)
+elseif (DARWIN)
+ target_link_libraries(llwebrtc PRIVATE ll::webrtc)
+elseif (LINUX)
+ target_link_libraries(llwebrtc PRIVATE ll::webrtc)
+endif (WINDOWS)
+
+target_include_directories( llwebrtc INTERFACE ${CMAKE_CURRENT_SOURCE_DIR})
+
+if (WINDOWS)
+ set_property(TARGET llwebrtc PROPERTY
+ MSVC_RUNTIME_LIBRARY "MultiThreadedDebug")
+endif (WINDOWS)
+
+ADD_CUSTOM_COMMAND(TARGET llwebrtc POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ $<TARGET_FILE:llwebrtc>
+ ${SHARED_LIB_STAGING_DIR})
+# Add tests
+if (LL_TESTS)
+endif (LL_TESTS)
diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp
new file mode 100644
index 0000000000..d5bd913315
--- /dev/null
+++ b/indra/llwebrtc/llwebrtc.cpp
@@ -0,0 +1,1345 @@
+/**
+ * @file llwebrtc.cpp
+ * @brief WebRTC interface implementation
+ *
+ * $LicenseInfo:firstyear=2023&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2023, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+#include "llwebrtc_impl.h"
+#include <algorithm>
+#include <string.h>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/media_stream_interface.h"
+#include "api/media_stream_track.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+
+namespace llwebrtc
+{
+
+static int16_t PLAYOUT_DEVICE_DEFAULT = -1;
+static int16_t PLAYOUT_DEVICE_BAD = -2;
+static int16_t RECORD_DEVICE_DEFAULT = -1;
+static int16_t RECORD_DEVICE_BAD = -2;
+
+LLAudioDeviceObserver::LLAudioDeviceObserver() : mSumVector {0}, mMicrophoneEnergy(0.0) {}
+
+float LLAudioDeviceObserver::getMicrophoneEnergy() { return mMicrophoneEnergy; }
+
+// TODO: Pull smoothing/filtering code into a common helper function
+// for LLAudioDeviceObserver and LLCustomProcessor
+
+void LLAudioDeviceObserver::OnCaptureData(const void *audio_samples,
+ const size_t num_samples,
+ const size_t bytes_per_sample,
+ const size_t num_channels,
+ const uint32_t samples_per_sec)
+{
+ // calculate the energy
+ float energy = 0;
+ const short *samples = (const short *) audio_samples;
+ for (size_t index = 0; index < num_samples * num_channels; index++)
+ {
+ float sample = (static_cast<float>(samples[index]) / (float) 32767);
+ energy += sample * sample;
+ }
+
+ // smooth it.
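+    // (rolling window of the last NUM_PACKETS_TO_FILTER frame energies;
+    // the reported level is the RMS over that window, roughly 300 ms of audio)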
+ size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]);
+ float totalSum = 0;
+ int i;
+ for (i = 0; i < (buffer_size - 1); i++)
+ {
+ mSumVector[i] = mSumVector[i + 1];
+ totalSum += mSumVector[i];
+ }
+ mSumVector[i] = energy;
+ totalSum += energy;
+ mMicrophoneEnergy = std::sqrt(totalSum / (num_samples * buffer_size));
+}
+
+void LLAudioDeviceObserver::OnRenderData(const void *audio_samples,
+ const size_t num_samples,
+ const size_t bytes_per_sample,
+ const size_t num_channels,
+ const uint32_t samples_per_sec)
+{
+}
+
+LLCustomProcessor::LLCustomProcessor() : mSampleRateHz(0), mNumChannels(0), mMicrophoneEnergy(0.0), mGain(1.0)
+{
+ memset(mSumVector, 0, sizeof(mSumVector));
+}
+
+void LLCustomProcessor::Initialize(int sample_rate_hz, int num_channels)
+{
+ mSampleRateHz = sample_rate_hz;
+ mNumChannels = num_channels;
+ memset(mSumVector, 0, sizeof(mSumVector));
+}
+
+void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in)
+{
+ webrtc::StreamConfig stream_config;
+ stream_config.set_sample_rate_hz(mSampleRateHz);
+ stream_config.set_num_channels(mNumChannels);
+ std::vector<float *> frame;
+ std::vector<float> frame_samples;
+
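+    // require at least one channel and a full 10 ms frame at 48 kHz (480 samples)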
+ if (audio_in->num_channels() < 1 || audio_in->num_frames() < 480)
+ {
+ return;
+ }
+
+ // grab the input audio
+ frame_samples.resize(stream_config.num_samples());
+ frame.resize(stream_config.num_channels());
+ for (size_t ch = 0; ch < stream_config.num_channels(); ++ch)
+ {
+ frame[ch] = &(frame_samples)[ch * stream_config.num_frames()];
+ }
+
+ audio_in->CopyTo(stream_config, &frame[0]);
+
+ // calculate the energy
+ float energy = 0;
+ for (size_t index = 0; index < stream_config.num_samples(); index++)
+ {
+ float sample = frame_samples[index];
+ sample = sample * mGain; // apply gain
+ frame_samples[index] = sample; // write processed sample back to buffer.
+ energy += sample * sample;
+ }
+
+ audio_in->CopyFrom(&frame[0], stream_config);
+
+ // smooth it.
+ size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]);
+ float totalSum = 0;
+ int i;
+ for (i = 0; i < (buffer_size - 1); i++)
+ {
+ mSumVector[i] = mSumVector[i + 1];
+ totalSum += mSumVector[i];
+ }
+ mSumVector[i] = energy;
+ totalSum += energy;
+ mMicrophoneEnergy = std::sqrt(totalSum / (stream_config.num_samples() * buffer_size));
+}
+
+//
+// LLWebRTCImpl implementation
+//
+
+LLWebRTCImpl::LLWebRTCImpl() :
+ mPeerCustomProcessor(nullptr),
+ mMute(true),
+ mTuningMode(false),
+ mPlayoutDevice(0),
+ mRecordingDevice(0),
+ mTuningAudioDeviceObserver(nullptr)
+{
+}
+
+void LLWebRTCImpl::init()
+{
+ mPlayoutDevice = 0;
+ mRecordingDevice = 0;
+ rtc::InitializeSSL();
+
+ // Normal logging is rather spammy, so turn it off.
+ rtc::LogMessage::LogToDebug(rtc::LS_NONE);
+ rtc::LogMessage::SetLogToStderr(true);
+
+ mTaskQueueFactory = webrtc::CreateDefaultTaskQueueFactory();
+
+ // Create the native threads.
+ mNetworkThread = rtc::Thread::CreateWithSocketServer();
+ mNetworkThread->SetName("WebRTCNetworkThread", nullptr);
+ mNetworkThread->Start();
+ mWorkerThread = rtc::Thread::Create();
+ mWorkerThread->SetName("WebRTCWorkerThread", nullptr);
+ mWorkerThread->Start();
+ mSignalingThread = rtc::Thread::Create();
+ mSignalingThread->SetName("WebRTCSignalingThread", nullptr);
+ mSignalingThread->Start();
+
+ mTuningAudioDeviceObserver = new LLAudioDeviceObserver;
+ mWorkerThread->PostTask(
+ [this]()
+ {
+ // Initialize the audio devices on the Worker Thread
+ mTuningDeviceModule =
+ webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio,
+ mTaskQueueFactory.get(),
+ std::unique_ptr<webrtc::AudioDeviceDataObserver>(mTuningAudioDeviceObserver));
+
+ mTuningDeviceModule->Init();
+ mTuningDeviceModule->SetPlayoutDevice(mPlayoutDevice);
+ mTuningDeviceModule->SetRecordingDevice(mRecordingDevice);
+ mTuningDeviceModule->EnableBuiltInAEC(false);
+ mTuningDeviceModule->SetAudioDeviceSink(this);
+ mTuningDeviceModule->InitMicrophone();
+ mTuningDeviceModule->InitSpeaker();
+ mTuningDeviceModule->InitRecording();
+ mTuningDeviceModule->InitPlayout();
+ mTuningDeviceModule->SetStereoRecording(true);
+ mTuningDeviceModule->SetStereoPlayout(true);
+ updateDevices();
+ });
+
+ mWorkerThread->BlockingCall(
+ [this]()
+ {
+ // the peer device module doesn't need an observer
+ // as we pull peer data after audio processing.
+ mPeerDeviceModule = webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio,
+ mTaskQueueFactory.get(),
+ nullptr);
+ mPeerDeviceModule->Init();
+ mPeerDeviceModule->SetPlayoutDevice(mPlayoutDevice);
+ mPeerDeviceModule->SetRecordingDevice(mRecordingDevice);
+ mPeerDeviceModule->EnableBuiltInAEC(false);
+ mPeerDeviceModule->InitMicrophone();
+ mPeerDeviceModule->InitSpeaker();
+ mPeerDeviceModule->InitRecording();
+ mPeerDeviceModule->InitPlayout();
+ mPeerDeviceModule->SetStereoRecording(true);
+ mPeerDeviceModule->SetStereoPlayout(true);
+ });
+
+ // The custom processor allows us to retrieve audio data (and levels)
+ // from after other audio processing such as AEC, AGC, etc.
+ mPeerCustomProcessor = new LLCustomProcessor;
+ webrtc::AudioProcessingBuilder apb;
+ apb.SetCapturePostProcessing(std::unique_ptr<webrtc::CustomProcessing>(mPeerCustomProcessor));
+ mAudioProcessingModule = apb.Create();
+
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.echo_canceller.enabled = false;
+ apm_config.echo_canceller.mobile_mode = false;
+ apm_config.gain_controller1.enabled = false;
+ apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog;
+ apm_config.gain_controller2.enabled = false;
+ apm_config.high_pass_filter.enabled = true;
+ apm_config.noise_suppression.enabled = true;
+ apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh;
+ apm_config.transient_suppression.enabled = true;
+ apm_config.pipeline.multi_channel_render = true;
+ apm_config.pipeline.multi_channel_capture = true;
+
+ webrtc::ProcessingConfig processing_config;
+ processing_config.input_stream().set_num_channels(2);
+ processing_config.input_stream().set_sample_rate_hz(48000);
+ processing_config.output_stream().set_num_channels(2);
+ processing_config.output_stream().set_sample_rate_hz(48000);
+ processing_config.reverse_input_stream().set_num_channels(2);
+ processing_config.reverse_input_stream().set_sample_rate_hz(48000);
+ processing_config.reverse_output_stream().set_num_channels(2);
+ processing_config.reverse_output_stream().set_sample_rate_hz(48000);
+
+ mAudioProcessingModule->ApplyConfig(apm_config);
+ mAudioProcessingModule->Initialize(processing_config);
+
+
+ mPeerConnectionFactory = webrtc::CreatePeerConnectionFactory(mNetworkThread.get(),
+ mWorkerThread.get(),
+ mSignalingThread.get(),
+ mPeerDeviceModule,
+ webrtc::CreateBuiltinAudioEncoderFactory(),
+ webrtc::CreateBuiltinAudioDecoderFactory(),
+ nullptr /* video_encoder_factory */,
+ nullptr /* video_decoder_factory */,
+ nullptr /* audio_mixer */,
+ mAudioProcessingModule);
+
+ mWorkerThread->BlockingCall([this]() { mPeerDeviceModule->StartPlayout(); });
+}
+
+void LLWebRTCImpl::terminate()
+{
+ for (auto &connection : mPeerConnections)
+ {
+ connection->terminate();
+ }
+
+ // connection->terminate() above spawns a number of Signaling thread calls to
+ // shut down the connection. The following Blocking Call will wait
+ // until they're done before it's executed, allowing time to clean up.
+
+ mSignalingThread->BlockingCall([this]() { mPeerConnectionFactory = nullptr; });
+
+ mPeerConnections.clear();
+
+ mWorkerThread->BlockingCall(
+ [this]()
+ {
+ if (mTuningDeviceModule)
+ {
+ mTuningDeviceModule->StopRecording();
+ mTuningDeviceModule->Terminate();
+ }
+ if (mPeerDeviceModule)
+ {
+ mPeerDeviceModule->StopRecording();
+ mPeerDeviceModule->Terminate();
+ }
+ mTuningDeviceModule = nullptr;
+ mPeerDeviceModule = nullptr;
+ mTaskQueueFactory = nullptr;
+ });
+}
+
+//
+// Devices functions
+//
+// Most device-related functionality needs to happen
+// on the worker thread (the audio thread), so those calls will be
+// proxied over to that thread.
+//
+void LLWebRTCImpl::setRecording(bool recording)
+{
+ mWorkerThread->PostTask(
+ [this, recording]()
+ {
+ if (recording)
+ {
+ mPeerDeviceModule->StartRecording();
+ }
+ else
+ {
+ mPeerDeviceModule->StopRecording();
+ }
+ });
+}
+
+void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config)
+{
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.echo_canceller.enabled = config.mEchoCancellation;
+ apm_config.echo_canceller.mobile_mode = false;
+ apm_config.gain_controller1.enabled = config.mAGC;
+ apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog;
+ apm_config.gain_controller2.enabled = false;
+ apm_config.high_pass_filter.enabled = true;
+ apm_config.transient_suppression.enabled = true;
+ apm_config.pipeline.multi_channel_render = true;
+ apm_config.pipeline.multi_channel_capture = true;
+
+ switch (config.mNoiseSuppressionLevel)
+ {
+ case LLWebRTCDeviceInterface::AudioConfig::NOISE_SUPPRESSION_LEVEL_NONE:
+ apm_config.noise_suppression.enabled = false;
+ apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kLow;
+ break;
+ case LLWebRTCDeviceInterface::AudioConfig::NOISE_SUPPRESSION_LEVEL_LOW:
+ apm_config.noise_suppression.enabled = true;
+ apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kLow;
+ break;
+ case LLWebRTCDeviceInterface::AudioConfig::NOISE_SUPPRESSION_LEVEL_MODERATE:
+ apm_config.noise_suppression.enabled = true;
+ apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
+ break;
+ case LLWebRTCDeviceInterface::AudioConfig::NOISE_SUPPRESSION_LEVEL_HIGH:
+ apm_config.noise_suppression.enabled = true;
+ apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kHigh;
+ break;
+ case LLWebRTCDeviceInterface::AudioConfig::NOISE_SUPPRESSION_LEVEL_VERY_HIGH:
+ apm_config.noise_suppression.enabled = true;
+ apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh;
+ break;
+ default:
+ apm_config.noise_suppression.enabled = false;
+ apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kLow;
+ }
+ mAudioProcessingModule->ApplyConfig(apm_config);
+}
+
+void LLWebRTCImpl::refreshDevices()
+{
+ mWorkerThread->PostTask([this]() { updateDevices(); });
+}
+
+void LLWebRTCImpl::setDevicesObserver(LLWebRTCDevicesObserver *observer) { mVoiceDevicesObserverList.emplace_back(observer); }
+
+void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer)
+{
+ std::vector<LLWebRTCDevicesObserver *>::iterator it =
+ std::find(mVoiceDevicesObserverList.begin(), mVoiceDevicesObserverList.end(), observer);
+ if (it != mVoiceDevicesObserverList.end())
+ {
+ mVoiceDevicesObserverList.erase(it);
+ }
+}
+
+void ll_set_device_module_capture_device(rtc::scoped_refptr<webrtc::AudioDeviceModule> device_module, int16_t device)
+{
+ device_module->StopRecording();
+#if WEBRTC_WIN
+ if (device < 0)
+ {
+ device_module->SetRecordingDevice(webrtc::AudioDeviceModule::kDefaultDevice);
+ }
+ else
+ {
+ device_module->SetRecordingDevice(device);
+ }
+#else
+    // the passed-in default is -1, but the underlying device list
+    // has it at index 0, so shift by one
+ device_module->SetRecordingDevice(device + 1);
+#endif
+ device_module->InitMicrophone();
+ device_module->InitRecording();
+ device_module->SetStereoRecording(false);
+ device_module->StartRecording();
+}
+
+void LLWebRTCImpl::setCaptureDevice(const std::string &id)
+{
+ int16_t recordingDevice = RECORD_DEVICE_DEFAULT;
+ if (id != "Default")
+ {
+ for (int16_t i = 0; i < mRecordingDeviceList.size(); i++)
+ {
+ if (mRecordingDeviceList[i].mID == id)
+ {
+ recordingDevice = i;
+ break;
+ }
+ }
+ }
+ if (recordingDevice == mRecordingDevice)
+ {
+ return;
+ }
+ mRecordingDevice = recordingDevice;
+ if (mTuningMode)
+ {
+ mWorkerThread->PostTask([this, recordingDevice]() { ll_set_device_module_capture_device(mTuningDeviceModule, recordingDevice); });
+ }
+ else
+ {
+ mWorkerThread->PostTask([this, recordingDevice]() { ll_set_device_module_capture_device(mPeerDeviceModule, recordingDevice); });
+ }
+}
+
+
+void ll_set_device_module_render_device(rtc::scoped_refptr<webrtc::AudioDeviceModule> device_module, int16_t device)
+{
+ device_module->StopPlayout();
+#if WEBRTC_WIN
+ if (device < 0)
+ {
+ device_module->SetPlayoutDevice(webrtc::AudioDeviceModule::kDefaultDevice);
+ }
+ else
+ {
+ device_module->SetPlayoutDevice(device);
+ }
+#else
+ device_module->SetPlayoutDevice(device + 1);
+#endif
+ device_module->InitSpeaker();
+ device_module->InitPlayout();
+ device_module->SetStereoPlayout(true);
+}
+
+void LLWebRTCImpl::setRenderDevice(const std::string &id)
+{
+ int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT;
+ if (id != "Default")
+ {
+ for (int16_t i = 0; i < mPlayoutDeviceList.size(); i++)
+ {
+ if (mPlayoutDeviceList[i].mID == id)
+ {
+ playoutDevice = i;
+ break;
+ }
+ }
+ }
+ if (playoutDevice == mPlayoutDevice)
+ {
+ return;
+ }
+ mPlayoutDevice = playoutDevice;
+
+ if (mTuningMode)
+ {
+ mWorkerThread->PostTask(
+ [this, playoutDevice]()
+ {
+ ll_set_device_module_render_device(mTuningDeviceModule, playoutDevice);
+ });
+ }
+ else
+ {
+ mWorkerThread->PostTask(
+ [this, playoutDevice]()
+ {
+ ll_set_device_module_render_device(mPeerDeviceModule, playoutDevice);
+ mPeerDeviceModule->StartPlayout();
+ });
+ }
+}
+
+// updateDevices needs to happen on the worker thread.
+void LLWebRTCImpl::updateDevices()
+{
+ int16_t renderDeviceCount = mTuningDeviceModule->PlayoutDevices();
+
+ mPlayoutDeviceList.clear();
+#if WEBRTC_WIN
+ int16_t index = 0;
+#else
+ // index zero is always "Default" for darwin/linux,
+ // which is a special case, so skip it.
+ int16_t index = 1;
+#endif
+ for (; index < renderDeviceCount; index++)
+ {
+ char name[webrtc::kAdmMaxDeviceNameSize];
+ char guid[webrtc::kAdmMaxGuidSize];
+ mTuningDeviceModule->PlayoutDeviceName(index, name, guid);
+ mPlayoutDeviceList.emplace_back(name, guid);
+ }
+
+ int16_t captureDeviceCount = mTuningDeviceModule->RecordingDevices();
+
+ mRecordingDeviceList.clear();
+#if WEBRTC_WIN
+ index = 0;
+#else
+ // index zero is always "Default" for darwin/linux,
+ // which is a special case, so skip it.
+ index = 1;
+#endif
+ for (; index < captureDeviceCount; index++)
+ {
+ char name[webrtc::kAdmMaxDeviceNameSize];
+ char guid[webrtc::kAdmMaxGuidSize];
+ mTuningDeviceModule->RecordingDeviceName(index, name, guid);
+ mRecordingDeviceList.emplace_back(name, guid);
+ }
+
+ for (auto &observer : mVoiceDevicesObserverList)
+ {
+ observer->OnDevicesChanged(mPlayoutDeviceList, mRecordingDeviceList);
+ }
+}
+
+void LLWebRTCImpl::OnDevicesUpdated()
+{
+ // reset these to a bad value so an update is forced
+ mRecordingDevice = RECORD_DEVICE_BAD;
+ mPlayoutDevice = PLAYOUT_DEVICE_BAD;
+
+ updateDevices();
+}
+
+
+void LLWebRTCImpl::setTuningMode(bool enable)
+{
+ mTuningMode = enable;
+ mWorkerThread->PostTask(
+ [this, enable] {
+ if (enable)
+ {
+ mPeerDeviceModule->StopRecording();
+ mPeerDeviceModule->StopPlayout();
+ ll_set_device_module_render_device(mTuningDeviceModule, mPlayoutDevice);
+ ll_set_device_module_capture_device(mTuningDeviceModule, mRecordingDevice);
+ mTuningDeviceModule->InitPlayout();
+ mTuningDeviceModule->InitRecording();
+ mTuningDeviceModule->StartRecording();
+ // TODO: Starting Playout on the TDM appears to create an audio artifact (click)
+ // in this case, so disabling it for now. We may have to do something different
+ // if we enable 'echo playback' via the TDM when tuning.
+ //mTuningDeviceModule->StartPlayout();
+ }
+ else
+ {
+ mTuningDeviceModule->StopRecording();
+ //mTuningDeviceModule->StopPlayout();
+ ll_set_device_module_render_device(mPeerDeviceModule, mPlayoutDevice);
+ ll_set_device_module_capture_device(mPeerDeviceModule, mRecordingDevice);
+ mPeerDeviceModule->InitPlayout();
+ mPeerDeviceModule->InitRecording();
+ mPeerDeviceModule->StartPlayout();
+ mPeerDeviceModule->StartRecording();
+ }
+ }
+ );
+ mSignalingThread->PostTask(
+ [this, enable]
+ {
+ for (auto &connection : mPeerConnections)
+ {
+ if (enable)
+ {
+ connection->enableSenderTracks(false);
+ }
+ else
+ {
+ connection->resetMute();
+ }
+ connection->enableReceiverTracks(!enable);
+ }
+ });
+}
+
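+// Convert the smoothed RMS microphone energy into a positive, dB-style level
+// (-20 * log10(energy)); larger values correspond to quieter input.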
+float LLWebRTCImpl::getTuningAudioLevel() { return -20 * log10f(mTuningAudioDeviceObserver->getMicrophoneEnergy()); }
+
+float LLWebRTCImpl::getPeerConnectionAudioLevel() { return -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()); }
+
+void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->setGain(gain); }
+
+
+//
+// Peer Connection Helpers
+//
+
+LLWebRTCPeerConnectionInterface *LLWebRTCImpl::newPeerConnection()
+{
+ rtc::scoped_refptr<LLWebRTCPeerConnectionImpl> peerConnection = rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>(new rtc::RefCountedObject<LLWebRTCPeerConnectionImpl>());
+ peerConnection->init(this);
+
+ mPeerConnections.emplace_back(peerConnection);
+ peerConnection->enableSenderTracks(!mMute);
+ return peerConnection.get();
+}
+
+void LLWebRTCImpl::freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection)
+{
+ std::vector<rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>>::iterator it =
+ std::find(mPeerConnections.begin(), mPeerConnections.end(), peer_connection);
+ if (it != mPeerConnections.end())
+ {
+ mPeerConnections.erase(it);
+ }
+ if (mPeerConnections.empty())
+ {
+ setRecording(false);
+ }
+}
+
+
+//
+// LLWebRTCPeerConnectionImpl implementation.
+//
+// Most peer connection (signaling) happens on
+// the signaling thread.
+
+LLWebRTCPeerConnectionImpl::LLWebRTCPeerConnectionImpl() :
+ mWebRTCImpl(nullptr),
+ mPeerConnection(nullptr),
+ mMute(false),
+ mAnswerReceived(false)
+{
+}
+
+LLWebRTCPeerConnectionImpl::~LLWebRTCPeerConnectionImpl()
+{
+ mSignalingObserverList.clear();
+ mDataObserverList.clear();
+}
+
+//
+// LLWebRTCPeerConnection interface
+//
+
+void LLWebRTCPeerConnectionImpl::init(LLWebRTCImpl * webrtc_impl)
+{
+ mWebRTCImpl = webrtc_impl;
+ mPeerConnectionFactory = mWebRTCImpl->getPeerConnectionFactory();
+}
+void LLWebRTCPeerConnectionImpl::terminate()
+{
+ mWebRTCImpl->PostSignalingTask(
+ [=]()
+ {
+ if (mPeerConnection)
+ {
+ if (mDataChannel)
+ {
+ {
+ mDataChannel->Close();
+ mDataChannel = nullptr;
+ }
+ }
+
+ mPeerConnection->Close();
+ if (mLocalStream)
+ {
+ auto tracks = mLocalStream->GetAudioTracks();
+ for (auto& track : tracks)
+ {
+ mLocalStream->RemoveTrack(track);
+ }
+ mLocalStream = nullptr;
+ }
+ mPeerConnection = nullptr;
+
+ for (auto &observer : mSignalingObserverList)
+ {
+ observer->OnPeerConnectionClosed();
+ }
+ }
+ });
+}
+
+void LLWebRTCPeerConnectionImpl::setSignalingObserver(LLWebRTCSignalingObserver *observer) { mSignalingObserverList.emplace_back(observer); }
+
+void LLWebRTCPeerConnectionImpl::unsetSignalingObserver(LLWebRTCSignalingObserver *observer)
+{
+ std::vector<LLWebRTCSignalingObserver *>::iterator it =
+ std::find(mSignalingObserverList.begin(), mSignalingObserverList.end(), observer);
+ if (it != mSignalingObserverList.end())
+ {
+ mSignalingObserverList.erase(it);
+ }
+}
+
+
+bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnectionInterface::InitOptions& options)
+{
+ RTC_DCHECK(!mPeerConnection);
+ mAnswerReceived = false;
+
+ mWebRTCImpl->PostSignalingTask(
+ [this,options]()
+ {
+ webrtc::PeerConnectionInterface::RTCConfiguration config;
+ for (auto server : options.mServers)
+ {
+ webrtc::PeerConnectionInterface::IceServer ice_server;
+ for (auto url : server.mUrls)
+ {
+ ice_server.urls.push_back(url);
+ }
+ ice_server.username = server.mUserName;
+ ice_server.password = server.mPassword;
+ config.servers.push_back(ice_server);
+ }
+ config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan;
+
+ config.set_min_port(60000);
+ config.set_max_port(60100);
+
+ webrtc::PeerConnectionDependencies pc_dependencies(this);
+ auto error_or_peer_connection = mPeerConnectionFactory->CreatePeerConnectionOrError(config, std::move(pc_dependencies));
+ if (error_or_peer_connection.ok())
+ {
+ mPeerConnection = std::move(error_or_peer_connection.value());
+ }
+ else
+ {
+ RTC_LOG(LS_ERROR) << __FUNCTION__ << "Error creating peer connection: " << error_or_peer_connection.error().message();
+ for (auto &observer : mSignalingObserverList)
+ {
+ observer->OnRenegotiationNeeded();
+ }
+ return;
+ }
+
+ webrtc::DataChannelInit init;
+ init.ordered = true;
+
+ auto data_channel_or_error = mPeerConnection->CreateDataChannelOrError("SLData", &init);
+ if (data_channel_or_error.ok())
+ {
+ mDataChannel = std::move(data_channel_or_error.value());
+
+ mDataChannel->RegisterObserver(this);
+ }
+
+ cricket::AudioOptions audioOptions;
+ audioOptions.auto_gain_control = true;
+ audioOptions.echo_cancellation = true;
+ audioOptions.noise_suppression = true;
+
+ mLocalStream = mPeerConnectionFactory->CreateLocalMediaStream("SLStream");
+
+ rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
+ mPeerConnectionFactory->CreateAudioTrack("SLAudio", mPeerConnectionFactory->CreateAudioSource(audioOptions).get()));
+ audio_track->set_enabled(false);
+ mLocalStream->AddTrack(audio_track);
+
+ mPeerConnection->AddTrack(audio_track, {"SLStream"});
+
+ auto senders = mPeerConnection->GetSenders();
+
+ for (auto &sender : senders)
+ {
+ webrtc::RtpParameters params;
+ webrtc::RtpCodecParameters codecparam;
+ codecparam.name = "opus";
+ codecparam.kind = cricket::MEDIA_TYPE_AUDIO;
+ codecparam.clock_rate = 48000;
+ codecparam.num_channels = 2;
+ codecparam.parameters["stereo"] = "1";
+ codecparam.parameters["sprop-stereo"] = "1";
+ params.codecs.push_back(codecparam);
+ sender->SetParameters(params);
+ }
+
+ auto receivers = mPeerConnection->GetReceivers();
+ for (auto &receiver : receivers)
+ {
+ webrtc::RtpParameters params;
+ webrtc::RtpCodecParameters codecparam;
+ codecparam.name = "opus";
+ codecparam.kind = cricket::MEDIA_TYPE_AUDIO;
+ codecparam.clock_rate = 48000;
+ codecparam.num_channels = 2;
+ codecparam.parameters["stereo"] = "1";
+ codecparam.parameters["sprop-stereo"] = "1";
+ params.codecs.push_back(codecparam);
+ receiver->SetParameters(params);
+ }
+
+ webrtc::PeerConnectionInterface::RTCOfferAnswerOptions offerOptions;
+ mPeerConnection->CreateOffer(this, offerOptions);
+ });
+
+ return true;
+}
+
+bool LLWebRTCPeerConnectionImpl::shutdownConnection()
+{
+ terminate();
+ return true;
+}
+
+void LLWebRTCPeerConnectionImpl::enableSenderTracks(bool enable)
+{
+ // set_enabled shouldn't be done on the worker thread.
+ if (mPeerConnection)
+ {
+ auto senders = mPeerConnection->GetSenders();
+ for (auto &sender : senders)
+ {
+ sender->track()->set_enabled(enable);
+ }
+ }
+}
+
+void LLWebRTCPeerConnectionImpl::enableReceiverTracks(bool enable)
+{
+ // set_enabled shouldn't be done on the worker thread
+ if (mPeerConnection)
+ {
+ auto receivers = mPeerConnection->GetReceivers();
+ for (auto &receiver : receivers)
+ {
+ receiver->track()->set_enabled(enable);
+ }
+ }
+}
+
+// Tell the peer connection that we've received an SDP answer from the sim.
+void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp)
+{
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Remote SDP: " << sdp;
+
+ mWebRTCImpl->PostSignalingTask(
+ [this, sdp]()
+ {
+ if (mPeerConnection)
+ {
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->peer_connection_state();
+ mPeerConnection->SetRemoteDescription(webrtc::CreateSessionDescription(webrtc::SdpType::kAnswer, sdp),
+ rtc::scoped_refptr<webrtc::SetRemoteDescriptionObserverInterface>(this));
+ }
+ });
+}
+
+
+//
+// LLWebRTCAudioInterface implementation
+//
+
+void LLWebRTCPeerConnectionImpl::setMute(bool mute)
+{
+ mMute = mute;
+ mWebRTCImpl->PostSignalingTask(
+ [this]()
+ {
+ if (mPeerConnection)
+ {
+ auto senders = mPeerConnection->GetSenders();
+
+ RTC_LOG(LS_INFO) << __FUNCTION__ << (mMute ? "disabling" : "enabling") << " streams count " << senders.size();
+ for (auto &sender : senders)
+ {
+ auto track = sender->track();
+ if (track)
+ {
+ track->set_enabled(!mMute);
+ }
+ }
+ }
+ });
+}
+
+void LLWebRTCPeerConnectionImpl::resetMute()
+{
+ setMute(mMute);
+}
+
+void LLWebRTCPeerConnectionImpl::setReceiveVolume(float volume)
+{
+ mWebRTCImpl->PostSignalingTask(
+ [this, volume]()
+ {
+ if (mPeerConnection)
+ {
+ auto receivers = mPeerConnection->GetReceivers();
+
+ for (auto &receiver : receivers)
+ {
+ for (auto &stream : receiver->streams())
+ {
+ for (auto &track : stream->GetAudioTracks())
+ {
+ track->GetSource()->SetVolume(volume);
+ }
+ }
+ }
+ }
+ });
+}
+
+void LLWebRTCPeerConnectionImpl::setSendVolume(float volume)
+{
+ mWebRTCImpl->PostSignalingTask(
+ [this, volume]()
+ {
+ if (mLocalStream)
+ {
+ for (auto &track : mLocalStream->GetAudioTracks())
+ {
+ track->GetSource()->SetVolume(volume*5.0);
+ }
+ }
+ });
+}
+
+//
+// PeerConnectionObserver implementation.
+//
+
+void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
+ const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams)
+{
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id();
+ webrtc::RtpParameters params;
+ webrtc::RtpCodecParameters codecparam;
+ codecparam.name = "opus";
+ codecparam.kind = cricket::MEDIA_TYPE_AUDIO;
+ codecparam.clock_rate = 48000;
+ codecparam.num_channels = 2;
+ codecparam.parameters["stereo"] = "1";
+ codecparam.parameters["sprop-stereo"] = "1";
+ params.codecs.push_back(codecparam);
+ receiver->SetParameters(params);
+}
+
+void LLWebRTCPeerConnectionImpl::OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver)
+{
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id();
+}
+
+void LLWebRTCPeerConnectionImpl::OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel)
+{
+ if (mDataChannel)
+ {
+ mDataChannel->UnregisterObserver();
+ }
+ mDataChannel = channel;
+ channel->RegisterObserver(this);
+}
+
+void LLWebRTCPeerConnectionImpl::OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state)
+{
+ LLWebRTCSignalingObserver::EIceGatheringState webrtc_new_state = LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_NEW;
+ switch (new_state)
+ {
+ case webrtc::PeerConnectionInterface::IceGatheringState::kIceGatheringNew:
+ webrtc_new_state = LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_NEW;
+ break;
+ case webrtc::PeerConnectionInterface::IceGatheringState::kIceGatheringGathering:
+ webrtc_new_state = LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_GATHERING;
+ break;
+ case webrtc::PeerConnectionInterface::IceGatheringState::kIceGatheringComplete:
+ webrtc_new_state = LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_COMPLETE;
+ break;
+ default:
+ RTC_LOG(LS_ERROR) << __FUNCTION__ << " Bad Ice Gathering State" << new_state;
+ webrtc_new_state = LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_NEW;
+ return;
+ }
+
+ if (mAnswerReceived)
+ {
+ for (auto &observer : mSignalingObserverList)
+ {
+ observer->OnIceGatheringState(webrtc_new_state);
+ }
+ }
+}
+
+// Called any time the PeerConnectionState changes.
+void LLWebRTCPeerConnectionImpl::OnConnectionChange(webrtc::PeerConnectionInterface::PeerConnectionState new_state)
+{
+ RTC_LOG(LS_ERROR) << __FUNCTION__ << " Peer Connection State Change " << new_state;
+
+ switch (new_state)
+ {
+ case webrtc::PeerConnectionInterface::PeerConnectionState::kConnected:
+ {
+ mWebRTCImpl->PostWorkerTask([this]() {
+ for (auto &observer : mSignalingObserverList)
+ {
+ observer->OnAudioEstablished(this);
+ }
+ });
+ break;
+ }
+ case webrtc::PeerConnectionInterface::PeerConnectionState::kFailed:
+ case webrtc::PeerConnectionInterface::PeerConnectionState::kDisconnected:
+ {
+ for (auto &observer : mSignalingObserverList)
+ {
+ observer->OnRenegotiationNeeded();
+ }
+
+ break;
+ }
+ default:
+ {
+ break;
+ }
+ }
+}
+
+// Convert an ICE candidate into a string appropriate for trickling
+// to the Secondlife WebRTC server via the sim.
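+// The result is the body of a standard SDP "a=candidate:" attribute, e.g.
+// "2157334968 1 udp 2122260223 192.0.2.10 56789 typ host".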
+static std::string iceCandidateToTrickleString(const webrtc::IceCandidateInterface *candidate)
+{
+ std::ostringstream candidate_stream;
+
+ candidate_stream <<
+ candidate->candidate().foundation() << " " <<
+ std::to_string(candidate->candidate().component()) << " " <<
+ candidate->candidate().protocol() << " " <<
+ std::to_string(candidate->candidate().priority()) << " " <<
+ candidate->candidate().address().ipaddr().ToString() << " " <<
+ candidate->candidate().address().PortAsString() << " typ ";
+
+ if (candidate->candidate().type() == cricket::LOCAL_PORT_TYPE)
+ {
+ candidate_stream << "host";
+ }
+ else if (candidate->candidate().type() == cricket::STUN_PORT_TYPE)
+ {
+ candidate_stream << "srflx " <<
+ "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " <<
+ "rport " << candidate->candidate().related_address().PortAsString();
+ }
+ else if (candidate->candidate().type() == cricket::RELAY_PORT_TYPE)
+ {
+ candidate_stream << "relay " <<
+ "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " <<
+ "rport " << candidate->candidate().related_address().PortAsString();
+ }
+ else if (candidate->candidate().type() == cricket::PRFLX_PORT_TYPE)
+ {
+ candidate_stream << "prflx " <<
+ "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " <<
+ "rport " << candidate->candidate().related_address().PortAsString();
+ }
+ else {
+ RTC_LOG(LS_ERROR) << __FUNCTION__ << " Unknown candidate type " << candidate->candidate().type();
+ }
+ if (candidate->candidate().protocol() == "tcp")
+ {
+ candidate_stream << " tcptype " << candidate->candidate().tcptype();
+ }
+
+ return candidate_stream.str();
+}
+
+// The webrtc library has a new ice candidate.
+void LLWebRTCPeerConnectionImpl::OnIceCandidate(const webrtc::IceCandidateInterface *candidate)
+{
+    if (!candidate)
+    {
+        RTC_LOG(LS_ERROR) << __FUNCTION__ << " No Ice Candidate Given";
+        return;
+    }
+    RTC_LOG(LS_INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index();
+
+ if (mAnswerReceived)
+ {
+ // We've already received an answer SDP from the Secondlife WebRTC server
+ // so simply tell observers about our new ice candidate.
+ for (auto &observer : mSignalingObserverList)
+ {
+ LLWebRTCIceCandidate ice_candidate;
+ ice_candidate.mCandidate = iceCandidateToTrickleString(candidate);
+ ice_candidate.mMLineIndex = candidate->sdp_mline_index();
+ ice_candidate.mSdpMid = candidate->sdp_mid();
+ observer->OnIceCandidate(ice_candidate);
+ }
+ }
+ else
+ {
+ // As we've not yet received our answer, cache the candidate.
+ mCachedIceCandidates.push_back(
+ webrtc::CreateIceCandidate(candidate->sdp_mid(),
+ candidate->sdp_mline_index(),
+ candidate->candidate()));
+ }
+}
+
+//
+// CreateSessionDescriptionObserver implementation.
+//
+void LLWebRTCPeerConnectionImpl::OnSuccess(webrtc::SessionDescriptionInterface *desc)
+{
+ std::string sdp;
+ desc->ToString(&sdp);
+    RTC_LOG(LS_INFO) << sdp;
+ // mangle the sdp as this is the only way currently to bump up
+ // the send audio rate to 48k
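+    // For example, an offer line "a=rtpmap:111 opus/<rate>/2" is rewritten with a
+    // 48000 clock rate, and the matching "a=fmtp:111 ..." line gains stereo and
+    // 48k playback/capture parameters (111 here is just an illustrative payload id).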
+ std::istringstream sdp_stream(sdp);
+ std::ostringstream sdp_mangled_stream;
+ std::string sdp_line;
+ std::string opus_payload;
+ while (std::getline(sdp_stream, sdp_line))
+ {
+ int bandwidth = 0;
+ int payload_id = 0;
+ // force mono down, stereo up
+ if (std::sscanf(sdp_line.c_str(), "a=rtpmap:%i opus/%i/2", &payload_id, &bandwidth) == 2)
+ {
+ opus_payload = std::to_string(payload_id);
+ sdp_mangled_stream << "a=rtpmap:" << opus_payload << " opus/48000/2" << "\n";
+ }
+ else if (sdp_line.find("a=fmtp:" + opus_payload) == 0)
+ {
+ sdp_mangled_stream << sdp_line << "a=fmtp:" << opus_payload
+ << " minptime=10;useinbandfec=1;stereo=1;sprop-stereo=1;maxplaybackrate=48000;sprop-maxplaybackrate=48000;sprop-maxcapturerate=48000\n";
+ }
+ else
+ {
+ sdp_mangled_stream << sdp_line << "\n";
+ }
+ }
+
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Local SDP: " << sdp_mangled_stream.str();
+ std::string mangled_sdp = sdp_mangled_stream.str();
+ for (auto &observer : mSignalingObserverList)
+ {
+ observer->OnOfferAvailable(mangled_sdp);
+ }
+
+ mPeerConnection->SetLocalDescription(std::unique_ptr<webrtc::SessionDescriptionInterface>(
+ webrtc::CreateSessionDescription(webrtc::SdpType::kOffer, mangled_sdp)),
+ rtc::scoped_refptr<webrtc::SetLocalDescriptionObserverInterface>(this));
+
+}
+
+void LLWebRTCPeerConnectionImpl::OnFailure(webrtc::RTCError error)
+{
+ RTC_LOG(LS_ERROR) << ToString(error.type()) << ": " << error.message();
+ for (auto &observer : mSignalingObserverList)
+ {
+ observer->OnRenegotiationNeeded();
+ }
+}
+
+//
+// SetRemoteDescriptionObserverInterface implementation.
+//
+void LLWebRTCPeerConnectionImpl::OnSetRemoteDescriptionComplete(webrtc::RTCError error)
+{
+ // we've received an answer SDP from the sim.
+
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->signaling_state();
+ if (!error.ok())
+ {
+ RTC_LOG(LS_ERROR) << ToString(error.type()) << ": " << error.message();
+ for (auto &observer : mSignalingObserverList)
+ {
+ observer->OnRenegotiationNeeded();
+ }
+ return;
+ }
+ mAnswerReceived = true;
+
+ // tell the observers about any cached ICE candidates.
+ for (auto &observer : mSignalingObserverList)
+ {
+ for (auto &candidate : mCachedIceCandidates)
+ {
+ LLWebRTCIceCandidate ice_candidate;
+ ice_candidate.mCandidate = iceCandidateToTrickleString(candidate.get());
+ ice_candidate.mMLineIndex = candidate->sdp_mline_index();
+ ice_candidate.mSdpMid = candidate->sdp_mid();
+ observer->OnIceCandidate(ice_candidate);
+ }
+ }
+ mCachedIceCandidates.clear();
+ if (mPeerConnection)
+ {
+ OnIceGatheringChange(mPeerConnection->ice_gathering_state());
+ }
+
+}
+
+//
+// SetLocalDescriptionObserverInterface implementation.
+//
+void LLWebRTCPeerConnectionImpl::OnSetLocalDescriptionComplete(webrtc::RTCError error)
+{
+}
+
+//
+// DataChannelObserver implementation
+//
+
+void LLWebRTCPeerConnectionImpl::OnStateChange()
+{
+ if (!mDataChannel)
+ {
+ return;
+ }
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State: " << webrtc::DataChannelInterface::DataStateString(mDataChannel->state());
+ switch (mDataChannel->state())
+ {
+ case webrtc::DataChannelInterface::kOpen:
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State Open";
+ for (auto &observer : mSignalingObserverList)
+ {
+ observer->OnDataChannelReady(this);
+ }
+ break;
+ case webrtc::DataChannelInterface::kConnecting:
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State Connecting";
+ break;
+ case webrtc::DataChannelInterface::kClosing:
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State closing";
+ break;
+ case webrtc::DataChannelInterface::kClosed:
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State closed";
+ break;
+ default:
+ break;
+ }
+}
+
+void LLWebRTCPeerConnectionImpl::OnMessage(const webrtc::DataBuffer& buffer)
+{
+ std::string data((const char*)buffer.data.cdata(), buffer.size());
+ for (auto &observer : mDataObserverList)
+ {
+ observer->OnDataReceived(data, buffer.binary);
+ }
+}
+
+//
+// LLWebRTCDataInterface
+//
+
+void LLWebRTCPeerConnectionImpl::sendData(const std::string& data, bool binary)
+{
+ if (mDataChannel)
+ {
+ rtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length());
+ webrtc::DataBuffer buffer(cowBuffer, binary);
+ mWebRTCImpl->PostNetworkTask([this, buffer]() {
+ if (mDataChannel)
+ {
+ mDataChannel->Send(buffer);
+ }
+ });
+ }
+}
+
+void LLWebRTCPeerConnectionImpl::setDataObserver(LLWebRTCDataObserver* observer)
+{
+ mDataObserverList.emplace_back(observer);
+}
+
+void LLWebRTCPeerConnectionImpl::unsetDataObserver(LLWebRTCDataObserver* observer)
+{
+ std::vector<LLWebRTCDataObserver *>::iterator it =
+ std::find(mDataObserverList.begin(), mDataObserverList.end(), observer);
+ if (it != mDataObserverList.end())
+ {
+ mDataObserverList.erase(it);
+ }
+}
+
+LLWebRTCImpl * gWebRTCImpl = nullptr;
+LLWebRTCDeviceInterface * getDeviceInterface()
+{
+ return gWebRTCImpl;
+}
+
+LLWebRTCPeerConnectionInterface* newPeerConnection()
+{
+ return gWebRTCImpl->newPeerConnection();
+}
+
+void freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection)
+{
+ gWebRTCImpl->freePeerConnection(peer_connection);
+}
+
+
+void init()
+{
+ gWebRTCImpl = new LLWebRTCImpl();
+ gWebRTCImpl->init();
+}
+
+void terminate()
+{
+ if (gWebRTCImpl)
+ {
+ gWebRTCImpl->terminate();
+ gWebRTCImpl = nullptr;
+ }
+}
+
+} // namespace llwebrtc
diff --git a/indra/llwebrtc/llwebrtc.h b/indra/llwebrtc/llwebrtc.h
new file mode 100644
index 0000000000..f447ea990a
--- /dev/null
+++ b/indra/llwebrtc/llwebrtc.h
@@ -0,0 +1,278 @@
+/**
+ * @file llwebrtc.h
+ * @brief WebRTC interface
+ *
+ * $LicenseInfo:firstyear=2023&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2023, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+/*
+ * llwebrtc wraps the native webrtc c++ library in a dynamic library with a simplified interface
+ * so that the viewer can use it. This is done because native webrtc has a different
+ * overall threading model than the viewer.
+ * The native webrtc library is also compiled with clang, and has memory management
+ * functions that conflict namespace-wise with those in the viewer.
+ *
+ * Due to these differences, code from the viewer cannot be pulled into this
+ * dynamic library, so it remains very simple.
+ */
+
+#ifndef LLWEBRTC_H
+#define LLWEBRTC_H
+
+#include <string>
+#include <vector>
+
+#ifdef LL_MAKEDLL
+#ifdef WEBRTC_WIN
+#define LLSYMEXPORT __declspec(dllexport)
+#elif WEBRTC_LINUX
+#define LLSYMEXPORT __attribute__((visibility("default")))
+#else
+#define LLSYMEXPORT /**/
+#endif
+#else
+#define LLSYMEXPORT /**/
+#endif // LL_MAKEDLL
+
+namespace llwebrtc
+{
+
+// LLWebRTCVoiceDevice is a simple representation of the
+// components of a device, used to communicate this
+// information to the viewer.
+
+
+// A note on threading.
+// Native WebRTC has its own threading model. Some discussion
+// can be found here (https://webrtc.github.io/webrtc-org/native-code/native-apis/)
+//
+// Note that all callbacks to observers will occur on one of the WebRTC native threads
+// (signaling, worker, etc.). Care should be taken to ensure there are no
+// bad interactions with the viewer threads.
+
+class LLWebRTCVoiceDevice
+{
+ public:
+ std::string mDisplayName; // friendly name for user interface purposes
+ std::string mID; // internal value for selection
+
+ LLWebRTCVoiceDevice(const std::string &display_name, const std::string &id) :
+ mDisplayName(display_name),
+ mID(id)
+ {
+ if (mID.empty())
+ {
+ mID = display_name;
+ }
+ };
+};
+
+typedef std::vector<LLWebRTCVoiceDevice> LLWebRTCVoiceDeviceList;
+
+
+// The LLWebRTCDevicesObserver should be implemented by the viewer
+// webrtc module, which will receive notifications when devices
+// change (are unplugged, etc.)
+class LLWebRTCDevicesObserver
+{
+ public:
+ virtual void OnDevicesChanged(const LLWebRTCVoiceDeviceList &render_devices,
+ const LLWebRTCVoiceDeviceList &capture_devices) = 0;
+};
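+
+// Example (sketch): because OnDevicesChanged arrives on a WebRTC thread (see the
+// threading note above), a viewer-side implementation would typically copy the
+// lists and hand them off to its own main loop. queueToMainThread() is a
+// hypothetical viewer-side helper, not part of this library.
+//
+//   class ViewerDevicesObserver : public LLWebRTCDevicesObserver
+//   {
+//     public:
+//       void OnDevicesChanged(const LLWebRTCVoiceDeviceList &render_devices,
+//                             const LLWebRTCVoiceDeviceList &capture_devices) override
+//       {
+//           queueToMainThread([render_devices, capture_devices]()
+//                             { /* update device menus here */ });
+//       }
+//   };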
+
+
+// The LLWebRTCDeviceInterface provides a way for the viewer
+// to enumerate, set, and get notifications of changes
+// for both capture (microphone) and render (speaker)
+// devices.
+
+class LLWebRTCDeviceInterface
+{
+ public:
+ struct AudioConfig {
+
+ bool mAGC { true };
+
+ bool mEchoCancellation { true };
+
+        // TODO: The various noise suppression levels are configured
+        // on the APM, which requires applying a new config to the APM.
+        // We should pipe the various values through later.
+ typedef enum {
+ NOISE_SUPPRESSION_LEVEL_NONE = 0,
+ NOISE_SUPPRESSION_LEVEL_LOW,
+ NOISE_SUPPRESSION_LEVEL_MODERATE,
+ NOISE_SUPPRESSION_LEVEL_HIGH,
+ NOISE_SUPPRESSION_LEVEL_VERY_HIGH
+ } ENoiseSuppressionLevel;
+ ENoiseSuppressionLevel mNoiseSuppressionLevel { NOISE_SUPPRESSION_LEVEL_VERY_HIGH };
+ };
+
+ virtual void setAudioConfig(AudioConfig config) = 0;
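+    // Example (sketch), where device_interface was obtained from getDeviceInterface():
+    //   LLWebRTCDeviceInterface::AudioConfig config;
+    //   config.mEchoCancellation = false;   // keep the other defaults
+    //   device_interface->setAudioConfig(config);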
+
+ // instructs webrtc to refresh the device list.
+ virtual void refreshDevices() = 0;
+
+ // set the capture and render devices using the unique identifier for the device
+ virtual void setCaptureDevice(const std::string& id) = 0;
+ virtual void setRenderDevice(const std::string& id) = 0;
+
+ // Device observers for device change callbacks.
+ virtual void setDevicesObserver(LLWebRTCDevicesObserver *observer) = 0;
+ virtual void unsetDevicesObserver(LLWebRTCDevicesObserver *observer) = 0;
+
+ // tuning and audio levels
+ virtual void setTuningMode(bool enable) = 0;
+ virtual float getTuningAudioLevel() = 0; // for use during tuning
+ virtual float getPeerConnectionAudioLevel() = 0; // for use when not tuning
+ virtual void setPeerConnectionGain(float gain) = 0;
+};
+
+// LLWebRTCAudioInterface provides the viewer with a way
+// to set audio characteristics (mute, send and receive volume)
+class LLWebRTCAudioInterface
+{
+ public:
+ virtual void setMute(bool mute) = 0;
+ virtual void setReceiveVolume(float volume) = 0; // volume between 0.0 and 1.0
+ virtual void setSendVolume(float volume) = 0; // volume between 0.0 and 1.0
+};
+
+// LLWebRTCDataObserver allows the viewer voice module to be notified when
+// data is received over the data channel.
+class LLWebRTCDataObserver
+{
+public:
+ virtual void OnDataReceived(const std::string& data, bool binary) = 0;
+};
+
+// LLWebRTCDataInterface allows the viewer to send data over the data channel.
+class LLWebRTCDataInterface
+{
+public:
+
+ virtual void sendData(const std::string& data, bool binary=false) = 0;
+
+ virtual void setDataObserver(LLWebRTCDataObserver *observer) = 0;
+ virtual void unsetDataObserver(LLWebRTCDataObserver *observer) = 0;
+};
+
+// LLWebRTCIceCandidate is a basic structure containing
+// information needed for ICE trickling.
+struct LLWebRTCIceCandidate
+{
+ std::string mCandidate;
+ std::string mSdpMid;
+ int mMLineIndex;
+};
+
+// LLWebRTCSignalingObserver provides a way for the native
+// webrtc library to notify the viewer voice module of
+// various state changes.
+class LLWebRTCSignalingObserver
+{
+ public:
+
+ typedef enum e_ice_gathering_state {
+ ICE_GATHERING_NEW,
+ ICE_GATHERING_GATHERING,
+ ICE_GATHERING_COMPLETE
+ } EIceGatheringState;
+
+ // Called when ICE gathering states have changed.
+ // This may be called at any time, as ICE gathering
+ // can be redone while a connection is up.
+ virtual void OnIceGatheringState(EIceGatheringState state) = 0;
+
+ // Called when a new ice candidate is available.
+ virtual void OnIceCandidate(const LLWebRTCIceCandidate& candidate) = 0;
+
+ // Called when an offer is available after a connection is requested.
+ virtual void OnOfferAvailable(const std::string& sdp) = 0;
+
+ // Called when a connection enters a failure state and renegotiation is needed.
+ virtual void OnRenegotiationNeeded() = 0;
+
+ // Called when a peer connection has shut down
+ virtual void OnPeerConnectionClosed() = 0;
+
+ // Called when the audio channel has been established and audio
+ // can begin.
+ virtual void OnAudioEstablished(LLWebRTCAudioInterface *audio_interface) = 0;
+
+ // Called when the data channel has been established and data
+ // transfer can begin.
+ virtual void OnDataChannelReady(LLWebRTCDataInterface *data_interface) = 0;
+};
+
+// LLWebRTCPeerConnectionInterface represents a connection to a peer,
+// in most cases a Secondlife WebRTC server. This interface
+// allows for management of this peer connection.
+class LLWebRTCPeerConnectionInterface
+{
+ public:
+
+ struct InitOptions
+ {
+ // equivalent of PeerConnectionInterface::IceServer
+ struct IceServers {
+
+ // Valid formats are described in RFC7064 and RFC7065.
+            // Urls should contain dns hostnames (not IP addresses),
+            // as the TLS certificate policy is 'secure'
+            // and we do not currently support TLS extensions.
+ std::vector<std::string> mUrls;
+ std::string mUserName;
+ std::string mPassword;
+ };
+
+ std::vector<IceServers> mServers;
+ };
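+
+    // Example (sketch): "turn.example.com" and the credentials are placeholders.
+    //   InitOptions options;
+    //   InitOptions::IceServers server;
+    //   server.mUrls.push_back("turn:turn.example.com");
+    //   server.mUserName = "user";
+    //   server.mPassword = "secret";
+    //   options.mServers.push_back(server);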
+
+ virtual bool initializeConnection(const InitOptions& options) = 0;
+ virtual bool shutdownConnection() = 0;
+
+ virtual void setSignalingObserver(LLWebRTCSignalingObserver* observer) = 0;
+ virtual void unsetSignalingObserver(LLWebRTCSignalingObserver* observer) = 0;
+
+ virtual void AnswerAvailable(const std::string &sdp) = 0;
+};
+
+// The following define the dynamically linked library
+// exports.
+
+// This library must be initialized before use.
+LLSYMEXPORT void init();
+
+// And should be terminated as part of shutdown.
+LLSYMEXPORT void terminate();
+
+// Return an interface for device management.
+LLSYMEXPORT LLWebRTCDeviceInterface* getDeviceInterface();
+
+// Allocate and free peer connections.
+LLSYMEXPORT LLWebRTCPeerConnectionInterface* newPeerConnection();
+LLSYMEXPORT void freePeerConnection(LLWebRTCPeerConnectionInterface *connection);
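+
+// Typical call order (sketch; my_observer and options are viewer-side values):
+//   llwebrtc::init();
+//   LLWebRTCDeviceInterface *devices = llwebrtc::getDeviceInterface();
+//   LLWebRTCPeerConnectionInterface *pc = llwebrtc::newPeerConnection();
+//   pc->setSignalingObserver(my_observer);
+//   pc->initializeConnection(options);
+//   ...
+//   pc->shutdownConnection();
+//   llwebrtc::freePeerConnection(pc);
+//   llwebrtc::terminate();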
+}
+
+#endif // LLWEBRTC_H
diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h
new file mode 100644
index 0000000000..6672f8ce90
--- /dev/null
+++ b/indra/llwebrtc/llwebrtc_impl.h
@@ -0,0 +1,382 @@
+/**
+ * @file llwebrtc_impl.h
+ * @brief WebRTC dynamic library implementation header
+ *
+ * $LicenseInfo:firstyear=2023&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2023, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+#ifndef LLWEBRTC_IMPL_H
+#define LLWEBRTC_IMPL_H
+
+#define LL_MAKEDLL
+#if defined(_WIN32) || defined(_WIN64)
+#define WEBRTC_WIN 1
+#elif defined(__APPLE__)
+#define WEBRTC_MAC 1
+#define WEBRTC_POSIX 1
+#elif __linux__
+#define WEBRTC_LINUX 1
+#define WEBRTC_POSIX 1
+#endif
+
+#include "llwebrtc.h"
+// WebRTC Includes
+#ifdef WEBRTC_WIN
+#pragma warning(disable : 4996) // ignore 'deprecated.' We don't use the functions marked
+ // deprecated in the webrtc headers, but msvc complains anyway.
+ // Clang doesn't, and that's generally what webrtc uses.
+#pragma warning(disable : 4068) // ignore 'invalid pragma.' There are clang pragmas in
+ // the webrtc headers, which msvc doesn't recognize.
+#endif // WEBRTC_WIN
+
+#include "api/scoped_refptr.h"
+#include "rtc_base/ref_count.h"
+#include "rtc_base/ref_counted_object.h"
+#include "rtc_base/ssl_adapter.h"
+#include "rtc_base/thread.h"
+#include "api/peer_connection_interface.h"
+#include "api/media_stream_interface.h"
+#include "api/create_peerconnection_factory.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_device/include/audio_device_data_observer.h"
+#include "rtc_base/task_queue.h"
+#include "api/task_queue/task_queue_factory.h"
+#include "api/task_queue/default_task_queue_factory.h"
+#include "modules/audio_device/include/audio_device_defines.h"
+
+
+namespace llwebrtc
+{
+
+class LLWebRTCPeerConnectionImpl;
+
+
+// Observer that allows capture of audio data,
+// used to determine the audio level of the microphone.
+class LLAudioDeviceObserver : public webrtc::AudioDeviceDataObserver
+{
+ public:
+ LLAudioDeviceObserver();
+
+ // Retrieve the RMS audio loudness
+ float getMicrophoneEnergy();
+
+    // Data retrieved from the capture device is
+ // passed in here for processing.
+ void OnCaptureData(const void *audio_samples,
+ const size_t num_samples,
+ const size_t bytes_per_sample,
+ const size_t num_channels,
+ const uint32_t samples_per_sec) override;
+
+ // This is for data destined for the render device.
+ // not currently used.
+ void OnRenderData(const void *audio_samples,
+ const size_t num_samples,
+ const size_t bytes_per_sample,
+ const size_t num_channels,
+ const uint32_t samples_per_sec) override;
+
+ protected:
+ static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames)
+ float mSumVector[NUM_PACKETS_TO_FILTER];
+ float mMicrophoneEnergy;
+};
+
+// Used to process/retrieve audio levels after
+// all of the processing (AGC, AEC, etc.) for display in-world to the user.
+class LLCustomProcessor : public webrtc::CustomProcessing
+{
+ public:
+ LLCustomProcessor();
+ ~LLCustomProcessor() override {}
+
+ // (Re-) Initializes the submodule.
+ void Initialize(int sample_rate_hz, int num_channels) override;
+
+ // Analyzes the given capture or render signal.
+ void Process(webrtc::AudioBuffer *audio) override;
+
+ // Returns a string representation of the module state.
+ std::string ToString() const override { return ""; }
+
+ float getMicrophoneEnergy() { return mMicrophoneEnergy; }
+
+ void setGain(float gain) { mGain = gain; }
+
+ protected:
+ static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing
+ int mSampleRateHz;
+ int mNumChannels;
+
+ float mSumVector[NUM_PACKETS_TO_FILTER];
+ float mMicrophoneEnergy;
+ float mGain;
+};
+
+
+// Primary singleton implementation for interfacing
+// with the native webrtc library.
+class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceSink
+{
+ public:
+ LLWebRTCImpl();
+ ~LLWebRTCImpl() {}
+
+ void init();
+ void terminate();
+
+ //
+ // LLWebRTCDeviceInterface
+ //
+
+ void setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config = LLWebRTCDeviceInterface::AudioConfig()) override;
+
+ void refreshDevices() override;
+
+ void setDevicesObserver(LLWebRTCDevicesObserver *observer) override;
+ void unsetDevicesObserver(LLWebRTCDevicesObserver *observer) override;
+
+ void setCaptureDevice(const std::string& id) override;
+ void setRenderDevice(const std::string& id) override;
+
+ void setTuningMode(bool enable) override;
+ float getTuningAudioLevel() override;
+ float getPeerConnectionAudioLevel() override;
+
+ void setPeerConnectionGain(float gain) override;
+
+ //
+ // AudioDeviceSink
+ //
+ void OnDevicesUpdated() override;
+
+ //
+ // Helpers
+ //
+
+ // The following thread helpers allow the
+ // LLWebRTCPeerConnectionImpl class to post
+ // tasks to the native webrtc threads.
+ void PostWorkerTask(absl::AnyInvocable<void() &&> task,
+ const webrtc::Location& location = webrtc::Location::Current())
+ {
+ mWorkerThread->PostTask(std::move(task), location);
+ }
+
+ void PostSignalingTask(absl::AnyInvocable<void() &&> task,
+ const webrtc::Location& location = webrtc::Location::Current())
+ {
+ mSignalingThread->PostTask(std::move(task), location);
+ }
+
+ void PostNetworkTask(absl::AnyInvocable<void() &&> task,
+ const webrtc::Location& location = webrtc::Location::Current())
+ {
+ mNetworkThread->PostTask(std::move(task), location);
+ }
+
+ void WorkerBlockingCall(rtc::FunctionView<void()> functor,
+ const webrtc::Location& location = webrtc::Location::Current())
+ {
+ mWorkerThread->BlockingCall(std::move(functor), location);
+ }
+
+ void SignalingBlockingCall(rtc::FunctionView<void()> functor,
+ const webrtc::Location& location = webrtc::Location::Current())
+ {
+ mSignalingThread->BlockingCall(std::move(functor), location);
+ }
+
+ void NetworkBlockingCall(rtc::FunctionView<void()> functor,
+ const webrtc::Location& location = webrtc::Location::Current())
+ {
+ mNetworkThread->BlockingCall(std::move(functor), location);
+ }
+
+ // Allows the LLWebRTCPeerConnectionImpl class to retrieve the
+ // native webrtc PeerConnectionFactory.
+ rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> getPeerConnectionFactory()
+ {
+ return mPeerConnectionFactory;
+ }
+
+ // create or destroy a peer connection.
+ LLWebRTCPeerConnectionInterface* newPeerConnection();
+ void freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection);
+
+ // enables/disables capture via the capture device
+ void setRecording(bool recording);
+
+ protected:
+ // The native webrtc threads
+ std::unique_ptr<rtc::Thread> mNetworkThread;
+ std::unique_ptr<rtc::Thread> mWorkerThread;
+ std::unique_ptr<rtc::Thread> mSignalingThread;
+
+ // The factory that allows creation of native webrtc PeerConnections.
+ rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
+
+ rtc::scoped_refptr<webrtc::AudioProcessing> mAudioProcessingModule;
+
+ // more native webrtc stuff
+ std::unique_ptr<webrtc::TaskQueueFactory> mTaskQueueFactory;
+
+
+ // Devices
+ void updateDevices();
+ rtc::scoped_refptr<webrtc::AudioDeviceModule> mTuningDeviceModule;
+ rtc::scoped_refptr<webrtc::AudioDeviceModule> mPeerDeviceModule;
+ std::vector<LLWebRTCDevicesObserver *> mVoiceDevicesObserverList;
+
+    // accessors for devices in native webrtc apparently aren't implemented yet.
+ bool mTuningMode;
+ int32_t mRecordingDevice;
+ LLWebRTCVoiceDeviceList mRecordingDeviceList;
+
+ int32_t mPlayoutDevice;
+ LLWebRTCVoiceDeviceList mPlayoutDeviceList;
+
+ bool mMute;
+
+ LLAudioDeviceObserver * mTuningAudioDeviceObserver;
+ LLCustomProcessor * mPeerCustomProcessor;
+
+ // peer connections
+ std::vector<rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>> mPeerConnections;
+};
+
+
+// The implementation of a peer connection, which contains
+// the various interfaces used by the viewer to interact with
+// the webrtc connection.
+class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
+ public LLWebRTCAudioInterface,
+ public LLWebRTCDataInterface,
+ public webrtc::PeerConnectionObserver,
+ public webrtc::CreateSessionDescriptionObserver,
+ public webrtc::SetRemoteDescriptionObserverInterface,
+ public webrtc::SetLocalDescriptionObserverInterface,
+ public webrtc::DataChannelObserver
+
+{
+ public:
+ LLWebRTCPeerConnectionImpl();
+ ~LLWebRTCPeerConnectionImpl();
+
+ void init(LLWebRTCImpl * webrtc_impl);
+ void terminate();
+
+ virtual void AddRef() const override = 0;
+ virtual rtc::RefCountReleaseStatus Release() const override = 0;
+
+ //
+ // LLWebRTCPeerConnection
+ //
+ bool initializeConnection(const InitOptions& options) override;
+ bool shutdownConnection() override;
+
+ void setSignalingObserver(LLWebRTCSignalingObserver *observer) override;
+ void unsetSignalingObserver(LLWebRTCSignalingObserver *observer) override;
+ void AnswerAvailable(const std::string &sdp) override;
+
+ //
+ // LLWebRTCAudioInterface
+ //
+ void setMute(bool mute) override;
+ void setReceiveVolume(float volume) override; // volume between 0.0 and 1.0
+ void setSendVolume(float volume) override; // volume between 0.0 and 1.0
+
+ //
+ // LLWebRTCDataInterface
+ //
+ void sendData(const std::string& data, bool binary=false) override;
+ void setDataObserver(LLWebRTCDataObserver *observer) override;
+ void unsetDataObserver(LLWebRTCDataObserver *observer) override;
+
+ //
+ // PeerConnectionObserver implementation.
+ //
+
+ void OnSignalingChange(webrtc::PeerConnectionInterface::SignalingState new_state) override {}
+ void OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
+ const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) override;
+ void OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) override;
+ void OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) override;
+ void OnRenegotiationNeeded() override {}
+ void OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) override {};
+ void OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) override;
+ void OnIceCandidate(const webrtc::IceCandidateInterface *candidate) override;
+ void OnIceConnectionReceivingChange(bool receiving) override {}
+ void OnConnectionChange(webrtc::PeerConnectionInterface::PeerConnectionState new_state) override;
+
+ //
+ // CreateSessionDescriptionObserver implementation.
+ //
+ void OnSuccess(webrtc::SessionDescriptionInterface *desc) override;
+ void OnFailure(webrtc::RTCError error) override;
+
+ //
+ // SetRemoteDescriptionObserverInterface implementation.
+ //
+ void OnSetRemoteDescriptionComplete(webrtc::RTCError error) override;
+
+ //
+ // SetLocalDescriptionObserverInterface implementation.
+ //
+ void OnSetLocalDescriptionComplete(webrtc::RTCError error) override;
+
+ //
+ // DataChannelObserver implementation.
+ //
+ void OnStateChange() override;
+ void OnMessage(const webrtc::DataBuffer& buffer) override;
+
+ // Helpers
+ void resetMute();
+ void enableSenderTracks(bool enable);
+ void enableReceiverTracks(bool enable);
+
+ protected:
+
+ LLWebRTCImpl * mWebRTCImpl;
+
+ rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
+
+ bool mMute;
+
+ // signaling
+ std::vector<LLWebRTCSignalingObserver *> mSignalingObserverList;
+ std::vector<std::unique_ptr<webrtc::IceCandidateInterface>> mCachedIceCandidates;
+ bool mAnswerReceived;
+
+ rtc::scoped_refptr<webrtc::PeerConnectionInterface> mPeerConnection;
+ rtc::scoped_refptr<webrtc::MediaStreamInterface> mLocalStream;
+
+ // data
+ std::vector<LLWebRTCDataObserver *> mDataObserverList;
+ rtc::scoped_refptr<webrtc::DataChannelInterface> mDataChannel;
+};
+
+}
+
+#endif // LLWEBRTC_IMPL_H