Diffstat (limited to 'indra/llwebrtc')
-rw-r--r--  indra/llwebrtc/CMakeLists.txt   |   8
-rw-r--r--  indra/llwebrtc/llwebrtc.cpp     | 742
-rw-r--r--  indra/llwebrtc/llwebrtc.h       |   5
-rw-r--r--  indra/llwebrtc/llwebrtc_impl.h  | 414
4 files changed, 728 insertions, 441 deletions
diff --git a/indra/llwebrtc/CMakeLists.txt b/indra/llwebrtc/CMakeLists.txt index a64a3e4dac..a01d9fc632 100644 --- a/indra/llwebrtc/CMakeLists.txt +++ b/indra/llwebrtc/CMakeLists.txt @@ -10,10 +10,6 @@ include(WebRTC)  project(llwebrtc) -if (LINUX) -    add_compile_options(-Wno-deprecated-declarations) # webrtc::CreateAudioDeviceWithDataObserver is deprecated -endif (LINUX) -  set(llwebrtc_SOURCE_FILES      llwebrtc.cpp      ) @@ -46,7 +42,7 @@ if (WINDOWS)                                         iphlpapi                                         libcmt)      # as the webrtc libraries are release, build this binary as release as well. -    target_compile_options(llwebrtc PRIVATE "/MT") +    target_compile_options(llwebrtc PRIVATE "/MT" "/Zc:wchar_t")      if (USE_BUGSPLAT)          set_target_properties(llwebrtc PROPERTIES PDB_OUTPUT_DIRECTORY "${SYMBOLS_STAGING_DIR}")      endif (USE_BUGSPLAT) @@ -65,6 +61,8 @@ target_include_directories( llwebrtc INTERFACE ${CMAKE_CURRENT_SOURCE_DIR})  if (WINDOWS)      set_property(TARGET llwebrtc PROPERTY          MSVC_RUNTIME_LIBRARY "MultiThreadedDebug") +else() +    target_compile_options(llwebrtc PRIVATE -Wno-deprecated-declarations) # webrtc::CreateAudioDeviceWithDataObserver is deprecated  endif (WINDOWS)  ADD_CUSTOM_COMMAND(TARGET llwebrtc POST_BUILD diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 12d02bbcc1..edba2bee9a 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -9,7 +9,7 @@   * This library is free software; you can redistribute it and/or   * modify it under the terms of the GNU Lesser General Public   * License as published by the Free Software Foundation; - * version 2.1 of the License only. + * version 2.1 of the License only   *   * This library is distributed in the hope that it will be useful,   * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -32,41 +32,79 @@  #include "api/audio_codecs/audio_encoder_factory.h"  #include "api/audio_codecs/builtin_audio_decoder_factory.h"  #include "api/audio_codecs/builtin_audio_encoder_factory.h" +#include "api/audio/builtin_audio_processing_builder.h"  #include "api/media_stream_interface.h"  #include "api/media_stream_track.h"  #include "modules/audio_processing/audio_buffer.h"  #include "modules/audio_mixer/audio_mixer_impl.h" +#include "api/environment/environment_factory.h"  namespace llwebrtc  { +#if WEBRTC_WIN +static int16_t PLAYOUT_DEVICE_DEFAULT = webrtc::AudioDeviceModule::kDefaultDevice; +static int16_t RECORD_DEVICE_DEFAULT  = webrtc::AudioDeviceModule::kDefaultDevice; +#else +static int16_t PLAYOUT_DEVICE_DEFAULT = 0; +static int16_t RECORD_DEVICE_DEFAULT  = 0; +#endif -static int16_t PLAYOUT_DEVICE_DEFAULT = -1; -static int16_t PLAYOUT_DEVICE_BAD     = -2; -static int16_t RECORD_DEVICE_DEFAULT  = -1; -static int16_t RECORD_DEVICE_BAD      = -2; -LLAudioDeviceObserver::LLAudioDeviceObserver() : mSumVector {0}, mMicrophoneEnergy(0.0) {} +// +// LLWebRTCAudioTransport implementation +// -float LLAudioDeviceObserver::getMicrophoneEnergy() { return mMicrophoneEnergy; } +LLWebRTCAudioTransport::LLWebRTCAudioTransport() : mMicrophoneEnergy(0.0) +{ +    memset(mSumVector, 0, sizeof(mSumVector)); +} -// TODO: Pull smoothing/filtering code into a common helper function -// for LLAudioDeviceObserver and LLCustomProcessor +void LLWebRTCAudioTransport::SetEngineTransport(webrtc::AudioTransport* t) +{ +    engine_.store(t, std::memory_order_release); +} -void LLAudioDeviceObserver::OnCaptureData(const void    
*audio_samples,
-                                          const size_t   num_samples,
-                                          const size_t   bytes_per_sample,
-                                          const size_t   num_channels,
-                                          const uint32_t samples_per_sec)
+int32_t LLWebRTCAudioTransport::RecordedDataIsAvailable(const void* audio_data,
+                                                        size_t      number_of_frames,
+                                                        size_t      bytes_per_frame,
+                                                        size_t      number_of_channels,
+                                                        uint32_t    samples_per_sec,
+                                                        uint32_t    total_delay_ms,
+                                                        int32_t     clock_drift,
+                                                        uint32_t    current_mic_level,
+                                                        bool        key_pressed,
+                                                        uint32_t&   new_mic_level)
 {
+    auto* engine = engine_.load(std::memory_order_acquire);
+
+    // 1) Deliver to engine (authoritative).
+    int32_t ret = 0;
+    if (engine)
+    {
+        ret = engine->RecordedDataIsAvailable(audio_data,
+                                              number_of_frames,
+                                              bytes_per_frame,
+                                              number_of_channels,
+                                              samples_per_sec,
+                                              total_delay_ms,
+                                              clock_drift,
+                                              current_mic_level,
+                                              key_pressed,
+                                              new_mic_level);
+    }
+
+    // 2) Calculate energy for microphone level monitoring
     // calculate the energy
     float        energy  = 0;
-    const short *samples = (const short *) audio_samples;
-    for (size_t index = 0; index < num_samples * num_channels; index++)
+    const short *samples = (const short *) audio_data;
+
+    for (size_t index = 0; index < number_of_frames * number_of_channels; index++)
     {
         float sample = (static_cast<float>(samples[index]) / (float) 32767);
         energy += sample * sample;
     }
-
+    float gain = mGain.load(std::memory_order_relaxed);
+    energy     = energy * gain * gain;
     // smooth it.
     size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]);
     float  totalSum    = 0;
@@ -78,18 +116,59 @@ void LLAudioDeviceObserver::OnCaptureData(const void    *audio_samples,
     }
     mSumVector[i] = energy;
     totalSum += energy;
-    mMicrophoneEnergy = std::sqrt(totalSum / (num_samples * buffer_size));
+    mMicrophoneEnergy = std::sqrt(totalSum / (number_of_frames * number_of_channels * buffer_size));
+
+    return ret;
 }
-void LLAudioDeviceObserver::OnRenderData(const void    *audio_samples,
-                                         const size_t   num_samples,
-                                         const size_t   bytes_per_sample,
-                                         const size_t   num_channels,
-                                         const uint32_t samples_per_sec)
+int32_t LLWebRTCAudioTransport::NeedMorePlayData(size_t   number_of_frames,
+                                                 size_t   bytes_per_frame,
+                                                 size_t   number_of_channels,
+                                                 uint32_t samples_per_sec,
+                                                 void*    audio_data,
+                                                 size_t&  number_of_samples_out,
+                                                 int64_t* elapsed_time_ms,
+                                                 int64_t* ntp_time_ms)
 {
+    auto* engine = engine_.load(std::memory_order_acquire);
+    if (!engine)
+    {
+        // No engine sink; output silence to be safe.
+        const size_t bytes = number_of_frames * bytes_per_frame * number_of_channels;
+        memset(audio_data, 0, bytes);
+        number_of_samples_out = bytes_per_frame;
+        return 0;
+    }
+
+    // Only the engine should fill the buffer.
+    return engine->NeedMorePlayData(number_of_frames, +                                    bytes_per_frame, +                                    number_of_channels, +                                    samples_per_sec, +                                    audio_data, +                                    number_of_samples_out, +                                    elapsed_time_ms, +                                    ntp_time_ms);  } -LLCustomProcessor::LLCustomProcessor() : mSampleRateHz(0), mNumChannels(0), mMicrophoneEnergy(0.0), mGain(1.0) +void LLWebRTCAudioTransport::PullRenderData(int      bits_per_sample, +                                            int      sample_rate, +                                            size_t   number_of_channels, +                                            size_t   number_of_frames, +                                            void*    audio_data, +                                            int64_t* elapsed_time_ms, +                                            int64_t* ntp_time_ms) +{ +    auto* engine = engine_.load(std::memory_order_acquire); + +    if (engine) +    { +        engine +            ->PullRenderData(bits_per_sample, sample_rate, number_of_channels, number_of_frames, audio_data, elapsed_time_ms, ntp_time_ms); +    } +} + +LLCustomProcessor::LLCustomProcessor(LLCustomProcessorStatePtr state) : mSampleRateHz(0), mNumChannels(0), mState(state)  {      memset(mSumVector, 0, sizeof(mSumVector));  } @@ -101,40 +180,61 @@ void LLCustomProcessor::Initialize(int sample_rate_hz, int num_channels)      memset(mSumVector, 0, sizeof(mSumVector));  } -void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) +void LLCustomProcessor::Process(webrtc::AudioBuffer *audio)  { -    webrtc::StreamConfig stream_config; -    stream_config.set_sample_rate_hz(mSampleRateHz); -    stream_config.set_num_channels(mNumChannels); -    std::vector<float *> frame; -    std::vector<float>   frame_samples; - -    if (audio_in->num_channels() < 1 || audio_in->num_frames() < 480) +    if (audio->num_channels() < 1 || audio->num_frames() < 480)      {          return;      } -    // grab the input audio -    frame_samples.resize(stream_config.num_samples()); -    frame.resize(stream_config.num_channels()); -    for (size_t ch = 0; ch < stream_config.num_channels(); ++ch) +    // calculate the energy + +    float desired_gain = mState->getGain(); +    if (mState->getDirty())      { -        frame[ch] = &(frame_samples)[ch * stream_config.num_frames()]; +        // We'll delay ramping by 30ms in order to clear out buffers that may +        // have had content before muting.  And for the last 20ms, we'll ramp +        // down or up smoothly. +        mRampFrames = 5; + +        // we've changed our desired gain, so set the incremental +        // gain change so that we smoothly step over 20ms +        mGainStep = (desired_gain - mCurrentGain) / (mSampleRateHz / 50);      } -    audio_in->CopyTo(stream_config, &frame[0]); - -    // calculate the energy -    float energy = 0; -    for (size_t index = 0; index < stream_config.num_samples(); index++) +    if (mRampFrames)      { -        float sample = frame_samples[index]; -        sample       = sample * mGain; // apply gain -        frame_samples[index] = sample; // write processed sample back to buffer. 
-        energy += sample * sample; +        if (mRampFrames-- > 2) +        { +            // don't change the gain if we're still in the 'don't move' phase +            mGainStep = 0.0f; +        } +    } +    else +    { +        // We've ramped all the way down, so don't step the gain any more and +        // just maintaint he current gain. +        mGainStep = 0.0f; +        mCurrentGain = desired_gain;      } -    audio_in->CopyFrom(&frame[0], stream_config); +    float energy       = 0; + +    auto chans = audio->channels(); +    for (size_t ch = 0; ch < audio->num_channels(); ch++) +    { +        float* frame_samples = chans[ch]; +        float  gain          = mCurrentGain; +        for (size_t index = 0; index < audio->num_frames(); index++) +        { +            float sample         = frame_samples[index]; +            sample               = sample * gain;    // apply gain +            frame_samples[index] = sample;        // write processed sample back to buffer. +            energy += sample * sample; +            gain += mGainStep; +        } +    } +    mCurrentGain += audio->num_frames() * mGainStep;      // smooth it.      size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]); @@ -147,7 +247,7 @@ void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in)      }      mSumVector[i] = energy;      totalSum += energy; -    mMicrophoneEnergy = std::sqrt(totalSum / (stream_config.num_samples() * buffer_size)); +    mState->setMicrophoneEnergy(std::sqrt(totalSum / (audio->num_channels() * audio->num_frames() * buffer_size)));  }  // @@ -159,91 +259,54 @@ LLWebRTCImpl::LLWebRTCImpl(LLWebRTCLogCallback* logCallback) :      mPeerCustomProcessor(nullptr),      mMute(true),      mTuningMode(false), -    mPlayoutDevice(0), -    mRecordingDevice(0), -    mTuningAudioDeviceObserver(nullptr) +    mDevicesDeploying(0), +    mGain(0.0f)  {  }  void LLWebRTCImpl::init()  { -    mPlayoutDevice   = 0; -    mRecordingDevice = 0; -    rtc::InitializeSSL(); +    webrtc::InitializeSSL();      // Normal logging is rather spammy, so turn it off. -    rtc::LogMessage::LogToDebug(rtc::LS_NONE); -    rtc::LogMessage::SetLogToStderr(true); -    rtc::LogMessage::AddLogToStream(mLogSink, rtc::LS_VERBOSE); +    webrtc::LogMessage::LogToDebug(webrtc::LS_NONE); +    webrtc::LogMessage::SetLogToStderr(true); +    webrtc::LogMessage::AddLogToStream(mLogSink, webrtc::LS_VERBOSE);      mTaskQueueFactory = webrtc::CreateDefaultTaskQueueFactory();      // Create the native threads. 
-    mNetworkThread = rtc::Thread::CreateWithSocketServer(); +    mNetworkThread = webrtc::Thread::CreateWithSocketServer();      mNetworkThread->SetName("WebRTCNetworkThread", nullptr);      mNetworkThread->Start(); -    mWorkerThread = rtc::Thread::Create(); +    mWorkerThread = webrtc::Thread::Create();      mWorkerThread->SetName("WebRTCWorkerThread", nullptr);      mWorkerThread->Start(); -    mSignalingThread = rtc::Thread::Create(); +    mSignalingThread = webrtc::Thread::Create();      mSignalingThread->SetName("WebRTCSignalingThread", nullptr);      mSignalingThread->Start(); -    mTuningAudioDeviceObserver = new LLAudioDeviceObserver; -    mWorkerThread->PostTask( -        [this]() -        { -            // Initialize the audio devices on the Worker Thread -            mTuningDeviceModule = -                webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, -                                                          mTaskQueueFactory.get(), -                                                          std::unique_ptr<webrtc::AudioDeviceDataObserver>(mTuningAudioDeviceObserver)); - -            mTuningDeviceModule->Init(); -            mTuningDeviceModule->SetPlayoutDevice(mPlayoutDevice); -            mTuningDeviceModule->SetRecordingDevice(mRecordingDevice); -            mTuningDeviceModule->EnableBuiltInAEC(false); -#if !CM_WEBRTC -            mTuningDeviceModule->SetAudioDeviceSink(this); -#endif -            mTuningDeviceModule->InitMicrophone(); -            mTuningDeviceModule->InitSpeaker(); -            mTuningDeviceModule->SetStereoRecording(false); -            mTuningDeviceModule->SetStereoPlayout(true); -            mTuningDeviceModule->InitRecording(); -            mTuningDeviceModule->InitPlayout(); -            updateDevices(); -        }); -      mWorkerThread->BlockingCall(          [this]()          { -            // the peer device module doesn't need an observer -            // as we pull peer data after audio processing. -            mPeerDeviceModule = webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, -                                                                          mTaskQueueFactory.get(), -                                                                          nullptr); -            mPeerDeviceModule->Init(); -            mPeerDeviceModule->SetPlayoutDevice(mPlayoutDevice); -            mPeerDeviceModule->SetRecordingDevice(mRecordingDevice); -            mPeerDeviceModule->EnableBuiltInAEC(false); -            mPeerDeviceModule->InitMicrophone(); -            mPeerDeviceModule->InitSpeaker(); +            webrtc::scoped_refptr<webrtc::AudioDeviceModule> realADM = +                webrtc::AudioDeviceModule::Create(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, mTaskQueueFactory.get()); +            mDeviceModule = webrtc::make_ref_counted<LLWebRTCAudioDeviceModule>(realADM); +            mDeviceModule->SetObserver(this);          });      // The custom processor allows us to retrieve audio data (and levels)      // from after other audio processing such as AEC, AGC, etc. 
-    mPeerCustomProcessor = new LLCustomProcessor; -    webrtc::AudioProcessingBuilder apb; -    apb.SetCapturePostProcessing(std::unique_ptr<webrtc::CustomProcessing>(mPeerCustomProcessor)); -    mAudioProcessingModule = apb.Create(); +    mPeerCustomProcessor = std::make_shared<LLCustomProcessorState>(); +    webrtc::BuiltinAudioProcessingBuilder apb; +    apb.SetCapturePostProcessing(std::make_unique<LLCustomProcessor>(mPeerCustomProcessor)); +    mAudioProcessingModule = apb.Build(webrtc::CreateEnvironment());      webrtc::AudioProcessing::Config apm_config;      apm_config.echo_canceller.enabled         = false;      apm_config.echo_canceller.mobile_mode     = false;      apm_config.gain_controller1.enabled       = false; -    apm_config.gain_controller1.mode          = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog; -    apm_config.gain_controller2.enabled       = false; +    apm_config.gain_controller2.enabled       = true;      apm_config.high_pass_filter.enabled       = true;      apm_config.noise_suppression.enabled      = true;      apm_config.noise_suppression.level        = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh; @@ -254,6 +317,7 @@ void LLWebRTCImpl::init()      mAudioProcessingModule->ApplyConfig(apm_config);      webrtc::ProcessingConfig processing_config; +      processing_config.input_stream().set_num_channels(2);      processing_config.input_stream().set_sample_rate_hz(48000);      processing_config.output_stream().set_num_channels(2); @@ -268,13 +332,19 @@ void LLWebRTCImpl::init()      mPeerConnectionFactory = webrtc::CreatePeerConnectionFactory(mNetworkThread.get(),                                                                   mWorkerThread.get(),                                                                   mSignalingThread.get(), -                                                                 mPeerDeviceModule, +                                                                 mDeviceModule,                                                                   webrtc::CreateBuiltinAudioEncoderFactory(),                                                                   webrtc::CreateBuiltinAudioDecoderFactory(),                                                                   nullptr /* video_encoder_factory */,                                                                   nullptr /* video_decoder_factory */,                                                                   nullptr /* audio_mixer */,                                                                   mAudioProcessingModule); +    mWorkerThread->PostTask( +        [this]() +        { +            mDeviceModule->EnableBuiltInAEC(false); +            updateDevices(); +        });  } @@ -296,64 +366,16 @@ void LLWebRTCImpl::terminate()      mWorkerThread->BlockingCall(          [this]()          { -            if (mTuningDeviceModule) -            { -                mTuningDeviceModule->StopRecording(); -                mTuningDeviceModule->Terminate(); -            } -            if (mPeerDeviceModule) +            if (mDeviceModule)              { -                mPeerDeviceModule->StopRecording(); -                mPeerDeviceModule->Terminate(); -            } -            mTuningDeviceModule = nullptr; -            mPeerDeviceModule   = nullptr; -            mTaskQueueFactory   = nullptr; -        }); -    rtc::LogMessage::RemoveLogToStream(mLogSink); -} - -// -// Devices functions -// -// Most device-related functionality needs to happen -// on the 
worker thread (the audio thread,) so those calls will be -// proxied over to that thread. -// -void LLWebRTCImpl::setRecording(bool recording) -{ -    mWorkerThread->PostTask( -        [this, recording]() -        { -            if (recording) -            { -                mPeerDeviceModule->SetStereoRecording(false); -                mPeerDeviceModule->InitRecording(); -                mPeerDeviceModule->StartRecording(); -            } -            else -            { -                mPeerDeviceModule->StopRecording(); -            } -        }); -} - -void LLWebRTCImpl::setPlayout(bool playing) -{ -    mWorkerThread->PostTask( -        [this, playing]() -        { -            if (playing) -            { -                mPeerDeviceModule->SetStereoPlayout(true); -                mPeerDeviceModule->InitPlayout(); -                mPeerDeviceModule->StartPlayout(); -            } -            else -            { -                mPeerDeviceModule->StopPlayout(); +                mDeviceModule->StopRecording(); +                mDeviceModule->StopPlayout(); +                mDeviceModule->Terminate();              } +            mDeviceModule     = nullptr; +            mTaskQueueFactory = nullptr;          }); +    webrtc::LogMessage::RemoveLogToStream(mLogSink);  }  void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config) @@ -361,9 +383,9 @@ void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config)      webrtc::AudioProcessing::Config apm_config;      apm_config.echo_canceller.enabled         = config.mEchoCancellation;      apm_config.echo_canceller.mobile_mode     = false; -    apm_config.gain_controller1.enabled       = config.mAGC; -    apm_config.gain_controller1.mode          = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog; -    apm_config.gain_controller2.enabled       = false; +    apm_config.gain_controller1.enabled       = false; +    apm_config.gain_controller2.enabled       = config.mAGC; +    apm_config.gain_controller2.adaptive_digital.enabled = true; // auto-level speech      apm_config.high_pass_filter.enabled       = true;      apm_config.transient_suppression.enabled  = true;      apm_config.pipeline.multi_channel_render  = true; @@ -416,142 +438,134 @@ void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer)      }  } -void ll_set_device_module_capture_device(rtc::scoped_refptr<webrtc::AudioDeviceModule> device_module, int16_t device) +// must be run in the worker thread. 
+void LLWebRTCImpl::workerDeployDevices()  { +    int16_t recordingDevice = RECORD_DEVICE_DEFAULT;  #if WEBRTC_WIN -    if (device < 0) -    { -        device_module->SetRecordingDevice(webrtc::AudioDeviceModule::kDefaultDevice); -    } -    else -    { -        device_module->SetRecordingDevice(device); -    } +    int16_t recording_device_start = 0;  #else -    // passed in default is -1, but the device list -    // has it at 0 -    device_module->SetRecordingDevice(device + 1); +    int16_t recording_device_start = 1;  #endif -    device_module->InitMicrophone(); -} -void LLWebRTCImpl::setCaptureDevice(const std::string &id) -{ -    int16_t recordingDevice = RECORD_DEVICE_DEFAULT; -    if (id != "Default") +    if (mRecordingDevice != "Default")      { -        for (int16_t i = 0; i < mRecordingDeviceList.size(); i++) +        for (int16_t i = recording_device_start; i < mRecordingDeviceList.size(); i++)          { -            if (mRecordingDeviceList[i].mID == id) +            if (mRecordingDeviceList[i].mID == mRecordingDevice)              {                  recordingDevice = i;                  break;              }          }      } -    if (recordingDevice == mRecordingDevice) -    { -        return; -    } -    mRecordingDevice = recordingDevice; -    if (mTuningMode) -    { -        mWorkerThread->PostTask([this, recordingDevice]() -            { -                ll_set_device_module_capture_device(mTuningDeviceModule, recordingDevice); -            }); -    } -    else -    { -        mWorkerThread->PostTask([this, recordingDevice]() -            { -                bool recording = mPeerDeviceModule->Recording(); -                if (recording) -                { -                    mPeerDeviceModule->StopRecording(); -                } -                ll_set_device_module_capture_device(mPeerDeviceModule, recordingDevice); -                if (recording) -                { -                    mPeerDeviceModule->SetStereoRecording(false); -                    mPeerDeviceModule->InitRecording(); -                    mPeerDeviceModule->StartRecording(); -                } -            }); -    } -} - -void ll_set_device_module_render_device(rtc::scoped_refptr<webrtc::AudioDeviceModule> device_module, int16_t device) -{ +    mDeviceModule->StopPlayout(); +    mDeviceModule->ForceStopRecording();  #if WEBRTC_WIN -    if (device < 0) +    if (recordingDevice < 0)      { -        device_module->SetPlayoutDevice(webrtc::AudioDeviceModule::kDefaultDevice); +        mDeviceModule->SetRecordingDevice((webrtc::AudioDeviceModule::WindowsDeviceType)recordingDevice);      }      else      { -        device_module->SetPlayoutDevice(device); +        mDeviceModule->SetRecordingDevice(recordingDevice);      }  #else -    device_module->SetPlayoutDevice(device + 1); +    mDeviceModule->SetRecordingDevice(recordingDevice);  #endif -    device_module->InitSpeaker(); -} +    mDeviceModule->InitMicrophone(); +    mDeviceModule->SetStereoRecording(false); +    mDeviceModule->InitRecording(); -void LLWebRTCImpl::setRenderDevice(const std::string &id) -{      int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT; -    if (id != "Default") +#if WEBRTC_WIN +    int16_t playout_device_start = 0; +#else +    int16_t playout_device_start = 1; +#endif +    if (mPlayoutDevice != "Default")      { -        for (int16_t i = 0; i < mPlayoutDeviceList.size(); i++) +        for (int16_t i = playout_device_start; i < mPlayoutDeviceList.size(); i++)          { -            if (mPlayoutDeviceList[i].mID == id) +         
   if (mPlayoutDeviceList[i].mID == mPlayoutDevice)              {                  playoutDevice = i;                  break;              }          }      } -    if (playoutDevice == mPlayoutDevice) + +#if WEBRTC_WIN +    if (playoutDevice < 0) +    { +        mDeviceModule->SetPlayoutDevice((webrtc::AudioDeviceModule::WindowsDeviceType)playoutDevice); +    } +    else      { -        return; +        mDeviceModule->SetPlayoutDevice(playoutDevice);      } -    mPlayoutDevice = playoutDevice; +#else +    mDeviceModule->SetPlayoutDevice(playoutDevice); +#endif +    mDeviceModule->InitSpeaker(); +    mDeviceModule->SetStereoPlayout(true); +    mDeviceModule->InitPlayout(); -    if (mTuningMode) +    if ((!mMute && mPeerConnections.size()) || mTuningMode)      { -        mWorkerThread->PostTask( -            [this, playoutDevice]() -            { -                ll_set_device_module_render_device(mTuningDeviceModule, playoutDevice); -            }); +        mDeviceModule->ForceStartRecording();      } -    else + +    if (!mTuningMode)      { -        mWorkerThread->PostTask( -            [this, playoutDevice]() +        mDeviceModule->StartPlayout(); +    } +    mSignalingThread->PostTask( +        [this] +        { +            for (auto& connection : mPeerConnections)              { -                bool playing = mPeerDeviceModule->Playing(); -                if (playing) +                if (mTuningMode)                  { -                    mPeerDeviceModule->StopPlayout(); +                    connection->enableSenderTracks(false);                  } -                ll_set_device_module_render_device(mPeerDeviceModule, playoutDevice); -                if (playing) +                else                  { -                    mPeerDeviceModule->SetStereoPlayout(true); -                    mPeerDeviceModule->InitPlayout(); -                    mPeerDeviceModule->StartPlayout(); +                    connection->resetMute();                  } -            }); +                connection->enableReceiverTracks(!mTuningMode); +            } +            if (1 < mDevicesDeploying.fetch_sub(1, std::memory_order_relaxed)) +            { +                mWorkerThread->PostTask([this] { workerDeployDevices(); }); +            } +        }); +} + +void LLWebRTCImpl::setCaptureDevice(const std::string &id) +{ + +    if (mRecordingDevice != id) +    { +        mRecordingDevice = id; +        deployDevices(); +    } +} + +void LLWebRTCImpl::setRenderDevice(const std::string &id) +{ +    if (mPlayoutDevice != id) +    { +        mPlayoutDevice = id; +        deployDevices();      }  }  // updateDevices needs to happen on the worker thread.  
void LLWebRTCImpl::updateDevices()  { -    int16_t renderDeviceCount  = mTuningDeviceModule->PlayoutDevices(); +    int16_t renderDeviceCount  = mDeviceModule->PlayoutDevices();      mPlayoutDeviceList.clear();  #if WEBRTC_WIN @@ -565,11 +579,11 @@ void LLWebRTCImpl::updateDevices()      {          char name[webrtc::kAdmMaxDeviceNameSize];          char guid[webrtc::kAdmMaxGuidSize]; -        mTuningDeviceModule->PlayoutDeviceName(index, name, guid); +        mDeviceModule->PlayoutDeviceName(index, name, guid);          mPlayoutDeviceList.emplace_back(name, guid);      } -    int16_t captureDeviceCount        = mTuningDeviceModule->RecordingDevices(); +    int16_t captureDeviceCount        = mDeviceModule->RecordingDevices();      mRecordingDeviceList.clear();  #if WEBRTC_WIN @@ -583,7 +597,7 @@ void LLWebRTCImpl::updateDevices()      {          char name[webrtc::kAdmMaxDeviceNameSize];          char guid[webrtc::kAdmMaxGuidSize]; -        mTuningDeviceModule->RecordingDeviceName(index, name, guid); +        mDeviceModule->RecordingDeviceName(index, name, guid);          mRecordingDeviceList.emplace_back(name, guid);      } @@ -595,11 +609,7 @@ void LLWebRTCImpl::updateDevices()  void LLWebRTCImpl::OnDevicesUpdated()  { -    // reset these to a bad value so an update is forced -    mRecordingDevice = RECORD_DEVICE_BAD; -    mPlayoutDevice   = PLAYOUT_DEVICE_BAD; - -    updateDevices(); +    deployDevices();  } @@ -607,60 +617,109 @@ void LLWebRTCImpl::setTuningMode(bool enable)  {      mTuningMode = enable;      mWorkerThread->PostTask( -        [this, enable] { -            if (enable) -            { -                mPeerDeviceModule->StopRecording(); -                mPeerDeviceModule->StopPlayout(); -                ll_set_device_module_render_device(mTuningDeviceModule, mPlayoutDevice); -                ll_set_device_module_capture_device(mTuningDeviceModule, mRecordingDevice); -                mTuningDeviceModule->InitPlayout(); -                mTuningDeviceModule->InitRecording(); -                mTuningDeviceModule->StartRecording(); -                // TODO:  Starting Playout on the TDM appears to create an audio artifact (click) -                // in this case, so disabling it for now.  We may have to do something different -                // if we enable 'echo playback' via the TDM when tuning. 
-                //mTuningDeviceModule->StartPlayout(); -            } -            else -            { -                mTuningDeviceModule->StopRecording(); -                //mTuningDeviceModule->StopPlayout(); -                ll_set_device_module_render_device(mPeerDeviceModule, mPlayoutDevice); -                ll_set_device_module_capture_device(mPeerDeviceModule, mRecordingDevice); -                mPeerDeviceModule->SetStereoPlayout(true); -                mPeerDeviceModule->SetStereoRecording(false); -                mPeerDeviceModule->InitPlayout(); -                mPeerDeviceModule->InitRecording(); -                mPeerDeviceModule->StartPlayout(); -                mPeerDeviceModule->StartRecording(); -            } -        } -    ); -    mSignalingThread->PostTask( -        [this, enable] +        [this]          { -            for (auto &connection : mPeerConnections) -            { -                if (enable) +            mDeviceModule->SetTuning(mTuningMode, mMute); +            mSignalingThread->PostTask( +                [this]                  { -                    connection->enableSenderTracks(false); -                } -                else -                { -                    connection->resetMute(); -                } -                connection->enableReceiverTracks(!enable); -            } +                    for (auto& connection : mPeerConnections) +                    { +                        if (mTuningMode) +                        { +                            connection->enableSenderTracks(false); +                        } +                        else +                        { +                            connection->resetMute(); +                        } +                        connection->enableReceiverTracks(!mTuningMode); +                    } +                });          });  } -float LLWebRTCImpl::getTuningAudioLevel() { return -20 * log10f(mTuningAudioDeviceObserver->getMicrophoneEnergy()); } +void LLWebRTCImpl::deployDevices() +{ +    if (0 < mDevicesDeploying.fetch_add(1, std::memory_order_relaxed)) +    { +        return; +    } +    mWorkerThread->PostTask( +        [this] { +            workerDeployDevices(); +        }); +} + +float LLWebRTCImpl::getTuningAudioLevel() +{ +    return mDeviceModule ? -20 * log10f(mDeviceModule->GetMicrophoneEnergy()) : std::numeric_limits<float>::infinity(); +} -float LLWebRTCImpl::getPeerConnectionAudioLevel() { return -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()); } +void LLWebRTCImpl::setTuningMicGain(float gain) +{ +    if (mTuningMode && mDeviceModule) +    { +        mDeviceModule->SetTuningMicGain(gain); +    } +} + +float LLWebRTCImpl::getPeerConnectionAudioLevel() +{ +    return mTuningMode ? std::numeric_limits<float>::infinity() +                       : (mPeerCustomProcessor ? 
-20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()) +                                               : std::numeric_limits<float>::infinity()); +} -void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->setGain(gain); } +void LLWebRTCImpl::setMicGain(float gain) +{ +    mGain = gain; +    if (!mTuningMode && mPeerCustomProcessor) +    { +        mPeerCustomProcessor->setGain(gain); +    } +} +void LLWebRTCImpl::setMute(bool mute, int delay_ms) +{ +    if (mMute != mute) +    { +        mMute = mute; +        intSetMute(mute, delay_ms); +    } +} + +void LLWebRTCImpl::intSetMute(bool mute, int delay_ms) +{ +    if (mPeerCustomProcessor) +    { +        mPeerCustomProcessor->setGain(mMute ? 0.0f : mGain); +    } +    if (mMute) +    { +        mWorkerThread->PostDelayedTask( +            [this] +            { +                if (mDeviceModule) +                { +                    mDeviceModule->ForceStopRecording(); +                } +            }, +            webrtc::TimeDelta::Millis(delay_ms)); +    } +    else +    { +        mWorkerThread->PostTask( +            [this] +            { +                if (mDeviceModule) +                { +                    mDeviceModule->InitRecording(); +                    mDeviceModule->ForceStartRecording(); +                } +            }); +    } +}  //  // Peer Connection Helpers @@ -668,34 +727,31 @@ void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->set  LLWebRTCPeerConnectionInterface *LLWebRTCImpl::newPeerConnection()  { -    rtc::scoped_refptr<LLWebRTCPeerConnectionImpl> peerConnection = rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>(new rtc::RefCountedObject<LLWebRTCPeerConnectionImpl>()); +    bool empty = mPeerConnections.empty(); +    webrtc::scoped_refptr<LLWebRTCPeerConnectionImpl> peerConnection = webrtc::scoped_refptr<LLWebRTCPeerConnectionImpl>(new webrtc::RefCountedObject<LLWebRTCPeerConnectionImpl>());      peerConnection->init(this); - -    mPeerConnections.emplace_back(peerConnection); -    // Should it really start disabled? 
-    // Seems like something doesn't get the memo and senders need to be reset later -    // to remove the voice indicator from taskbar -    peerConnection->enableSenderTracks(false);      if (mPeerConnections.empty())      { -        setRecording(true); -        setPlayout(true); +        intSetMute(mMute);      } +    mPeerConnections.emplace_back(peerConnection); + +    peerConnection->enableSenderTracks(false); +    peerConnection->resetMute();      return peerConnection.get();  }  void LLWebRTCImpl::freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection)  { -    std::vector<rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>>::iterator it = +    std::vector<webrtc::scoped_refptr<LLWebRTCPeerConnectionImpl>>::iterator it =      std::find(mPeerConnections.begin(), mPeerConnections.end(), peer_connection);      if (it != mPeerConnections.end())      {          mPeerConnections.erase(it); -    } -    if (mPeerConnections.empty()) -    { -        setRecording(false); -        setPlayout(false); +        if (mPeerConnections.empty()) +        { +            intSetMute(true); +        }      }  } @@ -731,7 +787,7 @@ void LLWebRTCPeerConnectionImpl::init(LLWebRTCImpl * webrtc_impl)  }  void LLWebRTCPeerConnectionImpl::terminate()  { -    mWebRTCImpl->SignalingBlockingCall( +    mWebRTCImpl->PostSignalingTask(          [this]()          {              if (mPeerConnection) @@ -755,7 +811,6 @@ void LLWebRTCPeerConnectionImpl::terminate()                          track->set_enabled(false);                      }                  } -                mPeerConnection->SetAudioRecording(false);                  mPeerConnection->Close();                  if (mLocalStream) @@ -842,7 +897,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti                  mDataChannel->RegisterObserver(this);              } -            cricket::AudioOptions audioOptions; +            webrtc::AudioOptions audioOptions;              audioOptions.auto_gain_control = true;              audioOptions.echo_cancellation = true;              audioOptions.noise_suppression = true; @@ -850,7 +905,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti              mLocalStream = mPeerConnectionFactory->CreateLocalMediaStream("SLStream"); -            rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track( +            webrtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(                  mPeerConnectionFactory->CreateAudioTrack("SLAudio", mPeerConnectionFactory->CreateAudioSource(audioOptions).get()));              audio_track->set_enabled(false);              mLocalStream->AddTrack(audio_track); @@ -864,7 +919,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti                  webrtc::RtpParameters      params;                  webrtc::RtpCodecParameters codecparam;                  codecparam.name                       = "opus"; -                codecparam.kind                       = cricket::MEDIA_TYPE_AUDIO; +                codecparam.kind                       = webrtc::MediaType::AUDIO;                  codecparam.clock_rate                 = 48000;                  codecparam.num_channels               = 2;                  codecparam.parameters["stereo"]       = "1"; @@ -879,7 +934,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti                  webrtc::RtpParameters      params;                  webrtc::RtpCodecParameters codecparam;                  
codecparam.name                       = "opus"; -                codecparam.kind                       = cricket::MEDIA_TYPE_AUDIO; +                codecparam.kind                       = webrtc::MediaType::AUDIO;                  codecparam.clock_rate                 = 48000;                  codecparam.num_channels               = 2;                  codecparam.parameters["stereo"]       = "1"; @@ -906,7 +961,6 @@ void LLWebRTCPeerConnectionImpl::enableSenderTracks(bool enable)      // set_enabled shouldn't be done on the worker thread.      if (mPeerConnection)      { -        mPeerConnection->SetAudioRecording(enable);          auto senders = mPeerConnection->GetSenders();          for (auto &sender : senders)          { @@ -940,7 +994,7 @@ void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp)                                     {                                         RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->peer_connection_state();                                         mPeerConnection->SetRemoteDescription(webrtc::CreateSessionDescription(webrtc::SdpType::kAnswer, sdp), -                                                                             rtc::scoped_refptr<webrtc::SetRemoteDescriptionObserverInterface>(this)); +                                                                             webrtc::scoped_refptr<webrtc::SetRemoteDescriptionObserverInterface>(this));                                     }                                 });  } @@ -953,22 +1007,22 @@ void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp)  void LLWebRTCPeerConnectionImpl::setMute(bool mute)  {      EMicMuteState new_state = mute ? MUTE_MUTED : MUTE_UNMUTED; -    if (new_state == mMute) -    { -        return; // no change -    } + +    // even if mute hasn't changed, we still need to update the mute +    // state on the connections to handle cases where the 'Default' device +    // has changed in the OS (unplugged headset, etc.) which messes +    // with the mute state. +      bool force_reset = mMute == MUTE_INITIAL && mute;      bool enable = !mute;      mMute = new_state; +      mWebRTCImpl->PostSignalingTask(          [this, force_reset, enable]()          {          if (mPeerConnection)          { -            // SetAudioRecording must be called before enabling/disabling tracks. -            mPeerConnection->SetAudioRecording(enable); -              auto senders = mPeerConnection->GetSenders();              RTC_LOG(LS_INFO) << __FUNCTION__ << (mMute ? "disabling" : "enabling") << " streams count " << senders.size(); @@ -1048,14 +1102,14 @@ void LLWebRTCPeerConnectionImpl::setSendVolume(float volume)  // PeerConnectionObserver implementation.  
// -void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface>                     receiver, -                                            const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) +void LLWebRTCPeerConnectionImpl::OnAddTrack(webrtc::scoped_refptr<webrtc::RtpReceiverInterface>                     receiver, +                                            const std::vector<webrtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams)  {      RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id();      webrtc::RtpParameters      params;      webrtc::RtpCodecParameters codecparam;      codecparam.name                       = "opus"; -    codecparam.kind                       = cricket::MEDIA_TYPE_AUDIO; +    codecparam.kind                       = webrtc::MediaType::AUDIO;      codecparam.clock_rate                 = 48000;      codecparam.num_channels               = 2;      codecparam.parameters["stereo"]       = "1"; @@ -1064,12 +1118,12 @@ void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiv      receiver->SetParameters(params);  } -void LLWebRTCPeerConnectionImpl::OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) +void LLWebRTCPeerConnectionImpl::OnRemoveTrack(webrtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver)  {      RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id();  } -void LLWebRTCPeerConnectionImpl::OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) +void LLWebRTCPeerConnectionImpl::OnDataChannel(webrtc::scoped_refptr<webrtc::DataChannelInterface> channel)  {      if (mDataChannel)      { @@ -1156,23 +1210,23 @@ static std::string iceCandidateToTrickleString(const webrtc::IceCandidateInterfa      candidate->candidate().address().ipaddr().ToString() << " " <<      candidate->candidate().address().PortAsString() << " typ "; -    if (candidate->candidate().type() == cricket::LOCAL_PORT_TYPE) +    if (candidate->candidate().type() == webrtc::IceCandidateType::kHost)      {          candidate_stream << "host";      } -    else if (candidate->candidate().type() == cricket::STUN_PORT_TYPE) +    else if (candidate->candidate().type() == webrtc::IceCandidateType::kSrflx)      {          candidate_stream << "srflx " <<          "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " <<          "rport " << candidate->candidate().related_address().PortAsString();      } -    else if (candidate->candidate().type() == cricket::RELAY_PORT_TYPE) +    else if (candidate->candidate().type() == webrtc::IceCandidateType::kRelay)      {          candidate_stream << "relay " <<          "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " <<          "rport " << candidate->candidate().related_address().PortAsString();      } -    else if (candidate->candidate().type() == cricket::PRFLX_PORT_TYPE) +    else if (candidate->candidate().type() == webrtc::IceCandidateType::kPrflx)      {          candidate_stream << "prflx " <<          "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << @@ -1267,7 +1321,7 @@ void LLWebRTCPeerConnectionImpl::OnSuccess(webrtc::SessionDescriptionInterface *     mPeerConnection->SetLocalDescription(std::unique_ptr<webrtc::SessionDescriptionInterface>(                                                       webrtc::CreateSessionDescription(webrtc::SdpType::kOffer, mangled_sdp)), -                                                 
rtc::scoped_refptr<webrtc::SetLocalDescriptionObserverInterface>(this)); +                                                 webrtc::scoped_refptr<webrtc::SetLocalDescriptionObserverInterface>(this));  } @@ -1377,7 +1431,7 @@ void LLWebRTCPeerConnectionImpl::sendData(const std::string& data, bool binary)  {      if (mDataChannel)      { -        rtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length()); +        webrtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length());          webrtc::DataBuffer     buffer(cowBuffer, binary);          mWebRTCImpl->PostNetworkTask([this, buffer]() {                  if (mDataChannel) diff --git a/indra/llwebrtc/llwebrtc.h b/indra/llwebrtc/llwebrtc.h index c6fdb909dd..7d06b7d2b4 100644 --- a/indra/llwebrtc/llwebrtc.h +++ b/indra/llwebrtc/llwebrtc.h @@ -159,7 +159,10 @@ class LLWebRTCDeviceInterface      virtual void setTuningMode(bool enable) = 0;      virtual float getTuningAudioLevel() = 0; // for use during tuning      virtual float getPeerConnectionAudioLevel() = 0; // for use when not tuning -    virtual void setPeerConnectionGain(float gain) = 0; +    virtual void setMicGain(float gain) = 0; +    virtual void setTuningMicGain(float gain)        = 0; + +    virtual void setMute(bool mute, int delay_ms = 0) = 0;  };  // LLWebRTCAudioInterface provides the viewer with a way diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index dfdb19e9be..7b23b11208 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -54,12 +54,12 @@  #include "rtc_base/ref_counted_object.h"  #include "rtc_base/ssl_adapter.h"  #include "rtc_base/thread.h" +#include "rtc_base/logging.h"  #include "api/peer_connection_interface.h"  #include "api/media_stream_interface.h"  #include "api/create_peerconnection_factory.h"  #include "modules/audio_device/include/audio_device.h"  #include "modules/audio_device/include/audio_device_data_observer.h" -#include "rtc_base/task_queue.h"  #include "api/task_queue/task_queue_factory.h"  #include "api/task_queue/default_task_queue_factory.h"  #include "modules/audio_device/include/audio_device_defines.h" @@ -69,35 +69,30 @@ namespace llwebrtc  class LLWebRTCPeerConnectionImpl; -class LLWebRTCLogSink : public rtc::LogSink { +class LLWebRTCLogSink : public webrtc::LogSink +{  public: -    LLWebRTCLogSink(LLWebRTCLogCallback* callback) : -    mCallback(callback) -    { -    } +    LLWebRTCLogSink(LLWebRTCLogCallback* callback) : mCallback(callback) {}      // Destructor: close the log file -    ~LLWebRTCLogSink() override -    { -    } +    ~LLWebRTCLogSink() override {} -    void OnLogMessage(const std::string& msg, -                      rtc::LoggingSeverity severity) override +    void OnLogMessage(const std::string& msg, webrtc::LoggingSeverity severity) override      {          if (mCallback)          { -            switch(severity) +            switch (severity)              { -                case rtc::LS_VERBOSE: +                case webrtc::LS_VERBOSE:                      mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);                      break; -                case rtc::LS_INFO: +                case webrtc::LS_INFO:                      mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);                      break; -                case rtc::LS_WARNING: +                case webrtc::LS_WARNING:                      mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);                      break; -                case 
rtc::LS_ERROR:
+                case webrtc::LS_ERROR:
                     mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);
                     break;
                 default:
@@ -118,67 +113,301 @@ private:
     LLWebRTCLogCallback* mCallback;
 };
-// Implements a class allowing capture of audio data
-// to determine audio level of the microphone.
-class LLAudioDeviceObserver : public webrtc::AudioDeviceDataObserver
+// -----------------------------------------------------------------------------
+// A proxy transport that forwards capture data to two AudioTransport sinks:
+//  - the "engine" (libwebrtc's VoiceEngine)
+//  - the "user" (your app's listener)
+//
+// Playout (NeedMorePlayData) goes only to the engine by default to avoid
+// double-writing into the output buffer. See notes below if you want a tap.
+// -----------------------------------------------------------------------------
+class LLWebRTCAudioTransport : public webrtc::AudioTransport
 {
-  public:
-    LLAudioDeviceObserver();
-
-    // Retrieve the RMS audio loudness
-    float getMicrophoneEnergy();
-
-    // Data retrieved from the caputure device is
-    // passed in here for processing.
-    void OnCaptureData(const void    *audio_samples,
-                       const size_t   num_samples,
-                       const size_t   bytes_per_sample,
-                       const size_t   num_channels,
-                       const uint32_t samples_per_sec) override;
-
-    // This is for data destined for the render device.
-    // not currently used.
-    void OnRenderData(const void    *audio_samples,
-                      const size_t   num_samples,
-                      const size_t   bytes_per_sample,
-                      const size_t   num_channels,
-                      const uint32_t samples_per_sec) override;
+public:
+    LLWebRTCAudioTransport();
+
+    void SetEngineTransport(webrtc::AudioTransport* t);
+
+    // -------- Capture path: fan out to both sinks --------
+    int32_t RecordedDataIsAvailable(const void* audio_data,
+                                    size_t      number_of_samples,
+                                    size_t      bytes_per_sample,
+                                    size_t      number_of_channels,
+                                    uint32_t    samples_per_sec,
+                                    uint32_t    total_delay_ms,
+                                    int32_t     clock_drift,
+                                    uint32_t    current_mic_level,
+                                    bool        key_pressed,
+                                    uint32_t&   new_mic_level) override;
+
+    // -------- Playout path: delegate to engine only --------
+    int32_t NeedMorePlayData(size_t   number_of_samples,
+                             size_t   bytes_per_sample,
+                             size_t   number_of_channels,
+                             uint32_t samples_per_sec,
+                             void*    audio_data,
+                             size_t&  number_of_samples_out,
+                             int64_t* elapsed_time_ms,
+                             int64_t* ntp_time_ms) override;
+
+    // Method to pull mixed render audio data from all active VoE channels.
+    // The data will not be passed as reference for audio processing internally.
+    void PullRenderData(int      bits_per_sample,
+                        int      sample_rate,
+                        size_t   number_of_channels,
+                        size_t   number_of_frames,
+                        void*    audio_data,
+                        int64_t* elapsed_time_ms,
+                        int64_t* ntp_time_ms) override;
+
+    float GetMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); }
+    void  SetGain(float gain) { mGain.store(gain, std::memory_order_relaxed); }
+
+private:
+    std::atomic<webrtc::AudioTransport*> engine_{ nullptr };
+    static const int                     NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames)
+    float                                mSumVector[NUM_PACKETS_TO_FILTER];
+    std::atomic<float>                   mMicrophoneEnergy;
+    std::atomic<float>                   mGain{ 0.0f };
-  protected:
-    static const int NUM_PACKETS_TO_FILTER = 30;  // 300 ms of smoothing (30 frames)
-    float mSumVector[NUM_PACKETS_TO_FILTER];
-    float mMicrophoneEnergy;
 };
+
+// -----------------------------------------------------------------------------
+// LLWebRTCAudioDeviceModule
+// - Wraps a real ADM to provide microphone energy for tuning
+// -----------------------------------------------------------------------------
+class LLWebRTCAudioDeviceModule : public webrtc::AudioDeviceModule
+{
+public:
+    explicit LLWebRTCAudioDeviceModule(webrtc::scoped_refptr<webrtc::AudioDeviceModule> inner) : inner_(std::move(inner)), tuning_(false)
+    {
+        RTC_CHECK(inner_);
+    }
+
+    // ----- AudioDeviceModule interface: we mostly forward to |inner_| -----
+    int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override { return inner_->ActiveAudioLayer(audioLayer); }
+
+    int32_t RegisterAudioCallback(webrtc::AudioTransport* engine_transport) override
+    {
+        // The engine registers its transport here. We put our audio transport between engine and ADM.
+        audio_transport_.SetEngineTransport(engine_transport);
+        // Register our proxy with the real ADM.
+        return inner_->RegisterAudioCallback(&audio_transport_); +    } + +    int32_t Init() override { return inner_->Init(); } +    int32_t Terminate() override { return inner_->Terminate(); } +    bool    Initialized() const override { return inner_->Initialized(); } + +    // --- Device enumeration/selection (forward) --- +    int16_t PlayoutDevices() override { return inner_->PlayoutDevices(); } +    int16_t RecordingDevices() override { return inner_->RecordingDevices(); } +    int32_t PlayoutDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override +    { +        return inner_->PlayoutDeviceName(index, name, guid); +    } +    int32_t RecordingDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override +    { +        return inner_->RecordingDeviceName(index, name, guid); +    } +    int32_t SetPlayoutDevice(uint16_t index) override { return inner_->SetPlayoutDevice(index); } +    int32_t SetRecordingDevice(uint16_t index) override { return inner_->SetRecordingDevice(index); } + +    // Windows default/communications selectors, if your branch exposes them: +    int32_t SetPlayoutDevice(WindowsDeviceType type) override { return inner_->SetPlayoutDevice(type); } +    int32_t SetRecordingDevice(WindowsDeviceType type) override { return inner_->SetRecordingDevice(type); } + +    // --- Init/start/stop (forward) --- +    int32_t InitPlayout() override { return inner_->InitPlayout(); } +    bool    PlayoutIsInitialized() const override { return inner_->PlayoutIsInitialized(); } +    int32_t StartPlayout() override { +        if (tuning_) return 0;  // For tuning, don't allow playout +        return inner_->StartPlayout(); +    } +    int32_t StopPlayout() override { return inner_->StopPlayout(); } +    bool    Playing() const override { return inner_->Playing(); } + +    int32_t InitRecording() override { return inner_->InitRecording(); } +    bool    RecordingIsInitialized() const override { return inner_->RecordingIsInitialized(); } +    int32_t StartRecording() override { +        // ignore start recording as webrtc.lib will +        // send one when streams first connect, resulting +        // in an inadvertant 'recording' when mute is on. +        // We take full control of StartRecording via +        // ForceStartRecording below. +        return 0; +    } +    int32_t StopRecording() override { +        if (tuning_) return 0;  // if we're tuning, disregard the StopRecording we get from disabling the streams +        return inner_->StopRecording(); +    } +    int32_t ForceStartRecording() { return inner_->StartRecording(); } +    int32_t ForceStopRecording() { return inner_->StopRecording(); } +    bool    Recording() const override { return inner_->Recording(); } + +    // --- Stereo opts (forward if available on your branch) --- +    int32_t SetStereoPlayout(bool enable) override { return inner_->SetStereoPlayout(enable); } +    int32_t SetStereoRecording(bool enable) override { return inner_->SetStereoRecording(enable); } +    int32_t PlayoutIsAvailable(bool* available) override { return inner_->PlayoutIsAvailable(available); } +    int32_t RecordingIsAvailable(bool* available) override { return inner_->RecordingIsAvailable(available); } + +    // --- AGC/Volume/Mute/etc. 
+    int32_t SetMicrophoneVolume(uint32_t volume) override { return inner_->SetMicrophoneVolume(volume); }
+    int32_t MicrophoneVolume(uint32_t* volume) const override { return inner_->MicrophoneVolume(volume); }
+
+    // --- Speaker/Microphone init (forward) ---
+    int32_t InitSpeaker() override { return inner_->InitSpeaker(); }
+    bool    SpeakerIsInitialized() const override { return inner_->SpeakerIsInitialized(); }
+    int32_t InitMicrophone() override { return inner_->InitMicrophone(); }
+    bool    MicrophoneIsInitialized() const override { return inner_->MicrophoneIsInitialized(); }
+
+    // --- Speaker Volume (forward) ---
+    int32_t SpeakerVolumeIsAvailable(bool* available) override { return inner_->SpeakerVolumeIsAvailable(available); }
+    int32_t SetSpeakerVolume(uint32_t volume) override { return inner_->SetSpeakerVolume(volume); }
+    int32_t SpeakerVolume(uint32_t* volume) const override { return inner_->SpeakerVolume(volume); }
+    int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return inner_->MaxSpeakerVolume(maxVolume); }
+    int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return inner_->MinSpeakerVolume(minVolume); }
+
+    // --- Microphone Volume (forward) ---
+    int32_t MicrophoneVolumeIsAvailable(bool* available) override { return inner_->MicrophoneVolumeIsAvailable(available); }
+    int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return inner_->MaxMicrophoneVolume(maxVolume); }
+    int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return inner_->MinMicrophoneVolume(minVolume); }
+
+    // --- Speaker Mute (forward) ---
+    int32_t SpeakerMuteIsAvailable(bool* available) override { return inner_->SpeakerMuteIsAvailable(available); }
+    int32_t SetSpeakerMute(bool enable) override { return inner_->SetSpeakerMute(enable); }
+    int32_t SpeakerMute(bool* enabled) const override { return inner_->SpeakerMute(enabled); }
+
+    // --- Microphone Mute (forward) ---
+    int32_t MicrophoneMuteIsAvailable(bool* available) override { return inner_->MicrophoneMuteIsAvailable(available); }
+    int32_t SetMicrophoneMute(bool enable) override { return inner_->SetMicrophoneMute(enable); }
+    int32_t MicrophoneMute(bool* enabled) const override { return inner_->MicrophoneMute(enabled); }
+
+    // --- Stereo Support (forward) ---
+    int32_t StereoPlayoutIsAvailable(bool* available) const override { return inner_->StereoPlayoutIsAvailable(available); }
+    int32_t StereoPlayout(bool* enabled) const override { return inner_->StereoPlayout(enabled); }
+    int32_t StereoRecordingIsAvailable(bool* available) const override { return inner_->StereoRecordingIsAvailable(available); }
+    int32_t StereoRecording(bool* enabled) const override { return inner_->StereoRecording(enabled); }
+
+    // --- Delay/Timing (forward) ---
+    int32_t PlayoutDelay(uint16_t* delayMS) const override { return inner_->PlayoutDelay(delayMS); }
+
+    // --- Built-in Audio Processing (forward) ---
+    bool    BuiltInAECIsAvailable() const override { return inner_->BuiltInAECIsAvailable(); }
+    bool    BuiltInAGCIsAvailable() const override { return inner_->BuiltInAGCIsAvailable(); }
+    bool    BuiltInNSIsAvailable() const override { return inner_->BuiltInNSIsAvailable(); }
+    int32_t EnableBuiltInAEC(bool enable) override { return inner_->EnableBuiltInAEC(enable); }
+    int32_t EnableBuiltInAGC(bool enable) override { return inner_->EnableBuiltInAGC(enable); }
+    int32_t EnableBuiltInNS(bool enable) override { return inner_->EnableBuiltInNS(enable); }
+
+    // --- Additional AudioDeviceModule methods (forward) ---
+    int32_t GetPlayoutUnderrunCount() const override { return inner_->GetPlayoutUnderrunCount(); }
+
+    // Used to generate RTC stats. If not implemented, RTCAudioPlayoutStats will
+    // not be present in the stats.
+    std::optional<Stats> GetStats() const override { return inner_->GetStats(); }
+
+// Only supported on iOS.
+#if defined(WEBRTC_IOS)
+    virtual int GetPlayoutAudioParameters(AudioParameters* params) const override { return inner_->GetPlayoutAudioParameters(params); }
+    virtual int GetRecordAudioParameters(AudioParameters* params) override { return inner_->GetRecordAudioParameters(params); }
+#endif // WEBRTC_IOS
+
+    virtual int32_t GetPlayoutDevice() const override { return inner_->GetPlayoutDevice(); }
+    virtual int32_t GetRecordingDevice() const override { return inner_->GetRecordingDevice(); }
+    virtual int32_t SetObserver(webrtc::AudioDeviceObserver* observer) override { return inner_->SetObserver(observer); }
+
+    // tuning microphone energy calculations
+    float GetMicrophoneEnergy() { return audio_transport_.GetMicrophoneEnergy(); }
+    void  SetTuningMicGain(float gain) { audio_transport_.SetGain(gain); }
+    void  SetTuning(bool tuning, bool mute)
+    {
+        tuning_ = tuning;
+        if (tuning)
+        {
+            inner_->InitRecording();
+            inner_->StartRecording();
+            inner_->StopPlayout();
+        }
+        else
+        {
+            if (mute)
+            {
+                inner_->StopRecording();
+            }
+            else
+            {
+                inner_->InitRecording();
+                inner_->StartRecording();
+            }
+            inner_->StartPlayout();
+        }
+    }
+
+protected:
+    ~LLWebRTCAudioDeviceModule() override = default;
+
+private:
+    webrtc::scoped_refptr<webrtc::AudioDeviceModule> inner_;
+    LLWebRTCAudioTransport                           audio_transport_;
+
+    bool tuning_;
+};
+
+class LLCustomProcessorState
+{
+
+public:
+    float getMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); }
+    void setMicrophoneEnergy(float energy) { mMicrophoneEnergy.store(energy, std::memory_order_relaxed); }
+
+    void setGain(float gain)
+    {
+        mGain.store(gain, std::memory_order_relaxed);
+        mDirty.store(true, std::memory_order_relaxed);
+    }
+
+    float getGain() { return mGain.load(std::memory_order_relaxed); }
+
+    bool getDirty() { return mDirty.exchange(false, std::memory_order_relaxed); }
+
+ protected:
+    std::atomic<bool>  mDirty{ true };
+    std::atomic<float> mMicrophoneEnergy{ 0.0f };
+    std::atomic<float> mGain{ 0.0f };
+};
+
+using LLCustomProcessorStatePtr = std::shared_ptr<LLCustomProcessorState>;
+
 // Used to process/retrieve audio levels after
 // all of the processing (AGC, AEC, etc.) for display in-world to the user.
 class LLCustomProcessor : public webrtc::CustomProcessing
 {
-  public:
-    LLCustomProcessor();
+public:
+    LLCustomProcessor(LLCustomProcessorStatePtr state);
     ~LLCustomProcessor() override {}
 
     // (Re-) Initializes the submodule.
     void Initialize(int sample_rate_hz, int num_channels) override;
 
     // Analyzes the given capture or render signal.
-    void Process(webrtc::AudioBuffer *audio) override;
+    void Process(webrtc::AudioBuffer* audio) override;
 
     // Returns a string representation of the module state.
     std::string ToString() const override { return ""; }
 
-    float getMicrophoneEnergy() { return mMicrophoneEnergy; }
-
-    void setGain(float gain) { mGain = gain; }
-
-  protected:
-    static const int NUM_PACKETS_TO_FILTER = 30;  // 300 ms of smoothing
-    int              mSampleRateHz;
-    int              mNumChannels;
+protected:
+    static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing
+    int              mSampleRateHz{ 48000 };
+    int              mNumChannels{ 2 };
+    int              mRampFrames{ 2 };
+    float            mCurrentGain{ 0.0f };
+    float            mGainStep{ 0.0f };
     float mSumVector[NUM_PACKETS_TO_FILTER];
-    float mMicrophoneEnergy;
-    float mGain;
+    friend LLCustomProcessorState;
+    LLCustomProcessorStatePtr mState;
 };
@@ -187,7 +416,7 @@ class LLCustomProcessor : public webrtc::CustomProcessing
 
 #if CM_WEBRTC
 class LLWebRTCImpl : public LLWebRTCDeviceInterface
 #else
-class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceSink
+class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceObserver
 #endif
 {
   public:
@@ -218,10 +447,15 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
     float getTuningAudioLevel() override;
     float getPeerConnectionAudioLevel() override;
 
-    void setPeerConnectionGain(float gain) override;
+    void setMicGain(float gain) override;
+    void setTuningMicGain(float gain) override;
+
+    void setMute(bool mute, int delay_ms = 20) override;
+
+    void intSetMute(bool mute, int delay_ms = 20);
 
     //
-    // AudioDeviceSink
+    // AudioDeviceObserver
     //
 #if CM_WEBRTC
     void OnDevicesUpdated();
@@ -254,19 +488,19 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
         mNetworkThread->PostTask(std::move(task), location);
     }
 
-    void WorkerBlockingCall(rtc::FunctionView<void()> functor,
+    void WorkerBlockingCall(webrtc::FunctionView<void()> functor,
                   const webrtc::Location& location = webrtc::Location::Current())
    {
         mWorkerThread->BlockingCall(std::move(functor), location);
     }
 
-    void SignalingBlockingCall(rtc::FunctionView<void()> functor,
+    void SignalingBlockingCall(webrtc::FunctionView<void()> functor,
                   const webrtc::Location& location = webrtc::Location::Current())
     {
         mSignalingThread->BlockingCall(std::move(functor), location);
     }
 
-    void NetworkBlockingCall(rtc::FunctionView<void()> functor,
+    void NetworkBlockingCall(webrtc::FunctionView<void()> functor,
                   const webrtc::Location& location = webrtc::Location::Current())
     {
         mNetworkThread->BlockingCall(std::move(functor), location);
@@ -274,7 +508,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
     // Allows the LLWebRTCPeerConnectionImpl class to retrieve the
     // native webrtc PeerConnectionFactory.
-    rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> getPeerConnectionFactory()
+    webrtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> getPeerConnectionFactory()
     {
         return mPeerConnectionFactory;
     }
@@ -283,49 +517,47 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
     LLWebRTCPeerConnectionInterface* newPeerConnection();
     void freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection);
 
-    // enables/disables capture via the capture device
-    void setRecording(bool recording);
-
-    void setPlayout(bool playing);
-
   protected:
+
+    void workerDeployDevices();
 
     LLWebRTCLogSink*                                           mLogSink;
 
     // The native webrtc threads
-    std::unique_ptr<rtc::Thread>                               mNetworkThread;
-    std::unique_ptr<rtc::Thread>                               mWorkerThread;
-    std::unique_ptr<rtc::Thread>                               mSignalingThread;
+    std::unique_ptr<webrtc::Thread>                            mNetworkThread;
+    std::unique_ptr<webrtc::Thread>                            mWorkerThread;
+    std::unique_ptr<webrtc::Thread>                            mSignalingThread;
 
     // The factory that allows creation of native webrtc PeerConnections.
-    rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
+    webrtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
 
-    rtc::scoped_refptr<webrtc::AudioProcessing>                mAudioProcessingModule;
+    webrtc::scoped_refptr<webrtc::AudioProcessing>                mAudioProcessingModule;
 
     // more native webrtc stuff
-    std::unique_ptr<webrtc::TaskQueueFactory>                  mTaskQueueFactory;
+    std::unique_ptr<webrtc::TaskQueueFactory>                     mTaskQueueFactory;
 
     // Devices
     void updateDevices();
-    rtc::scoped_refptr<webrtc::AudioDeviceModule>              mTuningDeviceModule;
-    rtc::scoped_refptr<webrtc::AudioDeviceModule>              mPeerDeviceModule;
+    void deployDevices();
+    std::atomic<int>                                           mDevicesDeploying;
+    webrtc::scoped_refptr<LLWebRTCAudioDeviceModule>           mDeviceModule;
     std::vector<LLWebRTCDevicesObserver *>                     mVoiceDevicesObserverList;
 
     // accessors in native webrtc for devices aren't apparently implemented yet.
     bool                                                       mTuningMode;
-    int32_t                                                    mRecordingDevice;
+    std::string                                                mRecordingDevice;
     LLWebRTCVoiceDeviceList                                    mRecordingDeviceList;
-    int32_t                                                    mPlayoutDevice;
+    std::string                                                mPlayoutDevice;
     LLWebRTCVoiceDeviceList                                    mPlayoutDeviceList;
 
     bool                                                       mMute;
+    float                                                      mGain;
 
-    LLAudioDeviceObserver *                                    mTuningAudioDeviceObserver;
-    LLCustomProcessor *                                        mPeerCustomProcessor;
+    LLCustomProcessorStatePtr                                  mPeerCustomProcessor;
 
     // peer connections
-    std::vector<rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>>     mPeerConnections;
+    std::vector<webrtc::scoped_refptr<LLWebRTCPeerConnectionImpl>> mPeerConnections;
 };
@@ -350,7 +582,7 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
     void terminate();
 
     virtual void AddRef() const override = 0;
-    virtual rtc::RefCountReleaseStatus Release() const override = 0;
+    virtual webrtc::RefCountReleaseStatus Release() const override = 0;
 
     //
     // LLWebRTCPeerConnection
@@ -381,10 +613,10 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
     //
     void OnSignalingChange(webrtc::PeerConnectionInterface::SignalingState new_state) override {}
-    void OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
-                    const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) override;
-    void OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) override;
-    void OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) override;
+    void OnAddTrack(webrtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
+                    const std::vector<webrtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) override;
+    void OnRemoveTrack(webrtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) override;
+    void OnDataChannel(webrtc::scoped_refptr<webrtc::DataChannelInterface> channel) override;
     void OnRenegotiationNeeded() override {}
     void OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) override {};
     void OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) override;
@@ -423,7 +655,7 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
     LLWebRTCImpl * mWebRTCImpl;
 
-    rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
+    webrtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
 
     typedef enum {
         MUTE_INITIAL,
@@ -437,12 +669,12 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
     std::vector<std::unique_ptr<webrtc::IceCandidateInterface>>  mCachedIceCandidates;
     bool mAnswerReceived;
 
-    rtc::scoped_refptr<webrtc::PeerConnectionInterface> mPeerConnection;
-    rtc::scoped_refptr<webrtc::MediaStreamInterface> mLocalStream;
+    webrtc::scoped_refptr<webrtc::PeerConnectionInterface> mPeerConnection;
+    webrtc::scoped_refptr<webrtc::MediaStreamInterface> mLocalStream;
 
     // data
     std::vector<LLWebRTCDataObserver *> mDataObserverList;
-    rtc::scoped_refptr<webrtc::DataChannelInterface> mDataChannel;
+    webrtc::scoped_refptr<webrtc::DataChannelInterface> mDataChannel;
 };
 
 }
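
Both the old observer path and the new LLWebRTCAudioTransport keep a 30-entry history (NUM_PACKETS_TO_FILTER, roughly 300 ms of 10 ms frames) to smooth the microphone level shown in the tuning UI; the capture callback that actually fills mSumVector is not part of the hunks above. A minimal standalone sketch of that smoothing idea, with hypothetical names (EnergySmoother, push) that do not appear in the patch:

    // Hypothetical sketch (not part of the patch): RMS energy of one capture
    // frame, smoothed over the last NUM_PACKETS_TO_FILTER frames the way
    // mSumVector is used in the classes above.
    #include <cmath>
    #include <cstddef>
    #include <cstdint>

    class EnergySmoother
    {
    public:
        static const int NUM_PACKETS_TO_FILTER = 30; // ~300 ms at 10 ms frames

        float push(const int16_t* samples, size_t count)
        {
            if (count == 0) return 0.0f;

            // Sum of squares for the newest frame, normalized to [-1, 1].
            float energy = 0.0f;
            for (size_t i = 0; i < count; ++i)
            {
                float s = samples[i] / 32768.0f;
                energy += s * s;
            }

            // Shift the history and append the newest frame energy.
            for (int i = NUM_PACKETS_TO_FILTER - 1; i > 0; --i)
            {
                mSumVector[i] = mSumVector[i - 1];
            }
            mSumVector[0] = energy;

            // Smoothed RMS over the retained window.
            float total = 0.0f;
            for (int i = 0; i < NUM_PACKETS_TO_FILTER; ++i)
            {
                total += mSumVector[i];
            }
            return std::sqrt(total / (NUM_PACKETS_TO_FILTER * count));
        }

    private:
        float mSumVector[NUM_PACKETS_TO_FILTER] = {};
    };

The relaxed atomics on mMicrophoneEnergy and mGain in the real classes exist because the value is produced on the audio device thread and read from the viewer side; the sketch leaves that synchronization out.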

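The new LLCustomProcessor members (mCurrentGain, mGainStep, mRampFrames) suggest that a gain change picked up from LLCustomProcessorState is applied as a short linear ramp across frames rather than as a single step, presumably to avoid audible clicks; Process() itself is outside this diff, so the following is only a rough sketch under that assumption, with a hypothetical helper name (applyGainWithRamp):

    // Hypothetical sketch (not from the patch): ramp toward a new target gain
    // over a fixed number of frames, then apply the per-sample gain.
    #include <cstddef>

    void applyGainWithRamp(float* samples, size_t count,
                           float targetGain, float& currentGain,
                           float& gainStep, int& rampFramesLeft,
                           int rampFrames = 2)
    {
        if (targetGain != currentGain && rampFramesLeft == 0)
        {
            // New target: spread the change across rampFrames frames.
            rampFramesLeft = rampFrames;
            gainStep       = (targetGain - currentGain) / rampFrames;
        }
        if (rampFramesLeft > 0)
        {
            currentGain += gainStep;
            if (--rampFramesLeft == 0)
            {
                currentGain = targetGain; // land exactly on the target
            }
        }
        for (size_t i = 0; i < count; ++i)
        {
            samples[i] *= currentGain;
        }
    }

With the default suggested by mRampFrames{ 2 }, a change would be spread over about 20 ms of audio, which is usually enough to mask the discontinuity.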