Diffstat (limited to 'indra/llwebrtc/llwebrtc_impl.h')
-rw-r--r--  indra/llwebrtc/llwebrtc_impl.h  414
1 file changed, 323 insertions(+), 91 deletions(-)
diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h
index dfdb19e9be..7b23b11208 100644
--- a/indra/llwebrtc/llwebrtc_impl.h
+++ b/indra/llwebrtc/llwebrtc_impl.h
@@ -54,12 +54,12 @@
#include "rtc_base/ref_counted_object.h"
#include "rtc_base/ssl_adapter.h"
#include "rtc_base/thread.h"
+#include "rtc_base/logging.h"
#include "api/peer_connection_interface.h"
#include "api/media_stream_interface.h"
#include "api/create_peerconnection_factory.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_data_observer.h"
-#include "rtc_base/task_queue.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "modules/audio_device/include/audio_device_defines.h"
@@ -69,35 +69,30 @@ namespace llwebrtc
class LLWebRTCPeerConnectionImpl;
-class LLWebRTCLogSink : public rtc::LogSink {
+class LLWebRTCLogSink : public webrtc::LogSink
+{
public:
- LLWebRTCLogSink(LLWebRTCLogCallback* callback) :
- mCallback(callback)
- {
- }
+ LLWebRTCLogSink(LLWebRTCLogCallback* callback) : mCallback(callback) {}
// Destructor: close the log file
- ~LLWebRTCLogSink() override
- {
- }
+ ~LLWebRTCLogSink() override {}
- void OnLogMessage(const std::string& msg,
- rtc::LoggingSeverity severity) override
+ void OnLogMessage(const std::string& msg, webrtc::LoggingSeverity severity) override
{
if (mCallback)
{
- switch(severity)
+ switch (severity)
{
- case rtc::LS_VERBOSE:
+ case webrtc::LS_VERBOSE:
mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);
break;
- case rtc::LS_INFO:
+ case webrtc::LS_INFO:
mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);
break;
- case rtc::LS_WARNING:
+ case webrtc::LS_WARNING:
mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);
break;
- case rtc::LS_ERROR:
+ case webrtc::LS_ERROR:
mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);
break;
default:
@@ -118,67 +113,301 @@ private:
LLWebRTCLogCallback* mCallback;
};
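The sink is attached to libwebrtc's logger in the .cpp, which is not part of this diff; roughly, using the stock AddLogToStream/RemoveLogToStream calls (namespace per the branch in use, and the callback name here is an assumption):

    mLogSink = new LLWebRTCLogSink(logCallback);                       // logCallback supplied by the viewer
    webrtc::LogMessage::AddLogToStream(mLogSink, webrtc::LS_VERBOSE);  // receive all severities
    // ... and on shutdown, detach before destroying the sink:
    webrtc::LogMessage::RemoveLogToStream(mLogSink);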
-// Implements a class allowing capture of audio data
-// to determine audio level of the microphone.
-class LLAudioDeviceObserver : public webrtc::AudioDeviceDataObserver
+// -----------------------------------------------------------------------------
+// A proxy transport that forwards capture data to two AudioTransport sinks:
+// - the "engine" (libwebrtc's VoiceEngine)
+// - the "user" (your app's listener)
+//
+// Playout (NeedMorePlayData) goes only to the engine by default to avoid
+// double-writing into the output buffer. See notes below if you want a tap.
+// -----------------------------------------------------------------------------
+class LLWebRTCAudioTransport : public webrtc::AudioTransport
{
- public:
- LLAudioDeviceObserver();
-
- // Retrieve the RMS audio loudness
- float getMicrophoneEnergy();
-
- // Data retrieved from the caputure device is
- // passed in here for processing.
- void OnCaptureData(const void *audio_samples,
- const size_t num_samples,
- const size_t bytes_per_sample,
- const size_t num_channels,
- const uint32_t samples_per_sec) override;
-
- // This is for data destined for the render device.
- // not currently used.
- void OnRenderData(const void *audio_samples,
- const size_t num_samples,
- const size_t bytes_per_sample,
- const size_t num_channels,
- const uint32_t samples_per_sec) override;
+public:
+ LLWebRTCAudioTransport();
+
+ void SetEngineTransport(webrtc::AudioTransport* t);
+
+ // -------- Capture path: fan out to both sinks --------
+ int32_t RecordedDataIsAvailable(const void* audio_data,
+ size_t number_of_samples,
+ size_t bytes_per_sample,
+ size_t number_of_channels,
+ uint32_t samples_per_sec,
+ uint32_t total_delay_ms,
+ int32_t clock_drift,
+ uint32_t current_mic_level,
+ bool key_pressed,
+ uint32_t& new_mic_level) override;
+
+ // -------- Playout path: delegate to engine only --------
+ int32_t NeedMorePlayData(size_t number_of_samples,
+ size_t bytes_per_sample,
+ size_t number_of_channels,
+ uint32_t samples_per_sec,
+ void* audio_data,
+ size_t& number_of_samples_out,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) override;
+
+ // Method to pull mixed render audio data from all active VoE channels.
+ // The data will not be passed as reference for audio processing internally.
+ void PullRenderData(int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ void* audio_data,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) override;
+
+ float GetMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); }
+ void SetGain(float gain) { mGain.store(gain, std::memory_order_relaxed); }
+
+private:
+ std::atomic<webrtc::AudioTransport*> engine_{ nullptr };
+ static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames)
+ float mSumVector[NUM_PACKETS_TO_FILTER];
+    std::atomic<float> mMicrophoneEnergy{ 0.0f };
+ std::atomic<float> mGain{ 0.0f };
- protected:
- static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames)
- float mSumVector[NUM_PACKETS_TO_FILTER];
- float mMicrophoneEnergy;
};
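The matching .cpp is not in this diff; a minimal sketch of the capture fan-out described above could look like the following (assumes 16-bit PCM, <cmath>, and the same 30-frame smoothing window; illustrative only, not the actual implementation):

    int32_t LLWebRTCAudioTransport::RecordedDataIsAvailable(const void* audio_data,
                                                             size_t      number_of_samples,
                                                             size_t      bytes_per_sample,
                                                             size_t      number_of_channels,
                                                             uint32_t    samples_per_sec,
                                                             uint32_t    total_delay_ms,
                                                             int32_t     clock_drift,
                                                             uint32_t    current_mic_level,
                                                             bool        key_pressed,
                                                             uint32_t&   new_mic_level)
    {
        // Forward the capture frame to the engine transport, if one is registered.
        int32_t result = 0;
        if (webrtc::AudioTransport* engine = engine_.load(std::memory_order_relaxed))
        {
            result = engine->RecordedDataIsAvailable(audio_data, number_of_samples, bytes_per_sample,
                                                     number_of_channels, samples_per_sec, total_delay_ms,
                                                     clock_drift, current_mic_level, key_pressed, new_mic_level);
        }

        // Compute a gain-scaled RMS for the tuning meter (assumes 16-bit samples).
        const int16_t* samples = static_cast<const int16_t*>(audio_data);
        const size_t   count   = number_of_samples * number_of_channels;
        const float    gain    = mGain.load(std::memory_order_relaxed);
        double         sum     = 0.0;
        for (size_t i = 0; i < count; ++i)
        {
            const float s = (samples[i] / 32768.0f) * gain;
            sum += s * s;
        }

        // Slide the smoothing window and publish the averaged energy.
        float energy = 0.0f;
        for (int i = 0; i < NUM_PACKETS_TO_FILTER - 1; ++i)
        {
            mSumVector[i] = mSumVector[i + 1];
            energy       += mSumVector[i];
        }
        mSumVector[NUM_PACKETS_TO_FILTER - 1] = count ? static_cast<float>(std::sqrt(sum / count)) : 0.0f;
        energy += mSumVector[NUM_PACKETS_TO_FILTER - 1];
        mMicrophoneEnergy.store(energy / NUM_PACKETS_TO_FILTER, std::memory_order_relaxed);

        return result;
    }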
+
+// -----------------------------------------------------------------------------
+// LLWebRTCAudioDeviceModule
+// - Wraps a real ADM to provide microphone energy for tuning
+// -----------------------------------------------------------------------------
+class LLWebRTCAudioDeviceModule : public webrtc::AudioDeviceModule
+{
+public:
+ explicit LLWebRTCAudioDeviceModule(webrtc::scoped_refptr<webrtc::AudioDeviceModule> inner) : inner_(std::move(inner)), tuning_(false)
+ {
+ RTC_CHECK(inner_);
+ }
+
+ // ----- AudioDeviceModule interface: we mostly forward to |inner_| -----
+ int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override { return inner_->ActiveAudioLayer(audioLayer); }
+
+ int32_t RegisterAudioCallback(webrtc::AudioTransport* engine_transport) override
+ {
+ // The engine registers its transport here. We put our audio transport between engine and ADM.
+ audio_transport_.SetEngineTransport(engine_transport);
+ // Register our proxy with the real ADM.
+ return inner_->RegisterAudioCallback(&audio_transport_);
+ }
+
+ int32_t Init() override { return inner_->Init(); }
+ int32_t Terminate() override { return inner_->Terminate(); }
+ bool Initialized() const override { return inner_->Initialized(); }
+
+ // --- Device enumeration/selection (forward) ---
+ int16_t PlayoutDevices() override { return inner_->PlayoutDevices(); }
+ int16_t RecordingDevices() override { return inner_->RecordingDevices(); }
+ int32_t PlayoutDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override
+ {
+ return inner_->PlayoutDeviceName(index, name, guid);
+ }
+ int32_t RecordingDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override
+ {
+ return inner_->RecordingDeviceName(index, name, guid);
+ }
+ int32_t SetPlayoutDevice(uint16_t index) override { return inner_->SetPlayoutDevice(index); }
+ int32_t SetRecordingDevice(uint16_t index) override { return inner_->SetRecordingDevice(index); }
+
+ // Windows default/communications selectors, if your branch exposes them:
+ int32_t SetPlayoutDevice(WindowsDeviceType type) override { return inner_->SetPlayoutDevice(type); }
+ int32_t SetRecordingDevice(WindowsDeviceType type) override { return inner_->SetRecordingDevice(type); }
+
+ // --- Init/start/stop (forward) ---
+ int32_t InitPlayout() override { return inner_->InitPlayout(); }
+ bool PlayoutIsInitialized() const override { return inner_->PlayoutIsInitialized(); }
+ int32_t StartPlayout() override {
+ if (tuning_) return 0; // For tuning, don't allow playout
+ return inner_->StartPlayout();
+ }
+ int32_t StopPlayout() override { return inner_->StopPlayout(); }
+ bool Playing() const override { return inner_->Playing(); }
+
+ int32_t InitRecording() override { return inner_->InitRecording(); }
+ bool RecordingIsInitialized() const override { return inner_->RecordingIsInitialized(); }
+ int32_t StartRecording() override {
+        // Ignore StartRecording here: webrtc.lib issues one as soon as
+        // streams connect, which would inadvertently start 'recording'
+        // while mute is on. Recording is controlled explicitly via
+        // ForceStartRecording/ForceStopRecording below.
+ return 0;
+ }
+ int32_t StopRecording() override {
+ if (tuning_) return 0; // if we're tuning, disregard the StopRecording we get from disabling the streams
+ return inner_->StopRecording();
+ }
+ int32_t ForceStartRecording() { return inner_->StartRecording(); }
+ int32_t ForceStopRecording() { return inner_->StopRecording(); }
+ bool Recording() const override { return inner_->Recording(); }
+
+ // --- Stereo opts (forward if available on your branch) ---
+ int32_t SetStereoPlayout(bool enable) override { return inner_->SetStereoPlayout(enable); }
+ int32_t SetStereoRecording(bool enable) override { return inner_->SetStereoRecording(enable); }
+ int32_t PlayoutIsAvailable(bool* available) override { return inner_->PlayoutIsAvailable(available); }
+ int32_t RecordingIsAvailable(bool* available) override { return inner_->RecordingIsAvailable(available); }
+
+ // --- AGC/Volume/Mute/etc. (forward) ---
+ int32_t SetMicrophoneVolume(uint32_t volume) override { return inner_->SetMicrophoneVolume(volume); }
+ int32_t MicrophoneVolume(uint32_t* volume) const override { return inner_->MicrophoneVolume(volume); }
+
+ // --- Speaker/Microphone init (forward) ---
+ int32_t InitSpeaker() override { return inner_->InitSpeaker(); }
+ bool SpeakerIsInitialized() const override { return inner_->SpeakerIsInitialized(); }
+ int32_t InitMicrophone() override { return inner_->InitMicrophone(); }
+ bool MicrophoneIsInitialized() const override { return inner_->MicrophoneIsInitialized(); }
+
+ // --- Speaker Volume (forward) ---
+ int32_t SpeakerVolumeIsAvailable(bool* available) override { return inner_->SpeakerVolumeIsAvailable(available); }
+ int32_t SetSpeakerVolume(uint32_t volume) override { return inner_->SetSpeakerVolume(volume); }
+ int32_t SpeakerVolume(uint32_t* volume) const override { return inner_->SpeakerVolume(volume); }
+ int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return inner_->MaxSpeakerVolume(maxVolume); }
+ int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return inner_->MinSpeakerVolume(minVolume); }
+
+ // --- Microphone Volume (forward) ---
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override { return inner_->MicrophoneVolumeIsAvailable(available); }
+ int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return inner_->MaxMicrophoneVolume(maxVolume); }
+ int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return inner_->MinMicrophoneVolume(minVolume); }
+
+ // --- Speaker Mute (forward) ---
+ int32_t SpeakerMuteIsAvailable(bool* available) override { return inner_->SpeakerMuteIsAvailable(available); }
+ int32_t SetSpeakerMute(bool enable) override { return inner_->SetSpeakerMute(enable); }
+ int32_t SpeakerMute(bool* enabled) const override { return inner_->SpeakerMute(enabled); }
+
+ // --- Microphone Mute (forward) ---
+ int32_t MicrophoneMuteIsAvailable(bool* available) override { return inner_->MicrophoneMuteIsAvailable(available); }
+ int32_t SetMicrophoneMute(bool enable) override { return inner_->SetMicrophoneMute(enable); }
+ int32_t MicrophoneMute(bool* enabled) const override { return inner_->MicrophoneMute(enabled); }
+
+ // --- Stereo Support (forward) ---
+ int32_t StereoPlayoutIsAvailable(bool* available) const override { return inner_->StereoPlayoutIsAvailable(available); }
+ int32_t StereoPlayout(bool* enabled) const override { return inner_->StereoPlayout(enabled); }
+ int32_t StereoRecordingIsAvailable(bool* available) const override { return inner_->StereoRecordingIsAvailable(available); }
+ int32_t StereoRecording(bool* enabled) const override { return inner_->StereoRecording(enabled); }
+
+ // --- Delay/Timing (forward) ---
+ int32_t PlayoutDelay(uint16_t* delayMS) const override { return inner_->PlayoutDelay(delayMS); }
+
+ // --- Built-in Audio Processing (forward) ---
+ bool BuiltInAECIsAvailable() const override { return inner_->BuiltInAECIsAvailable(); }
+ bool BuiltInAGCIsAvailable() const override { return inner_->BuiltInAGCIsAvailable(); }
+ bool BuiltInNSIsAvailable() const override { return inner_->BuiltInNSIsAvailable(); }
+ int32_t EnableBuiltInAEC(bool enable) override { return inner_->EnableBuiltInAEC(enable); }
+ int32_t EnableBuiltInAGC(bool enable) override { return inner_->EnableBuiltInAGC(enable); }
+ int32_t EnableBuiltInNS(bool enable) override { return inner_->EnableBuiltInNS(enable); }
+
+ // --- Additional AudioDeviceModule methods (forward) ---
+ int32_t GetPlayoutUnderrunCount() const override { return inner_->GetPlayoutUnderrunCount(); }
+
+ // Used to generate RTC stats. If not implemented, RTCAudioPlayoutStats will
+ // not be present in the stats.
+ std::optional<Stats> GetStats() const override { return inner_->GetStats(); }
+
+// Only supported on iOS.
+#if defined(WEBRTC_IOS)
+ virtual int GetPlayoutAudioParameters(AudioParameters* params) const override { return inner_->GetPlayoutAudioParameters(params); }
+ virtual int GetRecordAudioParameters(AudioParameters* params) override { return inner_->GetRecordAudioParameters(params); }
+#endif // WEBRTC_IOS
+
+ virtual int32_t GetPlayoutDevice() const override { return inner_->GetPlayoutDevice(); }
+ virtual int32_t GetRecordingDevice() const override { return inner_->GetRecordingDevice(); }
+ virtual int32_t SetObserver(webrtc::AudioDeviceObserver* observer) override { return inner_->SetObserver(observer); }
+
+ // tuning microphone energy calculations
+ float GetMicrophoneEnergy() { return audio_transport_.GetMicrophoneEnergy(); }
+ void SetTuningMicGain(float gain) { audio_transport_.SetGain(gain); }
+ void SetTuning(bool tuning, bool mute)
+ {
+ tuning_ = tuning;
+ if (tuning)
+ {
+ inner_->InitRecording();
+ inner_->StartRecording();
+ inner_->StopPlayout();
+ }
+ else
+ {
+ if (mute)
+ {
+ inner_->StopRecording();
+ }
+ else
+ {
+ inner_->InitRecording();
+ inner_->StartRecording();
+ }
+ inner_->StartPlayout();
+ }
+ }
+
+protected:
+ ~LLWebRTCAudioDeviceModule() override = default;
+
+private:
+ webrtc::scoped_refptr<webrtc::AudioDeviceModule> inner_;
+ LLWebRTCAudioTransport audio_transport_;
+
+ bool tuning_;
+};
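Construction and tuning usage are not visible in this header; one plausible wiring is sketched below, with factory names assumed to match the libwebrtc branch in use and task_queue_factory standing in for whatever TaskQueueFactory the module already owns:

    // On the worker thread: wrap the platform ADM so the proxy transport sits
    // between the engine and the device (sketch only).
    webrtc::scoped_refptr<webrtc::AudioDeviceModule> inner =
        webrtc::AudioDeviceModule::Create(webrtc::AudioDeviceModule::kPlatformDefaultAudio,
                                          task_queue_factory.get());
    webrtc::scoped_refptr<LLWebRTCAudioDeviceModule> adm =
        webrtc::make_ref_counted<LLWebRTCAudioDeviceModule>(inner);

    // Device tuning: capture without playout, poll the meter, then restore.
    adm->SetTuning(true, /*mute=*/false);        // start recording, stop playout
    float level = adm->GetMicrophoneEnergy();    // read the smoothed mic energy
    adm->SetTuning(false, /*mute=*/false);       // resume playout, keep recording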
+
+class LLCustomProcessorState
+{
+public:
+ float getMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); }
+ void setMicrophoneEnergy(float energy) { mMicrophoneEnergy.store(energy, std::memory_order_relaxed); }
+
+ void setGain(float gain)
+ {
+ mGain.store(gain, std::memory_order_relaxed);
+ mDirty.store(true, std::memory_order_relaxed);
+ }
+
+ float getGain() { return mGain.load(std::memory_order_relaxed); }
+
+ bool getDirty() { return mDirty.exchange(false, std::memory_order_relaxed); }
+
+protected:
+ std::atomic<bool> mDirty{ true };
+ std::atomic<float> mMicrophoneEnergy{ 0.0f };
+ std::atomic<float> mGain{ 0.0f };
+};
+
+using LLCustomProcessorStatePtr = std::shared_ptr<LLCustomProcessorState>;
+
// Used to process/retrieve audio levels after
// all of the processing (AGC, AEC, etc.) for display in-world to the user.
class LLCustomProcessor : public webrtc::CustomProcessing
{
- public:
- LLCustomProcessor();
+public:
+ LLCustomProcessor(LLCustomProcessorStatePtr state);
~LLCustomProcessor() override {}
// (Re-) Initializes the submodule.
void Initialize(int sample_rate_hz, int num_channels) override;
// Analyzes the given capture or render signal.
- void Process(webrtc::AudioBuffer *audio) override;
+ void Process(webrtc::AudioBuffer* audio) override;
// Returns a string representation of the module state.
std::string ToString() const override { return ""; }
- float getMicrophoneEnergy() { return mMicrophoneEnergy; }
-
- void setGain(float gain) { mGain = gain; }
-
- protected:
- static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing
- int mSampleRateHz;
- int mNumChannels;
+protected:
+ static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing
+ int mSampleRateHz{ 48000 };
+ int mNumChannels{ 2 };
+ int mRampFrames{ 2 };
+ float mCurrentGain{ 0.0f };
+ float mGainStep{ 0.0f };
float mSumVector[NUM_PACKETS_TO_FILTER];
- float mMicrophoneEnergy;
- float mGain;
+ friend LLCustomProcessorState;
+ LLCustomProcessorStatePtr mState;
};
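The processor's Process() body is not in this diff; a sketch of how it could consume the shared state follows (the AudioBuffer accessors and the gain-ramp details are assumptions, and the 30-frame mSumVector smoothing is omitted for brevity):

    void LLCustomProcessor::Process(webrtc::AudioBuffer* audio)
    {
        // Pick up a newly requested gain and start a short ramp toward it.
        if (mState->getDirty())
        {
            mGainStep = (mState->getGain() - mCurrentGain) / mRampFrames;
        }
        if (mGainStep != 0.0f)
        {
            mCurrentGain += mGainStep;
            const float target = mState->getGain();
            if ((mGainStep > 0.0f && mCurrentGain >= target) ||
                (mGainStep < 0.0f && mCurrentGain <= target))
            {
                mCurrentGain = target;   // ramp finished
                mGainStep    = 0.0f;
            }
        }

        // Apply the gain after the rest of the APM chain and accumulate energy.
        float        sum      = 0.0f;
        const size_t frames   = audio->num_frames();
        const size_t channels = audio->num_channels();
        for (size_t ch = 0; ch < channels; ++ch)
        {
            float* data = audio->channels()[ch];
            for (size_t i = 0; i < frames; ++i)
            {
                data[i] *= mCurrentGain;
                sum     += data[i] * data[i];
            }
        }
        // Publish the level for the in-world voice indicator.
        mState->setMicrophoneEnergy(frames ? std::sqrt(sum / (frames * channels)) : 0.0f);
    }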
@@ -187,7 +416,7 @@ class LLCustomProcessor : public webrtc::CustomProcessing
#if CM_WEBRTC
class LLWebRTCImpl : public LLWebRTCDeviceInterface
#else
-class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceSink
+class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceObserver
#endif
{
public:
@@ -218,10 +447,15 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
float getTuningAudioLevel() override;
float getPeerConnectionAudioLevel() override;
- void setPeerConnectionGain(float gain) override;
+ void setMicGain(float gain) override;
+ void setTuningMicGain(float gain) override;
+
+ void setMute(bool mute, int delay_ms = 20) override;
+
+ void intSetMute(bool mute, int delay_ms = 20);
//
- // AudioDeviceSink
+ // AudioDeviceObserver
//
#if CM_WEBRTC
void OnDevicesUpdated();
@@ -254,19 +488,19 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
mNetworkThread->PostTask(std::move(task), location);
}
- void WorkerBlockingCall(rtc::FunctionView<void()> functor,
+ void WorkerBlockingCall(webrtc::FunctionView<void()> functor,
const webrtc::Location& location = webrtc::Location::Current())
{
mWorkerThread->BlockingCall(std::move(functor), location);
}
- void SignalingBlockingCall(rtc::FunctionView<void()> functor,
+ void SignalingBlockingCall(webrtc::FunctionView<void()> functor,
const webrtc::Location& location = webrtc::Location::Current())
{
mSignalingThread->BlockingCall(std::move(functor), location);
}
- void NetworkBlockingCall(rtc::FunctionView<void()> functor,
+ void NetworkBlockingCall(webrtc::FunctionView<void()> functor,
const webrtc::Location& location = webrtc::Location::Current())
{
mNetworkThread->BlockingCall(std::move(functor), location);
@@ -274,7 +508,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
// Allows the LLWebRTCPeerConnectionImpl class to retrieve the
// native webrtc PeerConnectionFactory.
- rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> getPeerConnectionFactory()
+ webrtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> getPeerConnectionFactory()
{
return mPeerConnectionFactory;
}
@@ -283,49 +517,47 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
LLWebRTCPeerConnectionInterface* newPeerConnection();
void freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection);
- // enables/disables capture via the capture device
- void setRecording(bool recording);
-
- void setPlayout(bool playing);
-
protected:
+
+ void workerDeployDevices();
LLWebRTCLogSink* mLogSink;
// The native webrtc threads
- std::unique_ptr<rtc::Thread> mNetworkThread;
- std::unique_ptr<rtc::Thread> mWorkerThread;
- std::unique_ptr<rtc::Thread> mSignalingThread;
+ std::unique_ptr<webrtc::Thread> mNetworkThread;
+ std::unique_ptr<webrtc::Thread> mWorkerThread;
+ std::unique_ptr<webrtc::Thread> mSignalingThread;
// The factory that allows creation of native webrtc PeerConnections.
- rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
+ webrtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
- rtc::scoped_refptr<webrtc::AudioProcessing> mAudioProcessingModule;
+ webrtc::scoped_refptr<webrtc::AudioProcessing> mAudioProcessingModule;
// more native webrtc stuff
- std::unique_ptr<webrtc::TaskQueueFactory> mTaskQueueFactory;
+ std::unique_ptr<webrtc::TaskQueueFactory> mTaskQueueFactory;
// Devices
void updateDevices();
- rtc::scoped_refptr<webrtc::AudioDeviceModule> mTuningDeviceModule;
- rtc::scoped_refptr<webrtc::AudioDeviceModule> mPeerDeviceModule;
+ void deployDevices();
+ std::atomic<int> mDevicesDeploying;
+ webrtc::scoped_refptr<LLWebRTCAudioDeviceModule> mDeviceModule;
std::vector<LLWebRTCDevicesObserver *> mVoiceDevicesObserverList;
// accessors in native webrtc for devices aren't apparently implemented yet.
bool mTuningMode;
- int32_t mRecordingDevice;
+ std::string mRecordingDevice;
LLWebRTCVoiceDeviceList mRecordingDeviceList;
- int32_t mPlayoutDevice;
+ std::string mPlayoutDevice;
LLWebRTCVoiceDeviceList mPlayoutDeviceList;
bool mMute;
+ float mGain;
- LLAudioDeviceObserver * mTuningAudioDeviceObserver;
- LLCustomProcessor * mPeerCustomProcessor;
+ LLCustomProcessorStatePtr mPeerCustomProcessor;
// peer connections
- std::vector<rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>> mPeerConnections;
+ std::vector<webrtc::scoped_refptr<LLWebRTCPeerConnectionImpl>> mPeerConnections;
};
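intSetMute() is only declared here; a sketch of how it might drive the wrapped ADM on the worker thread, using only members visible in this header (the delay_ms debounce is omitted in this sketch):

    void LLWebRTCImpl::intSetMute(bool mute, int delay_ms)
    {
        (void)delay_ms;   // debounce/fade handling omitted
        mMute = mute;
        WorkerBlockingCall(
            [&]
            {
                if (!mDeviceModule)
                    return;
                if (mMute)
                {
                    // Bypass the no-op StopRecording() path on the wrapper.
                    mDeviceModule->ForceStopRecording();
                }
                else
                {
                    mDeviceModule->InitRecording();
                    // StartRecording() is intentionally ignored by the wrapper,
                    // so recording is started explicitly here.
                    mDeviceModule->ForceStartRecording();
                }
            });
    }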
@@ -350,7 +582,7 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
void terminate();
virtual void AddRef() const override = 0;
- virtual rtc::RefCountReleaseStatus Release() const override = 0;
+ virtual webrtc::RefCountReleaseStatus Release() const override = 0;
//
// LLWebRTCPeerConnection
@@ -381,10 +613,10 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
//
void OnSignalingChange(webrtc::PeerConnectionInterface::SignalingState new_state) override {}
- void OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
- const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) override;
- void OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) override;
- void OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) override;
+ void OnAddTrack(webrtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
+ const std::vector<webrtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) override;
+ void OnRemoveTrack(webrtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) override;
+ void OnDataChannel(webrtc::scoped_refptr<webrtc::DataChannelInterface> channel) override;
void OnRenegotiationNeeded() override {}
void OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) override {};
void OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) override;
@@ -423,7 +655,7 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
LLWebRTCImpl * mWebRTCImpl;
- rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
+ webrtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
typedef enum {
MUTE_INITIAL,
@@ -437,12 +669,12 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
std::vector<std::unique_ptr<webrtc::IceCandidateInterface>> mCachedIceCandidates;
bool mAnswerReceived;
- rtc::scoped_refptr<webrtc::PeerConnectionInterface> mPeerConnection;
- rtc::scoped_refptr<webrtc::MediaStreamInterface> mLocalStream;
+ webrtc::scoped_refptr<webrtc::PeerConnectionInterface> mPeerConnection;
+ webrtc::scoped_refptr<webrtc::MediaStreamInterface> mLocalStream;
// data
std::vector<LLWebRTCDataObserver *> mDataObserverList;
- rtc::scoped_refptr<webrtc::DataChannelInterface> mDataChannel;
+ webrtc::scoped_refptr<webrtc::DataChannelInterface> mDataChannel;
};
}