summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRoxie Linden <roxie@lindenlab.com>2023-09-21 15:28:58 -0700
committerRoxie Linden <roxie@lindenlab.com>2024-02-22 23:11:34 -0800
commit1cd8f6f4f88f7717f0fcafbb5d47de0af59d5fb7 (patch)
tree671957c7ccbdf2dcca6913ca107952e1259f1b21
parent8859312b1f0d975793c6c2a3d7b23b9880c657c5 (diff)
Stream audio levels to and from viewers via DataChannels
-rw-r--r--indra/llwebrtc/llwebrtc.cpp204
-rw-r--r--indra/llwebrtc/llwebrtc.h13
-rw-r--r--indra/llwebrtc/llwebrtc_impl.h10
-rw-r--r--indra/newview/llvoicewebrtc.cpp444
-rw-r--r--indra/newview/llvoicewebrtc.h24
5 files changed, 316 insertions, 379 deletions
diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp
index ac5870eab3..77b050cbd0 100644
--- a/indra/llwebrtc/llwebrtc.cpp
+++ b/indra/llwebrtc/llwebrtc.cpp
@@ -62,10 +62,39 @@ void LLWebRTCImpl::init()
mTaskQueueFactory.get(),
std::unique_ptr<webrtc::AudioDeviceDataObserver>(this));
mDeviceModule->Init();
+ mDeviceModule->SetStereoRecording(false);
+ mDeviceModule->EnableBuiltInAEC(false);
updateDevices();
});
}
+void LLWebRTCImpl::terminate()
+{
+ mSignalingThread->BlockingCall(
+ [this]()
+ {
+ if (mPeerConnection)
+ {
+ mPeerConnection->Close();
+ mPeerConnection = nullptr;
+ }
+ });
+ mWorkerThread->BlockingCall(
+ [this]()
+ {
+ if (mDeviceModule)
+ {
+ mDeviceModule = nullptr;
+ }
+ });
+
+ mNetworkThread->Stop();
+ mWorkerThread->Stop();
+ mSignalingThread->Stop();
+
+}
+
+
void LLWebRTCImpl::refreshDevices()
{
mWorkerThread->PostTask([this]() { updateDevices(); });
@@ -88,22 +117,33 @@ void LLWebRTCImpl::setCaptureDevice(const std::string &id)
mWorkerThread->PostTask(
[this, id]()
{
- mDeviceModule->StopRecording();
+ bool was_recording = mDeviceModule->Recording();
+
+ if (was_recording)
+ {
+ mDeviceModule->StopRecording();
+ }
int16_t captureDeviceCount = mDeviceModule->RecordingDevices();
- for (int16_t index = 0; index < captureDeviceCount; index++)
+ int16_t index = 0; /* default to first one if no match */
+ for (int16_t i = 0; i < captureDeviceCount; i++)
{
char name[webrtc::kAdmMaxDeviceNameSize];
char guid[webrtc::kAdmMaxGuidSize];
- mDeviceModule->RecordingDeviceName(index, name, guid);
+ mDeviceModule->RecordingDeviceName(i, name, guid);
if (id == guid || id == "Default")
{
- RTC_LOG(LS_INFO) << __FUNCTION__ << "Set recording device to " << name << " " << guid << " " << index;
- mDeviceModule->SetRecordingDevice(index);
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "Set recording device to " << name << " " << guid << " " << i;
+ index = i;
break;
}
}
+ mDeviceModule->SetRecordingDevice(index);
+ mDeviceModule->InitMicrophone();
mDeviceModule->InitRecording();
- mDeviceModule->StartRecording();
+ if (was_recording)
+ {
+ mDeviceModule->StartRecording();
+ }
});
}
@@ -112,21 +152,32 @@ void LLWebRTCImpl::setRenderDevice(const std::string &id)
mWorkerThread->PostTask(
[this, id]()
{
- mDeviceModule->StopPlayout();
- int16_t renderDeviceCount = mDeviceModule->RecordingDevices();
- for (int16_t index = 0; index < renderDeviceCount; index++)
+ bool was_playing = mDeviceModule->Playing();
+ if (was_playing)
+ {
+ mDeviceModule->StopPlayout();
+ }
+ int16_t renderDeviceCount = mDeviceModule->PlayoutDevices();
+ int16_t index = 0; /* default to first one if no match */
+ for (int16_t i = 0; i < renderDeviceCount; i++)
{
char name[webrtc::kAdmMaxDeviceNameSize];
char guid[webrtc::kAdmMaxGuidSize];
- mDeviceModule->PlayoutDeviceName(index, name, guid);
+ mDeviceModule->PlayoutDeviceName(i, name, guid);
if (id == guid || id == "Default")
{
- mDeviceModule->SetPlayoutDevice(index);
+ RTC_LOG(LS_INFO) << __FUNCTION__ << "Set recording device to " << name << " " << guid << " " << i;
+ index = i;
break;
}
}
+ mDeviceModule->SetPlayoutDevice(index);
+ mDeviceModule->InitSpeaker();
mDeviceModule->InitPlayout();
- mDeviceModule->StartPlayout();
+ if (was_playing)
+ {
+ mDeviceModule->StartPlayout();
+ }
});
}
@@ -141,10 +192,6 @@ void LLWebRTCImpl::updateDevices()
mDeviceModule->PlayoutDeviceName(index, name, guid);
renderDeviceList.emplace_back(name, guid);
}
- for (auto &observer : mVoiceDevicesObserverList)
- {
- observer->OnRenderDevicesChanged(renderDeviceList);
- }
int16_t captureDeviceCount = mDeviceModule->RecordingDevices();
LLWebRTCVoiceDeviceList captureDeviceList;
@@ -157,7 +204,7 @@ void LLWebRTCImpl::updateDevices()
}
for (auto &observer : mVoiceDevicesObserverList)
{
- observer->OnCaptureDevicesChanged(captureDeviceList);
+ observer->OnDevicesChanged(renderDeviceList, captureDeviceList);
}
}
@@ -188,11 +235,6 @@ void LLWebRTCImpl::OnCaptureData(const void *audio_samples,
const size_t num_channels,
const uint32_t samples_per_sec)
{
- if (bytes_per_sample != 2)
- {
- return;
- }
-
double energy = 0;
const short *samples = (const short *) audio_samples;
for (size_t index = 0; index < num_samples * num_channels; index++)
@@ -242,6 +284,21 @@ bool LLWebRTCImpl::initializeConnection()
bool LLWebRTCImpl::initializeConnectionThreaded()
{
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm = webrtc::AudioProcessingBuilder().Create();
+ webrtc::AudioProcessing::Config apm_config;
+ apm_config.echo_canceller.enabled = false;
+ apm_config.echo_canceller.mobile_mode = false;
+ apm_config.gain_controller1.enabled = true;
+ apm_config.gain_controller1.mode =
+ webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog;
+ apm_config.gain_controller2.enabled = true;
+ apm_config.high_pass_filter.enabled = true;
+ apm_config.noise_suppression.enabled = true;
+ apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh;
+ apm_config.transient_suppression.enabled = true;
+ //
+ apm->ApplyConfig(apm_config);
+
mPeerConnectionFactory = webrtc::CreatePeerConnectionFactory(mNetworkThread.get(),
mWorkerThread.get(),
mSignalingThread.get(),
@@ -251,7 +308,7 @@ bool LLWebRTCImpl::initializeConnectionThreaded()
nullptr /* video_encoder_factory */,
nullptr /* video_decoder_factory */,
nullptr /* audio_mixer */,
- nullptr /* audio_processing */);
+ apm);
webrtc::PeerConnectionInterface::RTCConfiguration config;
config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan;
webrtc::PeerConnectionInterface::IceServer server;
@@ -278,6 +335,17 @@ bool LLWebRTCImpl::initializeConnectionThreaded()
return false;
}
+ webrtc::DataChannelInit init;
+ init.ordered = true;
+
+ auto data_channel_or_error = mPeerConnection->CreateDataChannelOrError("SLData", &init);
+ if (data_channel_or_error.ok())
+ {
+ mDataChannel = std::move(data_channel_or_error.value());
+
+ mDataChannel->RegisterObserver(this);
+ }
+
RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->signaling_state();
cricket::AudioOptions audioOptions;
@@ -305,7 +373,6 @@ bool LLWebRTCImpl::initializeConnectionThreaded()
codecparam.num_channels = 1;
codecparam.parameters["stereo"] = "0";
codecparam.parameters["sprop-stereo"] = "0";
-
params.codecs.push_back(codecparam);
sender->SetParameters(params);
}
@@ -313,21 +380,6 @@ bool LLWebRTCImpl::initializeConnectionThreaded()
mPeerConnection->SetLocalDescription(rtc::scoped_refptr<webrtc::SetLocalDescriptionObserverInterface>(this));
RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->signaling_state();
-
- webrtc::DataChannelInit init;
- init.ordered = true;
- init.reliable = true;
- auto data_channel_or_error = mPeerConnection->CreateDataChannelOrError("SLData", &init);
- if (data_channel_or_error.ok())
- {
- mDataChannel = std::move(data_channel_or_error.value());
- }
- else
- {
- shutdownConnection();
- return false;
- }
- mDataChannel->RegisterObserver(this);
return true;
}
@@ -414,6 +466,18 @@ void LLWebRTCImpl::setSpeakerVolume(float volume)
});
}
+void LLWebRTCImpl::requestAudioLevel()
+{
+ mWorkerThread->PostTask(
+ [this]()
+ {
+ for (auto &observer : mAudioObserverList)
+ {
+ observer->OnAudioLevel((float)mTuningEnergy);
+ }
+ });
+}
+
//
// PeerConnectionObserver implementation.
//
@@ -429,6 +493,13 @@ void LLWebRTCImpl::OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface
RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id();
}
+void LLWebRTCImpl::OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel)
+{
+ mDataChannel = channel;
+ channel->RegisterObserver(this);
+}
+
+
void LLWebRTCImpl::OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state)
{
LLWebRTCSignalingObserver::IceGatheringState webrtc_new_state = LLWebRTCSignalingObserver::IceGatheringState::ICE_GATHERING_NEW;
@@ -469,10 +540,14 @@ void LLWebRTCImpl::OnConnectionChange(webrtc::PeerConnectionInterface::PeerConne
{
if (new_state == webrtc::PeerConnectionInterface::PeerConnectionState::kConnected)
{
- for (auto &observer : mSignalingObserverList)
- {
- observer->OnAudioEstablished(this);
- }
+ mWorkerThread->PostTask([this]() {
+ mDeviceModule->StartRecording();
+ mDeviceModule->StartPlayout();
+ for (auto &observer : mSignalingObserverList)
+ {
+ observer->OnAudioEstablished(this);
+ }
+ });
}
break;
}
@@ -589,9 +664,44 @@ void LLWebRTCImpl::OnSetLocalDescriptionComplete(webrtc::RTCError error)
}
}
+void LLWebRTCImpl::setAudioObserver(LLWebRTCAudioObserver *observer) { mAudioObserverList.emplace_back(observer); }
+
+void LLWebRTCImpl::unsetAudioObserver(LLWebRTCAudioObserver *observer)
+{
+ std::vector<LLWebRTCAudioObserver *>::iterator it = std::find(mAudioObserverList.begin(), mAudioObserverList.end(), observer);
+ if (it != mAudioObserverList.end())
+ {
+ mAudioObserverList.erase(it);
+ }
+}
+
//
// DataChannelObserver implementation
//
+
+void LLWebRTCImpl::OnStateChange()
+{
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State: " << webrtc::DataChannelInterface::DataStateString(mDataChannel->state());
+ switch (mDataChannel->state())
+ {
+ case webrtc::DataChannelInterface::kOpen:
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State Open";
+ break;
+ case webrtc::DataChannelInterface::kConnecting:
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State Connecting";
+ break;
+ case webrtc::DataChannelInterface::kClosing:
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State closing";
+ break;
+ case webrtc::DataChannelInterface::kClosed:
+ RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State closed";
+ break;
+ default:
+ break;
+ }
+}
+
+
void LLWebRTCImpl::OnMessage(const webrtc::DataBuffer& buffer)
{
std::string data((const char*)buffer.data.cdata(), buffer.size());
@@ -632,4 +742,12 @@ void init()
gWebRTCImpl->AddRef();
gWebRTCImpl->init();
}
+
+void terminate()
+{
+ gWebRTCImpl->terminate();
+ gWebRTCImpl->Release();
+ gWebRTCImpl = nullptr;
+}
+
} // namespace llwebrtc
diff --git a/indra/llwebrtc/llwebrtc.h b/indra/llwebrtc/llwebrtc.h
index a6e754684e..f1ba1620e3 100644
--- a/indra/llwebrtc/llwebrtc.h
+++ b/indra/llwebrtc/llwebrtc.h
@@ -45,6 +45,7 @@
namespace llwebrtc
{
LLSYMEXPORT void init();
+LLSYMEXPORT void terminate();
struct LLWebRTCIceCandidate
{
@@ -69,8 +70,7 @@ typedef std::vector<LLWebRTCVoiceDevice> LLWebRTCVoiceDeviceList;
class LLWebRTCDevicesObserver
{
public:
- virtual void OnRenderDevicesChanged(const LLWebRTCVoiceDeviceList &render_devices) = 0;
- virtual void OnCaptureDevicesChanged(const LLWebRTCVoiceDeviceList &capture_devices) = 0;
+ virtual void OnDevicesChanged(const LLWebRTCVoiceDeviceList &render_devices, const LLWebRTCVoiceDeviceList &capture_devices) = 0;
};
class LLWebRTCDeviceInterface
@@ -89,11 +89,20 @@ class LLWebRTCDeviceInterface
virtual double getTuningMicrophoneEnergy() = 0;
};
+class LLWebRTCAudioObserver
+{
+ public:
+ virtual void OnAudioLevel(float level) = 0;
+};
+
class LLWebRTCAudioInterface
{
public:
+ virtual void setAudioObserver(LLWebRTCAudioObserver *observer) = 0;
+ virtual void unsetAudioObserver(LLWebRTCAudioObserver *observer) = 0;
virtual void setMute(bool mute) = 0;
virtual void setSpeakerVolume(float volume) = 0; // volume between 0.0 and 1.0
+ virtual void requestAudioLevel() = 0;
};
class LLWebRTCDataObserver
diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h
index 1ad117c7f3..1670d10705 100644
--- a/indra/llwebrtc/llwebrtc_impl.h
+++ b/indra/llwebrtc/llwebrtc_impl.h
@@ -83,6 +83,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface,
~LLWebRTCImpl() {}
void init();
+ void terminate();
//
// LLWebRTCDeviceInterface
@@ -126,8 +127,11 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface,
//
// LLWebRTCAudioInterface
//
+ void setAudioObserver(LLWebRTCAudioObserver *observer) override;
+ void unsetAudioObserver(LLWebRTCAudioObserver *observer) override;
void setMute(bool mute) override;
void setSpeakerVolume(float folume) override; // range 0.0-1.0
+ void requestAudioLevel() override;
//
// LLWebRTCDataInterface
@@ -144,7 +148,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface,
void OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) override;
void OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) override;
- void OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) override {}
+ void OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) override;
void OnRenegotiationNeeded() override {}
void OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) override {};
void OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) override;
@@ -171,7 +175,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface,
//
// DataChannelObserver implementation.
//
- void OnStateChange() override {}
+ void OnStateChange() override;
void OnMessage(const webrtc::DataBuffer& buffer) override;
protected:
@@ -200,6 +204,8 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface,
rtc::scoped_refptr<webrtc::PeerConnectionInterface> mPeerConnection;
+ std::vector<LLWebRTCAudioObserver *> mAudioObserverList;
+
std::vector<LLWebRTCDataObserver *> mDataObserverList;
rtc::scoped_refptr<webrtc::DataChannelInterface> mDataChannel;
};
diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp
index 9013de67f5..da27ff7320 100644
--- a/indra/newview/llvoicewebrtc.cpp
+++ b/indra/newview/llvoicewebrtc.cpp
@@ -74,6 +74,9 @@
// for base64 decoding
#include "apr_base64.h"
+#include "json/reader.h"
+#include "json/writer.h"
+
#define USE_SESSION_GROUPS 0
#define VX_NULL_POSITION -2147483648.0 /*The Silence*/
@@ -88,7 +91,7 @@ namespace {
static const std::string VOICE_SERVER_TYPE = "WebRTC";
// Don't send positional updates more frequently than this:
- const F32 UPDATE_THROTTLE_SECONDS = 0.5f;
+ const F32 UPDATE_THROTTLE_SECONDS = 0.1f;
// Timeout for connection to WebRTC
const F32 CONNECT_ATTEMPT_TIMEOUT = 300.0f;
@@ -375,7 +378,7 @@ void LLWebRTCVoiceClient::terminate()
mRelogRequested = false;
mVoiceEnabled = false;
- llwebrtc::init();
+ llwebrtc::terminate();
sShuttingDown = true;
sPump = NULL;
@@ -684,7 +687,7 @@ void LLWebRTCVoiceClient::voiceControlStateMachine()
{
performMicTuning();
}
-
+ sessionEstablished();
setVoiceControlStateUnless(VOICE_STATE_WAIT_FOR_CHANNEL, VOICE_STATE_SESSION_RETRY);
}
break;
@@ -1116,6 +1119,9 @@ bool LLWebRTCVoiceClient::addAndJoinSession(const sessionStatePtr_t &nextSession
// Just flush it all out and start new.
mWebRTCPump.discard();
+ // add 'self' participant.
+ addParticipantByID(gAgent.getID());
+
notifyStatusObservers(LLVoiceClientStatusObserver::STATUS_JOINED);
return true;
@@ -1922,16 +1928,6 @@ void LLWebRTCVoiceClient::sessionMediaDisconnectSendMessage(const sessionStatePt
}
-void LLWebRTCVoiceClient::OnCaptureDevicesChanged(const llwebrtc::LLWebRTCVoiceDeviceList& render_devices)
-{
- clearCaptureDevices();
- for (auto &device : render_devices)
- {
- LLWebRTCVoiceClient::addCaptureDevice(LLVoiceDevice(device.display_name, device.id));
- }
- LLWebRTCVoiceClient::setDevicesListUpdated(true);
-}
-
void LLWebRTCVoiceClient::clearCaptureDevices()
{
LL_DEBUGS("Voice") << "called" << LL_ENDL;
@@ -1967,13 +1963,19 @@ void LLWebRTCVoiceClient::setDevicesListUpdated(bool state)
mDevicesListUpdated = state;
}
-void LLWebRTCVoiceClient::OnRenderDevicesChanged(const llwebrtc::LLWebRTCVoiceDeviceList &render_devices)
+void LLWebRTCVoiceClient::OnDevicesChanged(const llwebrtc::LLWebRTCVoiceDeviceList &render_devices,
+ const llwebrtc::LLWebRTCVoiceDeviceList &capture_devices)
{
clearRenderDevices();
for (auto &device : render_devices)
{
addRenderDevice(LLVoiceDevice(device.display_name, device.id));
}
+ clearCaptureDevices();
+ for (auto &device : capture_devices)
+ {
+ addCaptureDevice(LLVoiceDevice(device.display_name, device.id));
+ }
setDevicesListUpdated(true);
}
@@ -2105,7 +2107,7 @@ bool LLWebRTCVoiceClient::deviceSettingsAvailable()
{
bool result = true;
- if(mRenderDevices.empty())
+ if(mRenderDevices.empty() || mCaptureDevices.empty())
result = false;
return result;
@@ -2113,11 +2115,7 @@ bool LLWebRTCVoiceClient::deviceSettingsAvailable()
bool LLWebRTCVoiceClient::deviceSettingsUpdated()
{
bool updated = mDevicesListUpdated;
- if (mDevicesListUpdated)
- {
- // a hot swap event or a polling of the audio devices has been parsed since the last redraw of the input and output device panel.
- mDevicesListUpdated = false; // toggle the setting
- }
+ mDevicesListUpdated = false;
return updated;
}
@@ -2285,20 +2283,12 @@ void LLWebRTCVoiceClient::setHidden(bool hidden)
void LLWebRTCVoiceClient::sendPositionAndVolumeUpdate(void)
{
- std::ostringstream stream;
-
if (mSpatialCoordsDirty && inSpatialChannel())
{
LLVector3 l, u, a, vel;
LLVector3d pos;
mSpatialCoordsDirty = false;
-
- // Always send both speaker and listener positions together.
- stream << "<Request requestId=\"" << mCommandCookie++ << "\" action=\"Session.Set3DPosition.1\">"
- << "<SessionHandle>" << getAudioSessionHandle() << "</SessionHandle>";
-
- stream << "<SpeakerPosition>";
LLMatrix3 avatarRot = mAvatarRot.getMatrix3();
@@ -2321,38 +2311,6 @@ void LLWebRTCVoiceClient::sendPositionAndVolumeUpdate(void)
pos.mdV[i] = VX_NULL_POSITION;
}
}
-
- stream
- << "<Position>"
- << "<X>" << pos.mdV[VX] << "</X>"
- << "<Y>" << pos.mdV[VY] << "</Y>"
- << "<Z>" << pos.mdV[VZ] << "</Z>"
- << "</Position>"
- << "<Velocity>"
- << "<X>" << vel.mV[VX] << "</X>"
- << "<Y>" << vel.mV[VY] << "</Y>"
- << "<Z>" << vel.mV[VZ] << "</Z>"
- << "</Velocity>"
- << "<AtOrientation>"
- << "<X>" << a.mV[VX] << "</X>"
- << "<Y>" << a.mV[VY] << "</Y>"
- << "<Z>" << a.mV[VZ] << "</Z>"
- << "</AtOrientation>"
- << "<UpOrientation>"
- << "<X>" << u.mV[VX] << "</X>"
- << "<Y>" << u.mV[VY] << "</Y>"
- << "<Z>" << u.mV[VZ] << "</Z>"
- << "</UpOrientation>"
- << "<LeftOrientation>"
- << "<X>" << l.mV [VX] << "</X>"
- << "<Y>" << l.mV [VY] << "</Y>"
- << "<Z>" << l.mV [VZ] << "</Z>"
- << "</LeftOrientation>"
- ;
-
- stream << "</SpeakerPosition>";
-
- stream << "<ListenerPosition>";
LLVector3d earPosition;
LLVector3 earVelocity;
@@ -2397,40 +2355,13 @@ void LLWebRTCVoiceClient::sendPositionAndVolumeUpdate(void)
pos.mdV[i] = VX_NULL_POSITION;
}
}
-
- stream
- << "<Position>"
- << "<X>" << pos.mdV[VX] << "</X>"
- << "<Y>" << pos.mdV[VY] << "</Y>"
- << "<Z>" << pos.mdV[VZ] << "</Z>"
- << "</Position>"
- << "<Velocity>"
- << "<X>" << vel.mV[VX] << "</X>"
- << "<Y>" << vel.mV[VY] << "</Y>"
- << "<Z>" << vel.mV[VZ] << "</Z>"
- << "</Velocity>"
- << "<AtOrientation>"
- << "<X>" << a.mV[VX] << "</X>"
- << "<Y>" << a.mV[VY] << "</Y>"
- << "<Z>" << a.mV[VZ] << "</Z>"
- << "</AtOrientation>"
- << "<UpOrientation>"
- << "<X>" << u.mV[VX] << "</X>"
- << "<Y>" << u.mV[VY] << "</Y>"
- << "<Z>" << u.mV[VZ] << "</Z>"
- << "</UpOrientation>"
- << "<LeftOrientation>"
- << "<X>" << l.mV [VX] << "</X>"
- << "<Y>" << l.mV [VY] << "</Y>"
- << "<Z>" << l.mV [VZ] << "</Z>"
- << "</LeftOrientation>"
- ;
-
- stream << "</ListenerPosition>";
+ }
- stream << "<ReqDispositionType>1</ReqDispositionType>"; //do not generate responses for update requests
- stream << "</Request>\n\n\n";
- }
+ if (mWebRTCAudioInterface)
+ {
+ mWebRTCAudioInterface->requestAudioLevel();
+ }
+
if(mAudioSession && (mAudioSession->mVolumeDirty || mAudioSession->mMuteDirty))
{
@@ -2469,41 +2400,12 @@ void LLWebRTCVoiceClient::sendPositionAndVolumeUpdate(void)
}
LL_DEBUGS("Voice") << "Setting volume/mute for avatar " << p->mAvatarID << " to " << volume << (mute?"/true":"/false") << LL_ENDL;
-
- // SLIM SDK: Send both volume and mute commands.
-
- // Send a "volume for me" command for the user.
- stream << "<Request requestId=\"" << mCommandCookie++ << "\" action=\"Session.SetParticipantVolumeForMe.1\">"
- << "<SessionHandle>" << getAudioSessionHandle() << "</SessionHandle>"
- << "<ParticipantURI>" << p->mURI << "</ParticipantURI>"
- << "<Volume>" << volume << "</Volume>"
- << "</Request>\n\n\n";
-
- if(!mAudioSession->mIsP2P)
- {
- // Send a "mute for me" command for the user
- // Doesn't work in P2P sessions
- stream << "<Request requestId=\"" << mCommandCookie++ << "\" action=\"Session.SetParticipantMuteForMe.1\">"
- << "<SessionHandle>" << getAudioSessionHandle() << "</SessionHandle>"
- << "<ParticipantURI>" << p->mURI << "</ParticipantURI>"
- << "<Mute>" << (mute?"1":"0") << "</Mute>"
- << "<Scope>Audio</Scope>"
- << "</Request>\n\n\n";
- }
}
p->mVolumeDirty = false;
}
}
}
-
- std::string update(stream.str());
- if(!update.empty())
- {
- LL_DEBUGS("VoiceUpdate") << "sending update " << update << LL_ENDL;
- writeString(update);
- }
-
}
void LLWebRTCVoiceClient::sendLocalAudioUpdates()
@@ -2700,6 +2602,7 @@ void LLWebRTCVoiceClient::OnAudioEstablished(llwebrtc::LLWebRTCAudioInterface *
{
LL_INFOS("Voice") << "On AudioEstablished." << LL_ENDL;
mWebRTCAudioInterface = audio_interface;
+ mWebRTCAudioInterface->setAudioObserver(this);
float speaker_volume = 0;
audio_interface->setMute(true);
{
@@ -2710,9 +2613,68 @@ void LLWebRTCVoiceClient::OnAudioEstablished(llwebrtc::LLWebRTCAudioInterface *
setVoiceControlStateUnless(VOICE_STATE_SESSION_ESTABLISHED, VOICE_STATE_SESSION_RETRY);
}
+void LLWebRTCVoiceClient::OnAudioLevel(float level)
+{
+ if (mWebRTCDataInterface)
+ {
+ Json::FastWriter writer;
+ Json::Value root;
+ root["p"] = (UINT32) (level * 256);
+ std::string json_data = writer.write(root);
+
+ mWebRTCDataInterface->sendData(json_data, false);
+ }
+}
+
void LLWebRTCVoiceClient::OnDataReceived(const std::string& data, bool binary)
{
-
+ // incoming data will be a json structure (if it's not binary.) We may pack
+ // binary for size reasons. Most of the keys in the json objects are
+ // single or double characters for size reasons.
+ // The primary element is:
+ // An object where each key is an agent id. (in the future, we may allow
+ // integer indices into an agentid list, populated on join commands. For size.
+ // Each key will point to a json object with keys identifying what's updated.
+ // 'p' - audio source power (level/volume) (int8 as int)
+ // 'j' - join - object of join data (TBD) (true for now)
+ // 'l' - boolean, always true if exists.
+
+ if (binary)
+ {
+ LL_WARNS("Voice") << "Binary data received from data channel." << LL_ENDL;
+ return;
+ }
+
+ Json::Reader reader;
+ Json::Value voice_data;
+ if (reader.parse(data, voice_data, false)) // don't collect comments
+ {
+ if (!voice_data.isObject())
+ {
+ LL_WARNS("Voice") << "Expected object from data channel:" << data << LL_ENDL;
+ return;
+ }
+ for (auto &participant_id : voice_data.getMemberNames())
+ {
+ std::string foo = participant_id;
+ LL_WARNS("Voice") << "Participant ID (" << participant_id << "):" << data << LL_ENDL;
+
+ LLUUID agent_id(participant_id);
+ if (agent_id.isNull())
+ {
+ LL_WARNS("Voice") << "Bad participant ID from data channel (" << participant_id << "):" << data << LL_ENDL;
+ continue;
+ }
+ participantStatePtr_t participant = findParticipantByID(agent_id);
+ if (participant)
+ {
+ participant->mPower = (F32) (voice_data[participant_id].get("p", Json::Value(participant->mPower)).asInt()) / 256;
+ /* WebRTC appears to have deprecated VAD, but it's still in the Audio Processing Module so maybe we
+ can use it at some point when we actually process frames. */
+ participant->mIsSpeaking = participant->mPower > 0.05;
+ }
+ }
+ }
}
@@ -2994,7 +2956,7 @@ void LLWebRTCVoiceClient::sessionAddedEvent(
session->mAlternateSIPURI = session->mSIPURI;
// and generate a proper URI from the ID.
- setSessionURI(session, sipURIFromID(session->mCallerID));
+ setSessionURI(session, session->mCallerID.asString());
}
else
{
@@ -3065,7 +3027,7 @@ void LLWebRTCVoiceClient::joinedAudioSession(const sessionStatePtr_t &session)
if(!session->mIsChannel)
{
// this is a p2p session. Make sure the other end is added as a participant.
- participantStatePtr_t participant(session->addParticipant(session->mSIPURI));
+ participantStatePtr_t participant(session->addParticipant(LLUUID(session->mSIPURI)));
if(participant)
{
if(participant->mAvatarIDValid)
@@ -3365,7 +3327,7 @@ void LLWebRTCVoiceClient::participantAddedEvent(
sessionStatePtr_t session(findSession(sessionHandle));
if(session)
{
- participantStatePtr_t participant(session->addParticipant(uriString));
+ participantStatePtr_t participant(session->addParticipant(LLUUID(uriString)));
if(participant)
{
participant->mAccountName = nameString;
@@ -3424,92 +3386,6 @@ void LLWebRTCVoiceClient::participantRemovedEvent(
}
}
-
-void LLWebRTCVoiceClient::participantUpdatedEvent(
- std::string &sessionHandle,
- std::string &sessionGroupHandle,
- std::string &uriString,
- std::string &alias,
- bool isModeratorMuted,
- bool isSpeaking,
- int volume,
- F32 energy)
-{
- sessionStatePtr_t session(findSession(sessionHandle));
- if(session)
- {
- participantStatePtr_t participant(session->findParticipant(uriString));
-
- if(participant)
- {
- //LL_INFOS("Voice") << "Participant Update for " << participant->mDisplayName << LL_ENDL;
-
- participant->mIsSpeaking = isSpeaking;
- participant->mIsModeratorMuted = isModeratorMuted;
-
- // SLIM SDK: convert range: ensure that energy is set to zero if is_speaking is false
- if (isSpeaking)
- {
- participant->mSpeakingTimeout.reset();
- participant->mPower = energy;
- }
- else
- {
- participant->mPower = 0.0f;
- }
-
- // Ignore incoming volume level if it has been explicitly set, or there
- // is a volume or mute change pending.
- if ( !participant->mVolumeSet && !participant->mVolumeDirty)
- {
- participant->mVolume = (F32)volume * VOLUME_SCALE_WEBRTC;
- }
-
- // *HACK: mantipov: added while working on EXT-3544
- /*
- Sometimes LLVoiceClient::participantUpdatedEvent callback is called BEFORE
- LLViewerChatterBoxSessionAgentListUpdates::post() sometimes AFTER.
-
- participantUpdatedEvent updates voice participant state in particular participantState::mIsModeratorMuted
- Originally we wanted to update session Speaker Manager to fire LLSpeakerVoiceModerationEvent to fix the EXT-3544 bug.
- Calling of the LLSpeakerMgr::update() method was added into LLIMMgr::processAgentListUpdates.
-
- But in case participantUpdatedEvent() is called after LLViewerChatterBoxSessionAgentListUpdates::post()
- voice participant mIsModeratorMuted is changed after speakers are updated in Speaker Manager
- and event is not fired.
-
- So, we have to call LLSpeakerMgr::update() here.
- */
- LLVoiceChannel* voice_cnl = LLVoiceChannel::getCurrentVoiceChannel();
-
- // ignore session ID of local chat
- if (voice_cnl && voice_cnl->getSessionID().notNull())
- {
- LLSpeakerMgr* speaker_manager = LLIMModel::getInstance()->getSpeakerManager(voice_cnl->getSessionID());
- if (speaker_manager)
- {
- speaker_manager->update(true);
-
- // also initialize voice moderate_mode depend on Agent's participant. See EXT-6937.
- // *TODO: remove once a way to request the current voice channel moderation mode is implemented.
- if (gAgent.getID() == participant->mAvatarID)
- {
- speaker_manager->initVoiceModerateMode();
- }
- }
- }
- }
- else
- {
- LL_WARNS("Voice") << "unknown participant: " << uriString << LL_ENDL;
- }
- }
- else
- {
- LL_DEBUGS("Voice") << "unknown session " << sessionHandle << LL_ENDL;
- }
-}
-
void LLWebRTCVoiceClient::messageEvent(
std::string &sessionHandle,
std::string &uriString,
@@ -3743,8 +3619,9 @@ void LLWebRTCVoiceClient::muteListChanged()
/////////////////////////////
// Managing list of participants
-LLWebRTCVoiceClient::participantState::participantState(const std::string &uri) :
- mURI(uri),
+LLWebRTCVoiceClient::participantState::participantState(const LLUUID& agent_id) :
+ mURI(agent_id.asString()),
+ mAvatarID(agent_id),
mPTT(false),
mIsSpeaking(false),
mIsModeratorMuted(false),
@@ -3760,55 +3637,27 @@ LLWebRTCVoiceClient::participantState::participantState(const std::string &uri)
{
}
-LLWebRTCVoiceClient::participantStatePtr_t LLWebRTCVoiceClient::sessionState::addParticipant(const std::string &uri)
+LLWebRTCVoiceClient::participantStatePtr_t LLWebRTCVoiceClient::sessionState::addParticipant(const LLUUID& agent_id)
{
participantStatePtr_t result;
- bool useAlternateURI = false;
- // Note: this is mostly the body of LLWebRTCVoiceClient::sessionState::findParticipant(), but since we need to know if it
- // matched the alternate SIP URI (so we can add it properly), we need to reproduce it here.
- {
- participantMap::iterator iter = mParticipantsByURI.find(uri);
+ participantUUIDMap::iterator iter = mParticipantsByUUID.find(agent_id);
- if(iter == mParticipantsByURI.end())
- {
- if(!mAlternateSIPURI.empty() && (uri == mAlternateSIPURI))
- {
- // This is a p2p session (probably with the SLIM client) with an alternate URI for the other participant.
- // Use mSIPURI instead, since it will be properly encoded.
- iter = mParticipantsByURI.find(mSIPURI);
- useAlternateURI = true;
- }
- }
- if(iter != mParticipantsByURI.end())
- {
- result = iter->second;
- }
+ if (iter != mParticipantsByUUID.end())
+ {
+ result = iter->second;
}
if(!result)
{
// participant isn't already in one list or the other.
- result.reset(new participantState(useAlternateURI?mSIPURI:uri));
- mParticipantsByURI.insert(participantMap::value_type(result->mURI, result));
+ result.reset(new participantState(agent_id));
+ mParticipantsByURI.insert(participantMap::value_type(agent_id.asString(), result));
mParticipantsChanged = true;
-
- // Try to do a reverse transform on the URI to get the GUID back.
- {
- LLUUID id;
- if(LLWebRTCVoiceClient::getInstance()->IDFromName(result->mURI, id))
- {
- result->mAvatarIDValid = true;
- result->mAvatarID = id;
- }
- else
- {
- // Create a UUID by hashing the URI, but do NOT set mAvatarIDValid.
- // This indicates that the ID will not be in the name cache.
- result->mAvatarID.generate(uri);
- }
- }
+
+ result->mAvatarIDValid = true;
+ result->mAvatarID = agent_id;
if(result->updateMuteState())
{
@@ -3983,6 +3832,16 @@ LLWebRTCVoiceClient::participantStatePtr_t LLWebRTCVoiceClient::findParticipantB
return result;
}
+LLWebRTCVoiceClient::participantStatePtr_t LLWebRTCVoiceClient::addParticipantByID(const LLUUID &id)
+{
+ participantStatePtr_t result;
+ if (mAudioSession)
+ {
+ result = mAudioSession->addParticipant(id);
+ }
+ return result;
+}
+
// Check for parcel boundary crossing
@@ -4156,68 +4015,14 @@ bool LLWebRTCVoiceClient::setSpatialChannel(
void LLWebRTCVoiceClient::callUser(const LLUUID &uuid)
{
- std::string userURI = sipURIFromID(uuid);
-
- switchChannel(userURI, false, true, true);
+ switchChannel(uuid.asString(), false, true, true);
}
-#if 0
-// WebRTC text IMs are not in use.
-LLWebRTCVoiceClient::sessionStatePtr_t LLWebRTCVoiceClient::startUserIMSession(const LLUUID &uuid)
-{
- // Figure out if a session with the user already exists
- sessionStatePtr_t session(findSession(uuid));
- if(!session)
- {
- // No session with user, need to start one.
- std::string uri = sipURIFromID(uuid);
- session = addSession(uri);
-
- llassert(session);
- if (!session)
- return session;
- session->mIsSpatial = false;
- session->mReconnect = false;
- session->mIsP2P = true;
- session->mCallerID = uuid;
- }
-
- if(session->mHandle.empty())
- {
- // Session isn't active -- start it up.
- sessionCreateSendMessage(session, false, false);
- }
- else
- {
- // Session is already active -- start up text.
- sessionTextConnectSendMessage(session);
- }
-
- return session;
-}
-#endif
void LLWebRTCVoiceClient::endUserIMSession(const LLUUID &uuid)
{
-#if 0
- // WebRTC text IMs are not in use.
-
- // Figure out if a session with the user exists
- sessionStatePtr_t session(findSession(uuid));
- if(session)
- {
- // found the session
- if(!session->mHandle.empty())
- {
- // sessionTextDisconnectSendMessage(session); // a SLim leftover, not used any more.
- }
- }
- else
- {
- LL_DEBUGS("Voice") << "Session not found for participant ID " << uuid << LL_ENDL;
- }
-#endif
+
}
bool LLWebRTCVoiceClient::isValidChannel(std::string &sessionHandle)
{
@@ -4361,16 +4166,6 @@ bool LLWebRTCVoiceClient::inProximalChannel()
return result;
}
-std::string LLWebRTCVoiceClient::sipURIFromID(const LLUUID &id)
-{
- std::string result;
- result = "sip:";
- result += nameFromID(id);
- result += "@";
-
- return result;
-}
-
std::string LLWebRTCVoiceClient::nameFromAvatar(LLVOAvatar *avatar)
{
std::string result;
@@ -4785,10 +4580,6 @@ BOOL LLWebRTCVoiceClient::getIsSpeaking(const LLUUID& id)
participantStatePtr_t participant(findParticipantByID(id));
if(participant)
{
- if (participant->mSpeakingTimeout.getElapsedTimeF32() > SPEAKING_TIMEOUT)
- {
- participant->mIsSpeaking = FALSE;
- }
result = participant->mIsSpeaking;
}
@@ -4814,7 +4605,7 @@ F32 LLWebRTCVoiceClient::getCurrentPower(const LLUUID& id)
     participantStatePtr_t participant(findParticipantByID(id));
     if(participant)
     {
-        result = participant->mPower;
+        result = participant->mPower*4;
}
return result;
@@ -5178,7 +4970,10 @@ void LLWebRTCVoiceClient::sessionState::for_eachPredicate(const LLWebRTCVoiceCli
}
}
-
+void LLWebRTCVoiceClient::sessionEstablished()
+{
+ addSession(gAgent.getRegion()->getRegionID().asString());
+}
LLWebRTCVoiceClient::sessionStatePtr_t LLWebRTCVoiceClient::findSession(const std::string &handle)
{
@@ -6367,6 +6162,7 @@ void LLWebRTCVoiceClient::captureBufferPlayStopSendMessage()
}
}
+std::string LLWebRTCVoiceClient::sipURIFromID(const LLUUID& id) { return id.asString(); }
LLWebRTCSecurity::LLWebRTCSecurity()
diff --git a/indra/newview/llvoicewebrtc.h b/indra/newview/llvoicewebrtc.h
index 0d6988e1ef..eb898ab4eb 100644
--- a/indra/newview/llvoicewebrtc.h
+++ b/indra/newview/llvoicewebrtc.h
@@ -60,6 +60,7 @@ class LLWebRTCVoiceClient : public LLSingleton<LLWebRTCVoiceClient>,
virtual public LLVoiceEffectInterface,
public llwebrtc::LLWebRTCDevicesObserver,
public llwebrtc::LLWebRTCSignalingObserver,
+ public llwebrtc::LLWebRTCAudioObserver,
public llwebrtc::LLWebRTCDataObserver
{
LLSINGLETON_C11(LLWebRTCVoiceClient);
@@ -80,6 +81,8 @@ public:
// Returns true if WebRTC has successfully logged in and is not in error state
bool isVoiceWorking() const override;
+ std::string sipURIFromID(const LLUUID &id) override;
+
/////////////////////
/// @name Tuning
//@{
@@ -212,7 +215,6 @@ public:
void removeObserver(LLVoiceClientParticipantObserver* observer) override;
//@}
- std::string sipURIFromID(const LLUUID &id) override;
//@}
/// @name LLVoiceEffectInterface virtual implementations
@@ -242,8 +244,8 @@ public:
/// @name Devices change notification
// LLWebRTCDevicesObserver
//@{
- void OnRenderDevicesChanged(const llwebrtc::LLWebRTCVoiceDeviceList &render_devices) override;
- void OnCaptureDevicesChanged(const llwebrtc::LLWebRTCVoiceDeviceList &render_devices) override;
+ void OnDevicesChanged(const llwebrtc::LLWebRTCVoiceDeviceList &render_devices,
+ const llwebrtc::LLWebRTCVoiceDeviceList &capture_devices) override;
//@}
//////////////////////////////
@@ -256,6 +258,13 @@ public:
void OnRenegotiationNeeded() override;
void OnAudioEstablished(llwebrtc::LLWebRTCAudioInterface *audio_interface) override;
//@}
+
+ //////////////////////////////
+ /// @name Audio level notification
+ // LLWebRTCAudioObserver
+ //@{
+ void OnAudioLevel(float level) override;
+ //@}
/////////////////////////
/// @name Data Notification
@@ -307,7 +316,7 @@ protected:
struct participantState
{
public:
- participantState(const std::string &uri);
+ participantState(const LLUUID& agent_id);
bool updateMuteState(); // true if mute state has changed
bool isAvatar();
@@ -348,7 +357,7 @@ protected:
static ptr_t createSession();
~sessionState();
- participantStatePtr_t addParticipant(const std::string &uri);
+ participantStatePtr_t addParticipant(const LLUUID& agent_id);
void removeParticipant(const participantStatePtr_t &participant);
void removeAllParticipants();
@@ -496,7 +505,6 @@ protected:
void sessionRemovedEvent(std::string &sessionHandle, std::string &sessionGroupHandle);
void participantAddedEvent(std::string &sessionHandle, std::string &sessionGroupHandle, std::string &uriString, std::string &alias, std::string &nameString, std::string &displayNameString, int participantType);
void participantRemovedEvent(std::string &sessionHandle, std::string &sessionGroupHandle, std::string &uriString, std::string &alias, std::string &nameString);
- void participantUpdatedEvent(std::string &sessionHandle, std::string &sessionGroupHandle, std::string &uriString, std::string &alias, bool isModeratorMuted, bool isSpeaking, int volume, F32 energy);
void voiceServiceConnectionStateChangedEvent(int statusCode, std::string &statusString, std::string &build_id);
void auxAudioPropertiesEvent(F32 energy);
void messageEvent(std::string &sessionHandle, std::string &uriString, std::string &alias, std::string &messageHeader, std::string &messageBody, std::string &applicationString);
@@ -545,7 +553,7 @@ protected:
void filePlaybackSetMode(bool vox = false, float speed = 1.0f);
participantStatePtr_t findParticipantByID(const LLUUID& id);
-
+ participantStatePtr_t addParticipantByID(const LLUUID &id);
#if 0
////////////////////////////////////////
@@ -556,7 +564,7 @@ protected:
sessionIterator sessionsBegin(void);
sessionIterator sessionsEnd(void);
#endif
-
+ void sessionEstablished();
sessionStatePtr_t findSession(const std::string &handle);
sessionStatePtr_t findSessionBeingCreatedByURI(const std::string &uri);
sessionStatePtr_t findSession(const LLUUID &participant_id);