90 files changed, 8529 insertions, 1954 deletions
diff --git a/.github/labeler.yaml b/.github/labeler.yaml index d31a361baf..6e03801b65 100644 --- a/.github/labeler.yaml +++ b/.github/labeler.yaml @@ -76,3 +76,6 @@ c/cpp: - '**/*.i' - '**/*.inl' - '**/*.y' + +'team:viewer': + - '*' diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index e3cc2f8527..fc50665ddd 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -399,15 +399,32 @@ jobs: needs: build runs-on: ubuntu-latest steps: + - name: Download viewer exe + uses: actions/download-artifact@v4 + with: + name: Windows-app + path: _artifacts + - name: Download Windows Symbols + if: env.BUGSPLAT_USER && env.BUGSPLAT_PASS + uses: actions/download-artifact@v4 + with: + name: Windows-symbols + - name: Extract viewer pdb + if: env.BUGSPLAT_USER && env.BUGSPLAT_PASS + shell: bash + run: | + tar -xJf "${{ needs.build.outputs.viewer_channel }}.sym.tar.xz" -C _artifacts - name: Post Windows symbols if: env.BUGSPLAT_USER && env.BUGSPLAT_PASS - uses: secondlife/viewer-build-util/post-bugsplat-windows@v2 + uses: secondlife-3p/symbol-upload@v10 with: username: ${{ env.BUGSPLAT_USER }} password: ${{ env.BUGSPLAT_PASS }} database: "SecondLife_Viewer_2018" - channel: ${{ needs.build.outputs.viewer_channel }} + application: ${{ needs.build.outputs.viewer_channel }} version: ${{ needs.build.outputs.viewer_version }} + directory: _artifacts + files: "**/{SecondLifeViewer.exe,llwebrtc.dll,*.pdb}" post-mac-symbols: env: @@ -416,15 +433,22 @@ jobs: needs: build runs-on: ubuntu-latest steps: + - name: Download Mac Symbols + if: env.BUGSPLAT_USER && env.BUGSPLAT_PASS + uses: actions/download-artifact@v4 + with: + name: macOS-symbols - name: Post Mac symbols if: env.BUGSPLAT_USER && env.BUGSPLAT_PASS - uses: secondlife/viewer-build-util/post-bugsplat-mac@v2 + uses: secondlife-3p/symbol-upload@v10 with: username: ${{ env.BUGSPLAT_USER }} password: ${{ env.BUGSPLAT_PASS }} database: "SecondLife_Viewer_2018" - channel: ${{ needs.build.outputs.viewer_channel }} + application: ${{ needs.build.outputs.viewer_channel }} version: ${{ needs.build.outputs.viewer_version }} + directory: . + files: "**/*.xcarchive.zip" release: needs: [setvar, build, sign-and-package-windows, sign-and-package-mac] diff --git a/autobuild.xml b/autobuild.xml index eed4e1ed1a..fbeef128ae 100644 --- a/autobuild.xml +++ b/autobuild.xml @@ -1786,18 +1786,6 @@ </map> <key>mikktspace</key> <map> - <key>canonical_repo</key> - <string>https://bitbucket.org/lindenlab/3p-mikktspace</string> - <key>copyright</key> - <string>Copyright (C) 2011 by Morten S. Mikkelsen, Copyright (C) 2022 Blender Authors</string> - <key>description</key> - <string>Mikktspace Tangent Generator</string> - <key>license</key> - <string>Apache 2.0</string> - <key>license_file</key> - <string>mikktspace.txt</string> - <key>name</key> - <string>mikktspace</string> <key>platforms</key> <map> <key>darwin64</key> @@ -1843,8 +1831,20 @@ <string>windows64</string> </map> </map> + <key>license</key> + <string>Apache 2.0</string> + <key>license_file</key> + <string>mikktspace.txt</string> + <key>copyright</key> + <string>Copyright (C) 2011 by Morten S. 
Mikkelsen, Copyright (C) 2022 Blender Authors</string> <key>version</key> <string>1</string> + <key>name</key> + <string>mikktspace</string> + <key>canonical_repo</key> + <string>https://bitbucket.org/lindenlab/3p-mikktspace</string> + <key>description</key> + <string>Mikktspace Tangent Generator</string> </map> <key>minizip-ng</key> <map> @@ -2810,6 +2810,8 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors</string> <string>LICENSE</string> <key>copyright</key> <string>Copyright (c) 2000-2012, Linden Research, Inc.</string> + <key>version</key> + <string>3.0-f14b5ec</string> <key>name</key> <string>viewer-manager</string> <key>description</key> @@ -2818,8 +2820,6 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors</string> <string>https://bitbucket.org/lindenlab/vmp-standalone</string> <key>source_type</key> <string>hg</string> - <key>version</key> - <string>3.0-f14b5ec</string> </map> <key>vlc-bin</key> <map> @@ -2959,6 +2959,72 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors</string> <key>description</key> <string>Vulkan GLTF Sample Implementation</string> </map> + <key>webrtc</key> + <map> + <key>platforms</key> + <map> + <key>darwin64</key> + <map> + <key>archive</key> + <map> + <key>hash</key> + <string>194b4f5957c9f003c46e61a434e23a7c3d1180d6</string> + <key>hash_algorithm</key> + <string>sha1</string> + <key>url</key> + <string>https://github.com/secondlife/3p-webrtc-build/releases/download/m114.5735.08.70-debug/webrtc-m114.5735.08.70-debug.10377605436-darwin64-10377605436.tar.zst</string> + </map> + <key>name</key> + <string>darwin64</string> + </map> + <key>linux64</key> + <map> + <key>archive</key> + <map> + <key>hash</key> + <string>38e0c7d30b4c40eb04e60ab199440b847cc7c6cf</string> + <key>hash_algorithm</key> + <string>sha1</string> + <key>url</key> + <string>https://github.com/secondlife/3p-webrtc-build/releases/download/m114.5735.08.70-debug/webrtc-m114.5735.08.70-debug.10377605436-linux64-10377605436.tar.zst</string> + </map> + <key>name</key> + <string>linux64</string> + </map> + <key>windows64</key> + <map> + <key>archive</key> + <map> + <key>hash</key> + <string>053fb5c873df9192e34cddcf2db1c5fdcff76ba1</string> + <key>hash_algorithm</key> + <string>sha1</string> + <key>url</key> + <string>https://github.com/secondlife/3p-webrtc-build/releases/download/m114.5735.08.70-debug/webrtc-m114.5735.08.70-debug.10377605436-windows64-10377605436.tar.zst</string> + </map> + <key>name</key> + <string>windows64</string> + </map> + </map> + <key>license</key> + <string>MIT</string> + <key>license_file</key> + <string>LICENSES/webrtc-license.txt</string> + <key>copyright</key> + <string>Copyright (c) 2011, The WebRTC project authors. All rights reserved.</string> + <key>version</key> + <string>m114.5735.08.70-debug.10377605436</string> + <key>name</key> + <string>webrtc</string> + <key>vcs_branch</key> + <string>secondlife</string> + <key>vcs_revision</key> + <string>d3f62d32bac8694d3c7423c731ae30c113bf6a11</string> + <key>vcs_url</key> + <string>https://github.com/secondlife/3p-webrtc-build</string> + <key>canonical_repo</key> + <string>https://github.com/secondlife/3p-webrtc-build</string> + </map> <key>xxhash</key> <map> <key>platforms</key> @@ -186,7 +186,7 @@ pre_build() # This name is consumed by indra/newview/CMakeLists.txt. Make it # absolute because we've had troubles with relative pathnames. 
abs_build_dir="$(cd "$build_dir"; pwd)" - VIEWER_SYMBOL_FILE="$(native_path "$abs_build_dir/newview/$variant/secondlife-symbols-$symplat-${AUTOBUILD_ADDRSIZE}.tar.xz")" + VIEWER_SYMBOL_FILE="$(native_path "$abs_build_dir/symbols/$variant/${viewer_channel}.sym.tar.xz")" fi # honor autobuild_configure_parameters same as sling-buildscripts @@ -551,9 +551,8 @@ then # nat 2016-12-22: without RELEASE_CRASH_REPORTING, we have no symbol file. if [ "${RELEASE_CRASH_REPORTING:-}" != "OFF" ] then - # BugSplat wants to see xcarchive.zip - # e.g. build-darwin-x86_64/newview/Release/Second Life Test.xcarchive.zip - symbol_file="${build_dir}/newview/${variant}/${viewer_channel}.xcarchive.zip" + # e.g. build-darwin-x86_64/symbols/Release/Second Life Test.xarchive.zip + symbol_file="${build_dir}/symbols/${variant}/${viewer_channel}.xcarchive.zip" if [[ ! -f "$symbol_file" ]] then # symbol tarball we prep for (e.g.) Breakpad diff --git a/etc/message.xml b/etc/message.xml index b444fe6c11..dee3fd72dd 100755 --- a/etc/message.xml +++ b/etc/message.xml @@ -506,6 +506,13 @@ <boolean>false</boolean> </map> + <key>VoiceSignalingRequest</key> + <map> + <key>flavor</key> + <string>llsd</string> + <key>trusted-sender</key> + <boolean>false</boolean> + </map> <!-- Server to client --> <key>RequiredVoiceVersion</key> <map> @@ -688,6 +695,9 @@ <key>ProvisionVoiceAccountRequest</key> <boolean>false</boolean> + + <key>VoiceSignalingRequest</key> + <boolean>false</boolean> <key>RemoteParcelRequest</key> <boolean>false</boolean> diff --git a/indra/CMakeLists.txt b/indra/CMakeLists.txt index 154d83a640..e7e41fd316 100644 --- a/indra/CMakeLists.txt +++ b/indra/CMakeLists.txt @@ -58,6 +58,9 @@ add_subdirectory(${LIBS_OPEN_PREFIX}llmessage) add_subdirectory(${LIBS_OPEN_PREFIX}llprimitive) add_subdirectory(${LIBS_OPEN_PREFIX}llrender) add_subdirectory(${LIBS_OPEN_PREFIX}llfilesystem) +if (NOT CMAKE_SYSTEM_NAME MATCHES FreeBSD) +add_subdirectory(${LIBS_OPEN_PREFIX}llwebrtc) +endif () add_subdirectory(${LIBS_OPEN_PREFIX}llwindow) add_subdirectory(${LIBS_OPEN_PREFIX}llxml) diff --git a/indra/cmake/00-Common.cmake b/indra/cmake/00-Common.cmake index b7fb49dc55..7de12d0063 100644 --- a/indra/cmake/00-Common.cmake +++ b/indra/cmake/00-Common.cmake @@ -88,7 +88,7 @@ if (WINDOWS) if( ADDRESS_SIZE EQUAL 32 ) add_compile_options( /arch:SSE2 ) endif() - + # Are we using the crummy Visual Studio KDU build workaround? 
if (NOT VS_DISABLE_FATAL_WARNINGS) add_compile_options(/WX) @@ -228,6 +228,10 @@ if (DARWIN) list(APPEND GCC_WARNINGS -Wno-reorder -Wno-non-virtual-dtor ) + if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13) + list(APPEND GCC_WARNINGS -Wno-unused-but-set-variable -Wno-unused-variable ) + endif() + add_compile_options(${GCC_WARNINGS}) add_compile_options(-m${ADDRESS_SIZE}) endif () diff --git a/indra/cmake/CMakeLists.txt b/indra/cmake/CMakeLists.txt index 793ee1365f..be56ef102d 100644 --- a/indra/cmake/CMakeLists.txt +++ b/indra/cmake/CMakeLists.txt @@ -63,6 +63,7 @@ set(cmake_SOURCE_FILES ViewerMiscLibs.cmake VisualLeakDetector.cmake LibVLCPlugin.cmake + WebRTC.cmake XmlRpcEpi.cmake xxHash.cmake ZLIBNG.cmake diff --git a/indra/cmake/Linking.cmake b/indra/cmake/Linking.cmake index a297e04143..8e91dac109 100644 --- a/indra/cmake/Linking.cmake +++ b/indra/cmake/Linking.cmake @@ -19,6 +19,7 @@ if (WINDOWS OR DARWIN ) endif() else() set(SHARED_LIB_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs/$<IF:$<BOOL:${LL_GENERATOR_IS_MULTI_CONFIG}>,$<CONFIG>,>) + set(SYMBOLS_STAGING_DIR ${CMAKE_BINARY_DIR}/symbols/$<IF:$<BOOL:${LL_GENERATOR_IS_MULTI_CONFIG}>,$<CONFIG>,>/${VIEWER_CHANNEL}) endif() if( DARWIN ) diff --git a/indra/cmake/WebRTC.cmake b/indra/cmake/WebRTC.cmake new file mode 100644 index 0000000000..e9e679fa80 --- /dev/null +++ b/indra/cmake/WebRTC.cmake @@ -0,0 +1,59 @@ +# -*- cmake -*- +include(Linking) +include(Prebuilt) + +include_guard() + +add_library( ll::webrtc INTERFACE IMPORTED ) +target_include_directories( ll::webrtc SYSTEM INTERFACE "${LIBS_PREBUILT_DIR}/include/webrtc" "${LIBS_PREBUILT_DIR}/include/webrtc/third_party/abseil-cpp") +if (CMAKE_OSX_ARCHITECTURES MATCHES arm64) + if (${PREBUILD_TRACKING_DIR}/sentinel_installed IS_NEWER_THAN ${PREBUILD_TRACKING_DIR}/webrtc_installed OR NOT ${webrtc_installed} EQUAL 0) + if (NOT EXISTS ${CMAKE_BINARY_DIR}/libwebrtc-macos-arm64.tar.xz) + file(DOWNLOAD + https://github.com/crow-misia/libwebrtc-bin/releases/download/114.5735.6.1/libwebrtc-macos-arm64.tar.xz + ${CMAKE_BINARY_DIR}/libwebrtc-macos-arm64.tar.xz + SHOW_PROGRESS + ) + endif (NOT EXISTS ${CMAKE_BINARY_DIR}/libwebrtc-macos-arm64.tar.xz) + file(ARCHIVE_EXTRACT + INPUT ${CMAKE_BINARY_DIR}/libwebrtc-macos-arm64.tar.xz + DESTINATION ${LIBS_PREBUILT_DIR} + ) + file(RENAME + ${LIBS_PREBUILT_DIR}/lib/libwebrtc.a + ${LIBS_PREBUILT_DIR}/lib/release/libwebrtc.a + ) + file(REMOVE_RECURSE ${LIBS_PREBUILT_DIR}/Frameworks/WebRTC.xcframework/macos-arm64/WebRTC.framework) + file(RENAME + ${LIBS_PREBUILT_DIR}/Frameworks/WebRTC.xcframework/macos-arm64/WebRTC.framework + ${LIBS_PREBUILT_DIR}/lib/release/WebRTC.framework + ) + file(REMOVE_RECURSE ${LIBS_PREBUILT_DIR}/Frameworks) + file(WRITE ${PREBUILD_TRACKING_DIR}/webrtc_installed "0") + endif (${PREBUILD_TRACKING_DIR}/sentinel_installed IS_NEWER_THAN ${PREBUILD_TRACKING_DIR}/webrtc_installed OR NOT ${webrtc_installed} EQUAL 0) +elseif (NOT CMAKE_SYSTEM_NAME MATCHES FreeBSD) +use_prebuilt_binary(webrtc) +endif (CMAKE_OSX_ARCHITECTURES MATCHES arm64) + +if (WINDOWS) + target_link_libraries( ll::webrtc INTERFACE webrtc.lib ) +elseif (DARWIN) + FIND_LIBRARY(COREAUDIO_LIBRARY CoreAudio) + FIND_LIBRARY(COREGRAPHICS_LIBRARY CoreGraphics) + FIND_LIBRARY(AUDIOTOOLBOX_LIBRARY AudioToolbox) + FIND_LIBRARY(COREFOUNDATION_LIBRARY CoreFoundation) + FIND_LIBRARY(COCOA_LIBRARY Cocoa) + + target_link_libraries( ll::webrtc INTERFACE + libwebrtc.a + ${COREAUDIO_LIBRARY} + ${AUDIOTOOLBOX_LIBRARY} + ${COREGRAPHICS_LIBRARY} + ${COREFOUNDATION_LIBRARY} + 
${COCOA_LIBRARY} + ) +elseif (LINUX) + target_link_libraries( ll::webrtc INTERFACE libwebrtc.a X11 ) +endif (WINDOWS) + + diff --git a/indra/llcommon/llerrorcontrol.h b/indra/llcommon/llerrorcontrol.h index bf5a6df556..cbb703e9e7 100644 --- a/indra/llcommon/llerrorcontrol.h +++ b/indra/llcommon/llerrorcontrol.h @@ -198,7 +198,7 @@ namespace LLError }; /** - * @NOTE: addRecorder() and removeRecorder() uses the boost::shared_ptr to allow for shared ownership + * @NOTE: addRecorder() and removeRecorder() uses the std::shared_ptr to allow for shared ownership * while still ensuring that the allocated memory is eventually freed */ LL_COMMON_API void addRecorder(RecorderPtr); diff --git a/indra/llcommon/llprofilercategories.h b/indra/llcommon/llprofilercategories.h index 617431f629..0de343d020 100644 --- a/indra/llcommon/llprofilercategories.h +++ b/indra/llcommon/llprofilercategories.h @@ -1,5 +1,5 @@ /** - * @file llprofiler_ategories.h + * @file llprofilercategories.h * @brief Profiling categories to minimize Tracy memory usage when viewing captures. * * $LicenseInfo:firstyear=2022&license=viewerlgpl$ @@ -33,7 +33,7 @@ // LL_PROFILER_CATEGORY_ENABLE_DRAWPOOL // LL_PROFILER_CATEGORY_ENABLE_LLSD // LL_PROFILER_CATEGORY_ENABLE_MEMORY -// LL_PROFILER_CATEGORY_ENABLE_SHADERS +// LL_PROFILER_CATEGORY_ENABLE_SHADER // // NOTE: You can still manually use: // LL_PROFILE_ZONE_SCOPED(); @@ -67,6 +67,7 @@ #define LL_PROFILER_CATEGORY_ENABLE_VERTEX 1 #define LL_PROFILER_CATEGORY_ENABLE_VOLUME 1 #define LL_PROFILER_CATEGORY_ENABLE_WIN32 1 +#define LL_PROFILER_CATEGORY_ENABLE_VOICE 1 #if LL_PROFILER_CATEGORY_ENABLE_APP #define LL_PROFILE_ZONE_NAMED_CATEGORY_APP LL_PROFILE_ZONE_NAMED @@ -276,5 +277,13 @@ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_WIN32 #endif +#if LL_PROFILER_CATEGORY_ENABLE_VOICE +#define LL_PROFILE_ZONE_NAMED_CATEGORY_VOICE LL_PROFILE_ZONE_NAMED +#define LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE LL_PROFILE_ZONE_SCOPED +#else +#define LL_PROFILE_ZONE_NAMED_CATEGORY_VOICE(name) +#define LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE +#endif + #endif // LL_PROFILER_CATEGORIES_H diff --git a/indra/llcommon/lltracethreadrecorder.cpp b/indra/llcommon/lltracethreadrecorder.cpp index be3e585ef8..375cb050cc 100644 --- a/indra/llcommon/lltracethreadrecorder.cpp +++ b/indra/llcommon/lltracethreadrecorder.cpp @@ -159,7 +159,8 @@ AccumulatorBufferGroup* ThreadRecorder::activate( AccumulatorBufferGroup* record ThreadRecorder::active_recording_list_t::iterator ThreadRecorder::bringUpToDate( AccumulatorBufferGroup* recording ) { #if LL_TRACE_ENABLED - if (mActiveRecordings.empty()) return mActiveRecordings.end(); + if (mActiveRecordings.empty()) + return mActiveRecordings.end(); mActiveRecordings.back()->mPartialRecording.sync(); BlockTimer::updateTimes(); @@ -202,7 +203,7 @@ ThreadRecorder::active_recording_list_t::iterator ThreadRecorder::bringUpToDate( #endif } -void ThreadRecorder::deactivate( AccumulatorBufferGroup* recording ) +void ThreadRecorder::deactivate(AccumulatorBufferGroup* recording) { #if LL_TRACE_ENABLED active_recording_list_t::iterator recording_it = bringUpToDate(recording); @@ -228,9 +229,10 @@ void ThreadRecorder::deactivate( AccumulatorBufferGroup* recording ) #endif } -ThreadRecorder::ActiveRecording::ActiveRecording( AccumulatorBufferGroup* target ) +ThreadRecorder::ActiveRecording::ActiveRecording(AccumulatorBufferGroup* target) : mTargetRecording(target) -{} +{ +} void ThreadRecorder::ActiveRecording::movePartialToTarget() { @@ -243,30 +245,30 @@ void 
ThreadRecorder::ActiveRecording::movePartialToTarget() // called by child thread -void ThreadRecorder::addChildRecorder( class ThreadRecorder* child ) +void ThreadRecorder::addChildRecorder(ThreadRecorder* child) { #if LL_TRACE_ENABLED - { LLMutexLock lock(&mChildListMutex); - mChildThreadRecorders.push_back(child); - } + LLMutexLock lock(&mChildListMutex); + mChildThreadRecorders.push_back(child); #endif } // called by child thread -void ThreadRecorder::removeChildRecorder( class ThreadRecorder* child ) +void ThreadRecorder::removeChildRecorder(ThreadRecorder* child) { #if LL_TRACE_ENABLED - { LLMutexLock lock(&mChildListMutex); - mChildThreadRecorders.remove(child); - } + LLMutexLock lock(&mChildListMutex); + mChildThreadRecorders.remove(child); #endif } void ThreadRecorder::pushToParent() { #if LL_TRACE_ENABLED - { LLMutexLock lock(&mSharedRecordingMutex); - LLTrace::get_thread_recorder()->bringUpToDate(&mThreadRecordingBuffers); + if (ThreadRecorder* recorder = LLTrace::get_thread_recorder()) + { + LLMutexLock lock(&mSharedRecordingMutex); + recorder->bringUpToDate(&mThreadRecordingBuffers); mSharedRecordingBuffers.append(mThreadRecordingBuffers); mThreadRecordingBuffers.reset(); } @@ -278,15 +280,14 @@ void ThreadRecorder::pullFromChildren() { #if LL_TRACE_ENABLED LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS; - if (mActiveRecordings.empty()) return; - - { LLMutexLock lock(&mChildListMutex); - + if (!mActiveRecordings.empty()) + { + LLMutexLock lock(&mChildListMutex); AccumulatorBufferGroup& target_recording_buffers = mActiveRecordings.back()->mPartialRecording; target_recording_buffers.sync(); for (LLTrace::ThreadRecorder* rec : mChildThreadRecorders) - { LLMutexLock lock(&(rec->mSharedRecordingMutex)); - + { + LLMutexLock lock(&(rec->mSharedRecordingMutex)); target_recording_buffers.merge(rec->mSharedRecordingBuffers); rec->mSharedRecordingBuffers.reset(); } @@ -294,13 +295,11 @@ void ThreadRecorder::pullFromChildren() #endif } - -void set_master_thread_recorder( ThreadRecorder* recorder ) +void set_master_thread_recorder(ThreadRecorder* recorder) { sMasterThreadRecorder = recorder; } - ThreadRecorder* get_master_thread_recorder() { return sMasterThreadRecorder; diff --git a/indra/llinventory/llsettingsdaycycle.cpp b/indra/llinventory/llsettingsdaycycle.cpp index ef6a187d06..3050cdf953 100644 --- a/indra/llinventory/llsettingsdaycycle.cpp +++ b/indra/llinventory/llsettingsdaycycle.cpp @@ -28,7 +28,6 @@ #include "llsettingsdaycycle.h" #include "llerror.h" #include <algorithm> -#include <boost/make_shared.hpp> #include "lltrace.h" #include "llfasttimer.h" #include "v3colorutil.h" diff --git a/indra/llinventory/llsettingswater.cpp b/indra/llinventory/llsettingswater.cpp index 84542e2cb4..a6a25b0953 100644 --- a/indra/llinventory/llsettingswater.cpp +++ b/indra/llinventory/llsettingswater.cpp @@ -27,7 +27,6 @@ #include "llsettingswater.h" #include <algorithm> -#include <boost/make_shared.hpp> #include "lltrace.h" #include "llfasttimer.h" #include "v3colorutil.h" diff --git a/indra/llmath/llquaternion.h b/indra/llmath/llquaternion.h index fbe4da97f7..be7f119a2b 100644 --- a/indra/llmath/llquaternion.h +++ b/indra/llmath/llquaternion.h @@ -132,6 +132,7 @@ public: friend LLQuaternion operator~(const LLQuaternion &a); // Returns a* (Conjugate of a) bool operator==(const LLQuaternion &b) const; // Returns a == b bool operator!=(const LLQuaternion &b) const; // Returns a != b + F64 operator[](int idx) const { return mQ[idx]; } friend const LLQuaternion& operator*=(LLQuaternion &a, const LLQuaternion 
&b); // Returns a * b diff --git a/indra/llwebrtc/CMakeLists.txt b/indra/llwebrtc/CMakeLists.txt new file mode 100644 index 0000000000..d6217ff3a4 --- /dev/null +++ b/indra/llwebrtc/CMakeLists.txt @@ -0,0 +1,87 @@ +# -*- cmake -*- + +# some webrtc headers require C++ 20 +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +include(00-Common) +include(Linking) +include(WebRTC) + +project(llwebrtc) + +if (LINUX) + add_compile_options(-Wno-deprecated-declarations) # webrtc::CreateAudioDeviceWithDataObserver is deprecated +endif (LINUX) + +set(llwebrtc_SOURCE_FILES + llwebrtc.cpp + ) + +set(llwebrtc_HEADER_FILES + CMakeLists.txt + llwebrtc.h + llwebrtc_impl.h + ) + +list(APPEND llwebrtc_SOURCE_FILES ${llwebrtc_HEADER_FILES}) + +add_library (llwebrtc SHARED ${llwebrtc_SOURCE_FILES}) + +if (NOT INSTALL) +set_target_properties(llwebrtc PROPERTIES PUBLIC_HEADER llwebrtc.h) +endif () + +if (WINDOWS) + set_target_properties(llwebrtc + PROPERTIES + LINK_FLAGS "/debug /LARGEADDRESSAWARE" + ) + target_link_libraries(llwebrtc PRIVATE ll::webrtc + secur32 + winmm + dmoguids + wmcodecdspuuid + msdmo + strmiids + iphlpapi) + if (USE_BUGSPLAT) + set_target_properties(llwebrtc PROPERTIES PDB_OUTPUT_DIRECTORY "${SYMBOLS_STAGING_DIR}") + endif (USE_BUGSPLAT) +elseif (DARWIN) + target_link_libraries(llwebrtc PRIVATE ll::webrtc) + if (USE_BUGSPLAT) + set_target_properties(llwebrtc PROPERTIES XCODE_ATTRIBUTE_DEBUG_INFORMATION_FORMAT "dwarf-with-dsym" + XCODE_ATTRIBUTE_DWARF_DSYM_FOLDER_PATH "${SYMBOLS_STAGING_DIR}/dSYMs") + endif (USE_BUGSPLAT) +elseif (LINUX) + target_link_libraries(llwebrtc PRIVATE ll::webrtc) +endif (WINDOWS) + +target_include_directories( llwebrtc INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}) + +if (WINDOWS) + set_property(TARGET llwebrtc PROPERTY + MSVC_RUNTIME_LIBRARY "MultiThreadedDebug") +endif (WINDOWS) + +ADD_CUSTOM_COMMAND(TARGET llwebrtc POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_if_different + $<TARGET_FILE:llwebrtc> + ${SHARED_LIB_STAGING_DIR}) +# Add tests +if (LL_TESTS) +endif (LL_TESTS) + +if (INSTALL) + if (DARWIN) + set(_LIB ../Frameworks) + elseif (EXISTS ${CMAKE_SYSROOT}/usr/lib/${ARCH}-linux-gnu) + set(_LIB lib/${ARCH}-linux-gnu) + elseif (EXISTS /lib64) + set(_LIB lib64) + else (DARWIN) + set(_LIB lib) + endif (DARWIN) + install(TARGETS ${PROJECT_NAME} DESTINATION ${_LIB}) +endif (INSTALL) diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp new file mode 100644 index 0000000000..e533783d33 --- /dev/null +++ b/indra/llwebrtc/llwebrtc.cpp @@ -0,0 +1,1350 @@ +/** + * @file llwebrtc.cpp + * @brief WebRTC interface implementation + * + * $LicenseInfo:firstyear=2023&license=viewerlgpl$ + * Second Life Viewer Source Code + * Copyright (C) 2023, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA + * $/LicenseInfo$ + */ + +#include "llwebrtc_impl.h" +#include <algorithm> +#include <string.h> + +#include "api/audio_codecs/audio_decoder_factory.h" +#include "api/audio_codecs/audio_encoder_factory.h" +#include "api/audio_codecs/builtin_audio_decoder_factory.h" +#include "api/audio_codecs/builtin_audio_encoder_factory.h" +#include "api/media_stream_interface.h" +#include "api/media_stream_track.h" +#include "modules/audio_processing/audio_buffer.h" +#include "modules/audio_mixer/audio_mixer_impl.h" + +namespace llwebrtc +{ + +static int16_t PLAYOUT_DEVICE_DEFAULT = -1; +static int16_t PLAYOUT_DEVICE_BAD = -2; +static int16_t RECORD_DEVICE_DEFAULT = -1; +static int16_t RECORD_DEVICE_BAD = -2; + +LLAudioDeviceObserver::LLAudioDeviceObserver() : mSumVector {0}, mMicrophoneEnergy(0.0) {} + +float LLAudioDeviceObserver::getMicrophoneEnergy() { return mMicrophoneEnergy; } + +// TODO: Pull smoothing/filtering code into a common helper function +// for LLAudioDeviceObserver and LLCustomProcessor + +void LLAudioDeviceObserver::OnCaptureData(const void *audio_samples, + const size_t num_samples, + const size_t bytes_per_sample, + const size_t num_channels, + const uint32_t samples_per_sec) +{ + // calculate the energy + float energy = 0; + const short *samples = (const short *) audio_samples; + for (size_t index = 0; index < num_samples * num_channels; index++) + { + float sample = (static_cast<float>(samples[index]) / (float) 32767); + energy += sample * sample; + } + + // smooth it. 
+ size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]); + float totalSum = 0; + int i; + for (i = 0; i < (buffer_size - 1); i++) + { + mSumVector[i] = mSumVector[i + 1]; + totalSum += mSumVector[i]; + } + mSumVector[i] = energy; + totalSum += energy; + mMicrophoneEnergy = std::sqrt(totalSum / (num_samples * buffer_size)); +} + +void LLAudioDeviceObserver::OnRenderData(const void *audio_samples, + const size_t num_samples, + const size_t bytes_per_sample, + const size_t num_channels, + const uint32_t samples_per_sec) +{ +} + +LLCustomProcessor::LLCustomProcessor() : mSampleRateHz(0), mNumChannels(0), mMicrophoneEnergy(0.0), mGain(1.0) +{ + memset(mSumVector, 0, sizeof(mSumVector)); +} + +void LLCustomProcessor::Initialize(int sample_rate_hz, int num_channels) +{ + mSampleRateHz = sample_rate_hz; + mNumChannels = num_channels; + memset(mSumVector, 0, sizeof(mSumVector)); +} + +void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) +{ + webrtc::StreamConfig stream_config; + stream_config.set_sample_rate_hz(mSampleRateHz); + stream_config.set_num_channels(mNumChannels); + std::vector<float *> frame; + std::vector<float> frame_samples; + + if (audio_in->num_channels() < 1 || audio_in->num_frames() < 480) + { + return; + } + + // grab the input audio + frame_samples.resize(stream_config.num_samples()); + frame.resize(stream_config.num_channels()); + for (size_t ch = 0; ch < stream_config.num_channels(); ++ch) + { + frame[ch] = &(frame_samples)[ch * stream_config.num_frames()]; + } + + audio_in->CopyTo(stream_config, &frame[0]); + + // calculate the energy + float energy = 0; + for (size_t index = 0; index < stream_config.num_samples(); index++) + { + float sample = frame_samples[index]; + sample = sample * mGain; // apply gain + frame_samples[index] = sample; // write processed sample back to buffer. + energy += sample * sample; + } + + audio_in->CopyFrom(&frame[0], stream_config); + + // smooth it. + size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]); + float totalSum = 0; + int i; + for (i = 0; i < (buffer_size - 1); i++) + { + mSumVector[i] = mSumVector[i + 1]; + totalSum += mSumVector[i]; + } + mSumVector[i] = energy; + totalSum += energy; + mMicrophoneEnergy = std::sqrt(totalSum / (stream_config.num_samples() * buffer_size)); +} + +// +// LLWebRTCImpl implementation +// + +LLWebRTCImpl::LLWebRTCImpl(LLWebRTCLogCallback* logCallback) : + mLogSink(new LLWebRTCLogSink(logCallback)), + mPeerCustomProcessor(nullptr), + mMute(true), + mTuningMode(false), + mPlayoutDevice(0), + mRecordingDevice(0), + mTuningAudioDeviceObserver(nullptr) +{ +} + +void LLWebRTCImpl::init() +{ + mPlayoutDevice = 0; + mRecordingDevice = 0; + rtc::InitializeSSL(); + + // Normal logging is rather spammy, so turn it off. + rtc::LogMessage::LogToDebug(rtc::LS_NONE); + rtc::LogMessage::SetLogToStderr(true); + rtc::LogMessage::AddLogToStream(mLogSink, rtc::LS_VERBOSE); + + mTaskQueueFactory = webrtc::CreateDefaultTaskQueueFactory(); + + // Create the native threads. 
+ mNetworkThread = rtc::Thread::CreateWithSocketServer(); + mNetworkThread->SetName("WebRTCNetworkThread", nullptr); + mNetworkThread->Start(); + mWorkerThread = rtc::Thread::Create(); + mWorkerThread->SetName("WebRTCWorkerThread", nullptr); + mWorkerThread->Start(); + mSignalingThread = rtc::Thread::Create(); + mSignalingThread->SetName("WebRTCSignalingThread", nullptr); + mSignalingThread->Start(); + + mTuningAudioDeviceObserver = new LLAudioDeviceObserver; + mWorkerThread->PostTask( + [this]() + { + // Initialize the audio devices on the Worker Thread + mTuningDeviceModule = + webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, + mTaskQueueFactory.get(), + std::unique_ptr<webrtc::AudioDeviceDataObserver>(mTuningAudioDeviceObserver)); + + mTuningDeviceModule->Init(); + mTuningDeviceModule->SetPlayoutDevice(mPlayoutDevice); + mTuningDeviceModule->SetRecordingDevice(mRecordingDevice); + mTuningDeviceModule->EnableBuiltInAEC(false); +#if __x86_64__ && !__FreeBSD__ + mTuningDeviceModule->SetAudioDeviceSink(this); +#endif + mTuningDeviceModule->InitMicrophone(); + mTuningDeviceModule->InitSpeaker(); + mTuningDeviceModule->InitRecording(); + mTuningDeviceModule->InitPlayout(); + mTuningDeviceModule->SetStereoRecording(true); + mTuningDeviceModule->SetStereoPlayout(true); + updateDevices(); + }); + + mWorkerThread->BlockingCall( + [this]() + { + // the peer device module doesn't need an observer + // as we pull peer data after audio processing. + mPeerDeviceModule = webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, + mTaskQueueFactory.get(), + nullptr); + mPeerDeviceModule->Init(); + mPeerDeviceModule->SetPlayoutDevice(mPlayoutDevice); + mPeerDeviceModule->SetRecordingDevice(mRecordingDevice); + mPeerDeviceModule->EnableBuiltInAEC(false); + mPeerDeviceModule->InitMicrophone(); + mPeerDeviceModule->InitSpeaker(); + mPeerDeviceModule->InitRecording(); + mPeerDeviceModule->InitPlayout(); + mPeerDeviceModule->SetStereoRecording(true); + mPeerDeviceModule->SetStereoPlayout(true); + }); + + // The custom processor allows us to retrieve audio data (and levels) + // from after other audio processing such as AEC, AGC, etc. 
+ mPeerCustomProcessor = new LLCustomProcessor; + webrtc::AudioProcessingBuilder apb; + apb.SetCapturePostProcessing(std::unique_ptr<webrtc::CustomProcessing>(mPeerCustomProcessor)); + mAudioProcessingModule = apb.Create(); + + webrtc::AudioProcessing::Config apm_config; + apm_config.echo_canceller.enabled = false; + apm_config.echo_canceller.mobile_mode = false; + apm_config.gain_controller1.enabled = false; + apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog; + apm_config.gain_controller2.enabled = false; + apm_config.high_pass_filter.enabled = true; + apm_config.noise_suppression.enabled = true; + apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh; + apm_config.transient_suppression.enabled = true; + apm_config.pipeline.multi_channel_render = true; + apm_config.pipeline.multi_channel_capture = true; + apm_config.pipeline.multi_channel_capture = true; + + webrtc::ProcessingConfig processing_config; + processing_config.input_stream().set_num_channels(2); + processing_config.input_stream().set_sample_rate_hz(48000); + processing_config.output_stream().set_num_channels(2); + processing_config.output_stream().set_sample_rate_hz(48000); + processing_config.reverse_input_stream().set_num_channels(2); + processing_config.reverse_input_stream().set_sample_rate_hz(48000); + processing_config.reverse_output_stream().set_num_channels(2); + processing_config.reverse_output_stream().set_sample_rate_hz(48000); + + mAudioProcessingModule->ApplyConfig(apm_config); + mAudioProcessingModule->Initialize(processing_config); + + + mPeerConnectionFactory = webrtc::CreatePeerConnectionFactory(mNetworkThread.get(), + mWorkerThread.get(), + mSignalingThread.get(), + mPeerDeviceModule, + webrtc::CreateBuiltinAudioEncoderFactory(), + webrtc::CreateBuiltinAudioDecoderFactory(), + nullptr /* video_encoder_factory */, + nullptr /* video_decoder_factory */, + nullptr /* audio_mixer */, + mAudioProcessingModule); + + mWorkerThread->BlockingCall([this]() { mPeerDeviceModule->StartPlayout(); }); +} + +void LLWebRTCImpl::terminate() +{ + for (auto &connection : mPeerConnections) + { + connection->terminate(); + } + + // connection->terminate() above spawns a number of Signaling thread calls to + // shut down the connection. The following Blocking Call will wait + // until they're done before it's executed, allowing time to clean up. + + mSignalingThread->BlockingCall([this]() { mPeerConnectionFactory = nullptr; }); + + mPeerConnections.clear(); + + mWorkerThread->BlockingCall( + [this]() + { + if (mTuningDeviceModule) + { + mTuningDeviceModule->StopRecording(); + mTuningDeviceModule->Terminate(); + } + if (mPeerDeviceModule) + { + mPeerDeviceModule->StopRecording(); + mPeerDeviceModule->Terminate(); + } + mTuningDeviceModule = nullptr; + mPeerDeviceModule = nullptr; + mTaskQueueFactory = nullptr; + }); + rtc::LogMessage::RemoveLogToStream(mLogSink); +} + +// +// Devices functions +// +// Most device-related functionality needs to happen +// on the worker thread (the audio thread,) so those calls will be +// proxied over to that thread. 
+// +void LLWebRTCImpl::setRecording(bool recording) +{ + mWorkerThread->PostTask( + [this, recording]() + { + if (recording) + { + mPeerDeviceModule->StartRecording(); + } + else + { + mPeerDeviceModule->StopRecording(); + } + }); +} + +void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config) +{ + webrtc::AudioProcessing::Config apm_config; + apm_config.echo_canceller.enabled = config.mEchoCancellation; + apm_config.echo_canceller.mobile_mode = false; + apm_config.gain_controller1.enabled = config.mAGC; + apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog; + apm_config.gain_controller2.enabled = false; + apm_config.high_pass_filter.enabled = true; + apm_config.transient_suppression.enabled = true; + apm_config.pipeline.multi_channel_render = true; + apm_config.pipeline.multi_channel_capture = true; + apm_config.pipeline.multi_channel_capture = true; + + switch (config.mNoiseSuppressionLevel) + { + case LLWebRTCDeviceInterface::AudioConfig::NOISE_SUPPRESSION_LEVEL_NONE: + apm_config.noise_suppression.enabled = false; + apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kLow; + break; + case LLWebRTCDeviceInterface::AudioConfig::NOISE_SUPPRESSION_LEVEL_LOW: + apm_config.noise_suppression.enabled = true; + apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kLow; + break; + case LLWebRTCDeviceInterface::AudioConfig::NOISE_SUPPRESSION_LEVEL_MODERATE: + apm_config.noise_suppression.enabled = true; + apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kModerate; + break; + case LLWebRTCDeviceInterface::AudioConfig::NOISE_SUPPRESSION_LEVEL_HIGH: + apm_config.noise_suppression.enabled = true; + apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kHigh; + break; + case LLWebRTCDeviceInterface::AudioConfig::NOISE_SUPPRESSION_LEVEL_VERY_HIGH: + apm_config.noise_suppression.enabled = true; + apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh; + break; + default: + apm_config.noise_suppression.enabled = false; + apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kLow; + } + mAudioProcessingModule->ApplyConfig(apm_config); +} + +void LLWebRTCImpl::refreshDevices() +{ + mWorkerThread->PostTask([this]() { updateDevices(); }); +} + +void LLWebRTCImpl::setDevicesObserver(LLWebRTCDevicesObserver *observer) { mVoiceDevicesObserverList.emplace_back(observer); } + +void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer) +{ + std::vector<LLWebRTCDevicesObserver *>::iterator it = + std::find(mVoiceDevicesObserverList.begin(), mVoiceDevicesObserverList.end(), observer); + if (it != mVoiceDevicesObserverList.end()) + { + mVoiceDevicesObserverList.erase(it); + } +} + +void ll_set_device_module_capture_device(rtc::scoped_refptr<webrtc::AudioDeviceModule> device_module, int16_t device) +{ + device_module->StopRecording(); +#if WEBRTC_WIN + if (device < 0) + { + device_module->SetRecordingDevice(webrtc::AudioDeviceModule::kDefaultDevice); + } + else + { + device_module->SetRecordingDevice(device); + } +#else + // passed in default is -1, but the device list + // has it at 0 + device_module->SetRecordingDevice(device + 1); +#endif + device_module->InitMicrophone(); + device_module->InitRecording(); + device_module->SetStereoRecording(false); + device_module->StartRecording(); +} + +void 
LLWebRTCImpl::setCaptureDevice(const std::string &id) +{ + int16_t recordingDevice = RECORD_DEVICE_DEFAULT; + if (id != "Default") + { + for (int16_t i = 0; i < mRecordingDeviceList.size(); i++) + { + if (mRecordingDeviceList[i].mID == id) + { + recordingDevice = i; + break; + } + } + } + if (recordingDevice == mRecordingDevice) + { + return; + } + mRecordingDevice = recordingDevice; + if (mTuningMode) + { + mWorkerThread->PostTask([this, recordingDevice]() { ll_set_device_module_capture_device(mTuningDeviceModule, recordingDevice); }); + } + else + { + mWorkerThread->PostTask([this, recordingDevice]() { ll_set_device_module_capture_device(mPeerDeviceModule, recordingDevice); }); + } +} + + +void ll_set_device_module_render_device(rtc::scoped_refptr<webrtc::AudioDeviceModule> device_module, int16_t device) +{ + device_module->StopPlayout(); +#if WEBRTC_WIN + if (device < 0) + { + device_module->SetPlayoutDevice(webrtc::AudioDeviceModule::kDefaultDevice); + } + else + { + device_module->SetPlayoutDevice(device); + } +#else + device_module->SetPlayoutDevice(device + 1); +#endif + device_module->InitSpeaker(); + device_module->InitPlayout(); + device_module->SetStereoPlayout(true); +} + +void LLWebRTCImpl::setRenderDevice(const std::string &id) +{ + int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT; + if (id != "Default") + { + for (int16_t i = 0; i < mPlayoutDeviceList.size(); i++) + { + if (mPlayoutDeviceList[i].mID == id) + { + playoutDevice = i; + break; + } + } + } + if (playoutDevice == mPlayoutDevice) + { + return; + } + mPlayoutDevice = playoutDevice; + + if (mTuningMode) + { + mWorkerThread->PostTask( + [this, playoutDevice]() + { + ll_set_device_module_render_device(mTuningDeviceModule, playoutDevice); + }); + } + else + { + mWorkerThread->PostTask( + [this, playoutDevice]() + { + ll_set_device_module_render_device(mPeerDeviceModule, playoutDevice); + mPeerDeviceModule->StartPlayout(); + }); + } +} + +// updateDevices needs to happen on the worker thread. +void LLWebRTCImpl::updateDevices() +{ + int16_t renderDeviceCount = mTuningDeviceModule->PlayoutDevices(); + + mPlayoutDeviceList.clear(); +#if WEBRTC_WIN + int16_t index = 0; +#else + // index zero is always "Default" for darwin/linux, + // which is a special case, so skip it. + int16_t index = 1; +#endif + for (; index < renderDeviceCount; index++) + { + char name[webrtc::kAdmMaxDeviceNameSize]; + char guid[webrtc::kAdmMaxGuidSize]; + mTuningDeviceModule->PlayoutDeviceName(index, name, guid); + mPlayoutDeviceList.emplace_back(name, guid); + } + + int16_t captureDeviceCount = mTuningDeviceModule->RecordingDevices(); + + mRecordingDeviceList.clear(); +#if WEBRTC_WIN + index = 0; +#else + // index zero is always "Default" for darwin/linux, + // which is a special case, so skip it. 
+ index = 1; +#endif + for (; index < captureDeviceCount; index++) + { + char name[webrtc::kAdmMaxDeviceNameSize]; + char guid[webrtc::kAdmMaxGuidSize]; + mTuningDeviceModule->RecordingDeviceName(index, name, guid); + mRecordingDeviceList.emplace_back(name, guid); + } + + for (auto &observer : mVoiceDevicesObserverList) + { + observer->OnDevicesChanged(mPlayoutDeviceList, mRecordingDeviceList); + } +} + +void LLWebRTCImpl::OnDevicesUpdated() +{ + // reset these to a bad value so an update is forced + mRecordingDevice = RECORD_DEVICE_BAD; + mPlayoutDevice = PLAYOUT_DEVICE_BAD; + + updateDevices(); +} + + +void LLWebRTCImpl::setTuningMode(bool enable) +{ + mTuningMode = enable; + mWorkerThread->PostTask( + [this, enable] { + if (enable) + { + mPeerDeviceModule->StopRecording(); + mPeerDeviceModule->StopPlayout(); + ll_set_device_module_render_device(mTuningDeviceModule, mPlayoutDevice); + ll_set_device_module_capture_device(mTuningDeviceModule, mRecordingDevice); + mTuningDeviceModule->InitPlayout(); + mTuningDeviceModule->InitRecording(); + mTuningDeviceModule->StartRecording(); + // TODO: Starting Playout on the TDM appears to create an audio artifact (click) + // in this case, so disabling it for now. We may have to do something different + // if we enable 'echo playback' via the TDM when tuning. + //mTuningDeviceModule->StartPlayout(); + } + else + { + mTuningDeviceModule->StopRecording(); + //mTuningDeviceModule->StopPlayout(); + ll_set_device_module_render_device(mPeerDeviceModule, mPlayoutDevice); + ll_set_device_module_capture_device(mPeerDeviceModule, mRecordingDevice); + mPeerDeviceModule->InitPlayout(); + mPeerDeviceModule->InitRecording(); + mPeerDeviceModule->StartPlayout(); + mPeerDeviceModule->StartRecording(); + } + } + ); + mSignalingThread->PostTask( + [this, enable] + { + for (auto &connection : mPeerConnections) + { + if (enable) + { + connection->enableSenderTracks(false); + } + else + { + connection->resetMute(); + } + connection->enableReceiverTracks(!enable); + } + }); +} + +float LLWebRTCImpl::getTuningAudioLevel() { return -20 * log10f(mTuningAudioDeviceObserver->getMicrophoneEnergy()); } + +float LLWebRTCImpl::getPeerConnectionAudioLevel() { return -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()); } + +void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->setGain(gain); } + + +// +// Peer Connection Helpers +// + +LLWebRTCPeerConnectionInterface *LLWebRTCImpl::newPeerConnection() +{ + rtc::scoped_refptr<LLWebRTCPeerConnectionImpl> peerConnection = rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>(new rtc::RefCountedObject<LLWebRTCPeerConnectionImpl>()); + peerConnection->init(this); + + mPeerConnections.emplace_back(peerConnection); + peerConnection->enableSenderTracks(!mMute); + return peerConnection.get(); +} + +void LLWebRTCImpl::freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection) +{ + std::vector<rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>>::iterator it = + std::find(mPeerConnections.begin(), mPeerConnections.end(), peer_connection); + if (it != mPeerConnections.end()) + { + mPeerConnections.erase(it); + } + if (mPeerConnections.empty()) + { + setRecording(false); + } +} + + +// +// LLWebRTCPeerConnectionImpl implementation. +// +// Most peer connection (signaling) happens on +// the signaling thread. 
+ +LLWebRTCPeerConnectionImpl::LLWebRTCPeerConnectionImpl() : + mWebRTCImpl(nullptr), + mPeerConnection(nullptr), + mMute(false), + mAnswerReceived(false) +{ +} + +LLWebRTCPeerConnectionImpl::~LLWebRTCPeerConnectionImpl() +{ + mSignalingObserverList.clear(); + mDataObserverList.clear(); +} + +// +// LLWebRTCPeerConnection interface +// + +void LLWebRTCPeerConnectionImpl::init(LLWebRTCImpl * webrtc_impl) +{ + mWebRTCImpl = webrtc_impl; + mPeerConnectionFactory = mWebRTCImpl->getPeerConnectionFactory(); +} +void LLWebRTCPeerConnectionImpl::terminate() +{ + mWebRTCImpl->PostSignalingTask( + [=]() + { + if (mPeerConnection) + { + if (mDataChannel) + { + { + mDataChannel->Close(); + mDataChannel = nullptr; + } + } + + mPeerConnection->Close(); + if (mLocalStream) + { + auto tracks = mLocalStream->GetAudioTracks(); + for (auto& track : tracks) + { + mLocalStream->RemoveTrack(track); + } + mLocalStream = nullptr; + } + mPeerConnection = nullptr; + + for (auto &observer : mSignalingObserverList) + { + observer->OnPeerConnectionClosed(); + } + } + }); +} + +void LLWebRTCPeerConnectionImpl::setSignalingObserver(LLWebRTCSignalingObserver *observer) { mSignalingObserverList.emplace_back(observer); } + +void LLWebRTCPeerConnectionImpl::unsetSignalingObserver(LLWebRTCSignalingObserver *observer) +{ + std::vector<LLWebRTCSignalingObserver *>::iterator it = + std::find(mSignalingObserverList.begin(), mSignalingObserverList.end(), observer); + if (it != mSignalingObserverList.end()) + { + mSignalingObserverList.erase(it); + } +} + + +bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnectionInterface::InitOptions& options) +{ + RTC_DCHECK(!mPeerConnection); + mAnswerReceived = false; + + mWebRTCImpl->PostSignalingTask( + [this,options]() + { + webrtc::PeerConnectionInterface::RTCConfiguration config; + for (auto server : options.mServers) + { + webrtc::PeerConnectionInterface::IceServer ice_server; + for (auto url : server.mUrls) + { + ice_server.urls.push_back(url); + } + ice_server.username = server.mUserName; + ice_server.password = server.mPassword; + config.servers.push_back(ice_server); + } + config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan; + + config.set_min_port(60000); + config.set_max_port(60100); + + webrtc::PeerConnectionDependencies pc_dependencies(this); + auto error_or_peer_connection = mPeerConnectionFactory->CreatePeerConnectionOrError(config, std::move(pc_dependencies)); + if (error_or_peer_connection.ok()) + { + mPeerConnection = std::move(error_or_peer_connection.value()); + } + else + { + RTC_LOG(LS_ERROR) << __FUNCTION__ << "Error creating peer connection: " << error_or_peer_connection.error().message(); + for (auto &observer : mSignalingObserverList) + { + observer->OnRenegotiationNeeded(); + } + return; + } + + webrtc::DataChannelInit init; + init.ordered = true; + + auto data_channel_or_error = mPeerConnection->CreateDataChannelOrError("SLData", &init); + if (data_channel_or_error.ok()) + { + mDataChannel = std::move(data_channel_or_error.value()); + + mDataChannel->RegisterObserver(this); + } + + cricket::AudioOptions audioOptions; + audioOptions.auto_gain_control = true; + audioOptions.echo_cancellation = true; + audioOptions.noise_suppression = true; + + mLocalStream = mPeerConnectionFactory->CreateLocalMediaStream("SLStream"); + + rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track( + mPeerConnectionFactory->CreateAudioTrack("SLAudio", mPeerConnectionFactory->CreateAudioSource(audioOptions).get())); + audio_track->set_enabled(false); + 
mLocalStream->AddTrack(audio_track); + + mPeerConnection->AddTrack(audio_track, {"SLStream"}); + + auto senders = mPeerConnection->GetSenders(); + + for (auto &sender : senders) + { + webrtc::RtpParameters params; + webrtc::RtpCodecParameters codecparam; + codecparam.name = "opus"; + codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.clock_rate = 48000; + codecparam.num_channels = 2; + codecparam.parameters["stereo"] = "1"; + codecparam.parameters["sprop-stereo"] = "1"; + params.codecs.push_back(codecparam); + sender->SetParameters(params); + } + + auto receivers = mPeerConnection->GetReceivers(); + for (auto &receiver : receivers) + { + webrtc::RtpParameters params; + webrtc::RtpCodecParameters codecparam; + codecparam.name = "opus"; + codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.clock_rate = 48000; + codecparam.num_channels = 2; + codecparam.parameters["stereo"] = "1"; + codecparam.parameters["sprop-stereo"] = "1"; + params.codecs.push_back(codecparam); + receiver->SetParameters(params); + } + + webrtc::PeerConnectionInterface::RTCOfferAnswerOptions offerOptions; + mPeerConnection->CreateOffer(this, offerOptions); + }); + + return true; +} + +bool LLWebRTCPeerConnectionImpl::shutdownConnection() +{ + terminate(); + return true; +} + +void LLWebRTCPeerConnectionImpl::enableSenderTracks(bool enable) +{ + // set_enabled shouldn't be done on the worker thread. + if (mPeerConnection) + { + auto senders = mPeerConnection->GetSenders(); + for (auto &sender : senders) + { + sender->track()->set_enabled(enable); + } + } +} + +void LLWebRTCPeerConnectionImpl::enableReceiverTracks(bool enable) +{ + // set_enabled shouldn't be done on the worker thread + if (mPeerConnection) + { + auto receivers = mPeerConnection->GetReceivers(); + for (auto &receiver : receivers) + { + receiver->track()->set_enabled(enable); + } + } +} + +// Tell the peer connection that we've received a SDP answer from the sim. +void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp) +{ + RTC_LOG(LS_INFO) << __FUNCTION__ << " Remote SDP: " << sdp; + + mWebRTCImpl->PostSignalingTask( + [this, sdp]() + { + if (mPeerConnection) + { + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->peer_connection_state(); + mPeerConnection->SetRemoteDescription(webrtc::CreateSessionDescription(webrtc::SdpType::kAnswer, sdp), + rtc::scoped_refptr<webrtc::SetRemoteDescriptionObserverInterface>(this)); + } + }); +} + + +// +// LLWebRTCAudioInterface implementation +// + +void LLWebRTCPeerConnectionImpl::setMute(bool mute) +{ + mMute = mute; + mWebRTCImpl->PostSignalingTask( + [this]() + { + if (mPeerConnection) + { + auto senders = mPeerConnection->GetSenders(); + + RTC_LOG(LS_INFO) << __FUNCTION__ << (mMute ? 
"disabling" : "enabling") << " streams count " << senders.size(); + for (auto &sender : senders) + { + auto track = sender->track(); + if (track) + { + track->set_enabled(!mMute); + } + } + } + }); +} + +void LLWebRTCPeerConnectionImpl::resetMute() +{ + setMute(mMute); +} + +void LLWebRTCPeerConnectionImpl::setReceiveVolume(float volume) +{ + mWebRTCImpl->PostSignalingTask( + [this, volume]() + { + if (mPeerConnection) + { + auto receivers = mPeerConnection->GetReceivers(); + + for (auto &receiver : receivers) + { + for (auto &stream : receiver->streams()) + { + for (auto &track : stream->GetAudioTracks()) + { + track->GetSource()->SetVolume(volume); + } + } + } + } + }); +} + +void LLWebRTCPeerConnectionImpl::setSendVolume(float volume) +{ + mWebRTCImpl->PostSignalingTask( + [this, volume]() + { + if (mLocalStream) + { + for (auto &track : mLocalStream->GetAudioTracks()) + { + track->GetSource()->SetVolume(volume*5.0); + } + } + }); +} + +// +// PeerConnectionObserver implementation. +// + +void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver, + const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) +{ + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); + webrtc::RtpParameters params; + webrtc::RtpCodecParameters codecparam; + codecparam.name = "opus"; + codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.clock_rate = 48000; + codecparam.num_channels = 2; + codecparam.parameters["stereo"] = "1"; + codecparam.parameters["sprop-stereo"] = "1"; + params.codecs.push_back(codecparam); + receiver->SetParameters(params); +} + +void LLWebRTCPeerConnectionImpl::OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) +{ + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); +} + +void LLWebRTCPeerConnectionImpl::OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) +{ + if (mDataChannel) + { + mDataChannel->UnregisterObserver(); + } + mDataChannel = channel; + channel->RegisterObserver(this); +} + +void LLWebRTCPeerConnectionImpl::OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) +{ + LLWebRTCSignalingObserver::EIceGatheringState webrtc_new_state = LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_NEW; + switch (new_state) + { + case webrtc::PeerConnectionInterface::IceGatheringState::kIceGatheringNew: + webrtc_new_state = LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_NEW; + break; + case webrtc::PeerConnectionInterface::IceGatheringState::kIceGatheringGathering: + webrtc_new_state = LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_GATHERING; + break; + case webrtc::PeerConnectionInterface::IceGatheringState::kIceGatheringComplete: + webrtc_new_state = LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_COMPLETE; + break; + default: + RTC_LOG(LS_ERROR) << __FUNCTION__ << " Bad Ice Gathering State" << new_state; + webrtc_new_state = LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_NEW; + return; + } + + if (mAnswerReceived) + { + for (auto &observer : mSignalingObserverList) + { + observer->OnIceGatheringState(webrtc_new_state); + } + } +} + +// Called any time the PeerConnectionState changes. 
+void LLWebRTCPeerConnectionImpl::OnConnectionChange(webrtc::PeerConnectionInterface::PeerConnectionState new_state) +{ + RTC_LOG(LS_ERROR) << __FUNCTION__ << " Peer Connection State Change " << new_state; + + switch (new_state) + { + case webrtc::PeerConnectionInterface::PeerConnectionState::kConnected: + { + mWebRTCImpl->PostWorkerTask([this]() { + for (auto &observer : mSignalingObserverList) + { + observer->OnAudioEstablished(this); + } + }); + break; + } + case webrtc::PeerConnectionInterface::PeerConnectionState::kFailed: + case webrtc::PeerConnectionInterface::PeerConnectionState::kDisconnected: + { + for (auto &observer : mSignalingObserverList) + { + observer->OnRenegotiationNeeded(); + } + + break; + } + default: + { + break; + } + } +} + +// Convert an ICE candidate into a string appropriate for trickling +// to the Secondlife WebRTC server via the sim. +static std::string iceCandidateToTrickleString(const webrtc::IceCandidateInterface *candidate) +{ + std::ostringstream candidate_stream; + + candidate_stream << + candidate->candidate().foundation() << " " << + std::to_string(candidate->candidate().component()) << " " << + candidate->candidate().protocol() << " " << + std::to_string(candidate->candidate().priority()) << " " << + candidate->candidate().address().ipaddr().ToString() << " " << + candidate->candidate().address().PortAsString() << " typ "; + + if (candidate->candidate().type() == cricket::LOCAL_PORT_TYPE) + { + candidate_stream << "host"; + } + else if (candidate->candidate().type() == cricket::STUN_PORT_TYPE) + { + candidate_stream << "srflx " << + "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << + "rport " << candidate->candidate().related_address().PortAsString(); + } + else if (candidate->candidate().type() == cricket::RELAY_PORT_TYPE) + { + candidate_stream << "relay " << + "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << + "rport " << candidate->candidate().related_address().PortAsString(); + } + else if (candidate->candidate().type() == cricket::PRFLX_PORT_TYPE) + { + candidate_stream << "prflx " << + "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << + "rport " << candidate->candidate().related_address().PortAsString(); + } + else { + RTC_LOG(LS_ERROR) << __FUNCTION__ << " Unknown candidate type " << candidate->candidate().type(); + } + if (candidate->candidate().protocol() == "tcp") + { + candidate_stream << " tcptype " << candidate->candidate().tcptype(); + } + + return candidate_stream.str(); +} + +// The webrtc library has a new ice candidate. +void LLWebRTCPeerConnectionImpl::OnIceCandidate(const webrtc::IceCandidateInterface *candidate) +{ + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index(); + + if (!candidate) + { + RTC_LOG(LS_ERROR) << __FUNCTION__ << " No Ice Candidate Given"; + return; + } + if (mAnswerReceived) + { + // We've already received an answer SDP from the Secondlife WebRTC server + // so simply tell observers about our new ice candidate. + for (auto &observer : mSignalingObserverList) + { + LLWebRTCIceCandidate ice_candidate; + ice_candidate.mCandidate = iceCandidateToTrickleString(candidate); + ice_candidate.mMLineIndex = candidate->sdp_mline_index(); + ice_candidate.mSdpMid = candidate->sdp_mid(); + observer->OnIceCandidate(ice_candidate); + } + } + else + { + // As we've not yet received our answer, cache the candidate. 
+ mCachedIceCandidates.push_back( + webrtc::CreateIceCandidate(candidate->sdp_mid(), + candidate->sdp_mline_index(), + candidate->candidate())); + } +} + +// +// CreateSessionDescriptionObserver implementation. +// +void LLWebRTCPeerConnectionImpl::OnSuccess(webrtc::SessionDescriptionInterface *desc) +{ + std::string sdp; + desc->ToString(&sdp); + RTC_LOG(LS_INFO) << sdp; + ; + // mangle the sdp as this is the only way currently to bump up + // the send audio rate to 48k + std::istringstream sdp_stream(sdp); + std::ostringstream sdp_mangled_stream; + std::string sdp_line; + std::string opus_payload; + while (std::getline(sdp_stream, sdp_line)) + { + int bandwidth = 0; + int payload_id = 0; + // force mono down, stereo up + if (std::sscanf(sdp_line.c_str(), "a=rtpmap:%i opus/%i/2", &payload_id, &bandwidth) == 2) + { + opus_payload = std::to_string(payload_id); + sdp_mangled_stream << "a=rtpmap:" << opus_payload << " opus/48000/2" << "\n"; + } + else if (sdp_line.find("a=fmtp:" + opus_payload) == 0) + { + sdp_mangled_stream << sdp_line << "a=fmtp:" << opus_payload + << " minptime=10;useinbandfec=1;stereo=1;sprop-stereo=1;maxplaybackrate=48000;sprop-maxplaybackrate=48000;sprop-maxcapturerate=48000\n"; + } + else + { + sdp_mangled_stream << sdp_line << "\n"; + } + } + + RTC_LOG(LS_INFO) << __FUNCTION__ << " Local SDP: " << sdp_mangled_stream.str(); + std::string mangled_sdp = sdp_mangled_stream.str(); + for (auto &observer : mSignalingObserverList) + { + observer->OnOfferAvailable(mangled_sdp); + } + + mPeerConnection->SetLocalDescription(std::unique_ptr<webrtc::SessionDescriptionInterface>( + webrtc::CreateSessionDescription(webrtc::SdpType::kOffer, mangled_sdp)), + rtc::scoped_refptr<webrtc::SetLocalDescriptionObserverInterface>(this)); + +} + +void LLWebRTCPeerConnectionImpl::OnFailure(webrtc::RTCError error) +{ + RTC_LOG(LS_ERROR) << ToString(error.type()) << ": " << error.message(); + for (auto &observer : mSignalingObserverList) + { + observer->OnRenegotiationNeeded(); + } +} + +// +// SetRemoteDescriptionObserverInterface implementation. +// +void LLWebRTCPeerConnectionImpl::OnSetRemoteDescriptionComplete(webrtc::RTCError error) +{ + // we've received an answer SDP from the sim. + + RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->signaling_state(); + if (!error.ok()) + { + RTC_LOG(LS_ERROR) << ToString(error.type()) << ": " << error.message(); + for (auto &observer : mSignalingObserverList) + { + observer->OnRenegotiationNeeded(); + } + return; + } + mAnswerReceived = true; + + // tell the observers about any cached ICE candidates. + for (auto &observer : mSignalingObserverList) + { + for (auto &candidate : mCachedIceCandidates) + { + LLWebRTCIceCandidate ice_candidate; + ice_candidate.mCandidate = iceCandidateToTrickleString(candidate.get()); + ice_candidate.mMLineIndex = candidate->sdp_mline_index(); + ice_candidate.mSdpMid = candidate->sdp_mid(); + observer->OnIceCandidate(ice_candidate); + } + } + mCachedIceCandidates.clear(); + if (mPeerConnection) + { + OnIceGatheringChange(mPeerConnection->ice_gathering_state()); + } + +} + +// +// SetLocalDescriptionObserverInterface implementation. 
+// +void LLWebRTCPeerConnectionImpl::OnSetLocalDescriptionComplete(webrtc::RTCError error) +{ +} + +// +// DataChannelObserver implementation +// + +void LLWebRTCPeerConnectionImpl::OnStateChange() +{ + if (!mDataChannel) + { + return; + } + RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State: " << webrtc::DataChannelInterface::DataStateString(mDataChannel->state()); + switch (mDataChannel->state()) + { + case webrtc::DataChannelInterface::kOpen: + RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State Open"; + for (auto &observer : mSignalingObserverList) + { + observer->OnDataChannelReady(this); + } + break; + case webrtc::DataChannelInterface::kConnecting: + RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State Connecting"; + break; + case webrtc::DataChannelInterface::kClosing: + RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State closing"; + break; + case webrtc::DataChannelInterface::kClosed: + RTC_LOG(LS_INFO) << __FUNCTION__ << " Data Channel State closed"; + break; + default: + break; + } +} + +void LLWebRTCPeerConnectionImpl::OnMessage(const webrtc::DataBuffer& buffer) +{ + std::string data((const char*)buffer.data.cdata(), buffer.size()); + for (auto &observer : mDataObserverList) + { + observer->OnDataReceived(data, buffer.binary); + } +} + +// +// LLWebRTCDataInterface +// + +void LLWebRTCPeerConnectionImpl::sendData(const std::string& data, bool binary) +{ + if (mDataChannel) + { + rtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length()); + webrtc::DataBuffer buffer(cowBuffer, binary); + mWebRTCImpl->PostNetworkTask([this, buffer]() { + if (mDataChannel) + { + mDataChannel->Send(buffer); + } + }); + } +} + +void LLWebRTCPeerConnectionImpl::setDataObserver(LLWebRTCDataObserver* observer) +{ + mDataObserverList.emplace_back(observer); +} + +void LLWebRTCPeerConnectionImpl::unsetDataObserver(LLWebRTCDataObserver* observer) +{ + std::vector<LLWebRTCDataObserver *>::iterator it = + std::find(mDataObserverList.begin(), mDataObserverList.end(), observer); + if (it != mDataObserverList.end()) + { + mDataObserverList.erase(it); + } +} + +LLWebRTCImpl * gWebRTCImpl = nullptr; +LLWebRTCDeviceInterface * getDeviceInterface() +{ + return gWebRTCImpl; +} + +LLWebRTCPeerConnectionInterface* newPeerConnection() +{ + return gWebRTCImpl->newPeerConnection(); +} + +void freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection) +{ + gWebRTCImpl->freePeerConnection(peer_connection); +} + + +void init(LLWebRTCLogCallback* logCallback) +{ + gWebRTCImpl = new LLWebRTCImpl(logCallback); + gWebRTCImpl->init(); +} + +void terminate() +{ + if (gWebRTCImpl) + { + gWebRTCImpl->terminate(); + gWebRTCImpl = nullptr; + } +} + +} // namespace llwebrtc diff --git a/indra/llwebrtc/llwebrtc.h b/indra/llwebrtc/llwebrtc.h new file mode 100644 index 0000000000..c6fdb909dd --- /dev/null +++ b/indra/llwebrtc/llwebrtc.h @@ -0,0 +1,292 @@ +/** + * @file llwebrtc.h + * @brief WebRTC interface + * + * $LicenseInfo:firstyear=2023&license=viewerlgpl$ + * Second Life Viewer Source Code + * Copyright (C) 2023, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+/*
+ * llwebrtc wraps the native webrtc c++ library in a dynamic library with a simplified interface
+ * so that the viewer can use it. This is done because native webrtc has a different
+ * overall threading model than the viewer.
+ * The native webrtc library is also compiled with clang, and has memory management
+ * functions that conflict namespace-wise with those in the viewer.
+ *
+ * Due to these differences, code from the viewer cannot be pulled into this
+ * dynamic library, so it remains very simple.
+ */
+
+#ifndef LLWEBRTC_H
+#define LLWEBRTC_H
+
+#include <string>
+#include <vector>
+
+#ifdef LL_MAKEDLL
+#ifdef WEBRTC_WIN
+#define LLSYMEXPORT __declspec(dllexport)
+#elif WEBRTC_LINUX
+#define LLSYMEXPORT __attribute__((visibility("default")))
+#else
+#define LLSYMEXPORT /**/
+#endif
+#else
+#define LLSYMEXPORT /**/
+#endif // LL_MAKEDLL
+
+namespace llwebrtc
+{
+
+class LLWebRTCLogCallback
+{
+public:
+    typedef enum {
+        LOG_LEVEL_VERBOSE = 0,
+        LOG_LEVEL_INFO,
+        LOG_LEVEL_WARNING,
+        LOG_LEVEL_ERROR
+    } LogLevel;
+
+    virtual void LogMessage(LogLevel level, const std::string& message) = 0;
+};
+
+
+// LLWebRTCVoiceDevice is a simple representation of the
+// components of a device, used to communicate this
+// information to the viewer.
+
+
+// A note on threading.
+// Native WebRTC has its own threading model. Some discussion
+// can be found here (https://webrtc.github.io/webrtc-org/native-code/native-apis/)
+//
+// Note that all callbacks to observers will occur on one of the WebRTC native threads
+// (signaling, worker, etc.) Care should be taken to ensure there are no
+// bad interactions with the viewer threads.
+
+class LLWebRTCVoiceDevice
+{
+  public:
+    std::string mDisplayName;  // friendly name for user interface purposes
+    std::string mID;           // internal value for selection
+
+    LLWebRTCVoiceDevice(const std::string &display_name, const std::string &id) :
+        mDisplayName(display_name),
+        mID(id)
+    {
+        if (mID.empty())
+        {
+            mID = display_name;
+        }
+    };
+};
+
+typedef std::vector<LLWebRTCVoiceDevice> LLWebRTCVoiceDeviceList;
+
+
+// The LLWebRTCDevicesObserver should be implemented by the viewer
+// webrtc module, which will receive notifications when devices
+// change (are unplugged, etc.)
+class LLWebRTCDevicesObserver
+{
+  public:
+    virtual void OnDevicesChanged(const LLWebRTCVoiceDeviceList &render_devices,
+                                  const LLWebRTCVoiceDeviceList &capture_devices) = 0;
+};
+
+
+// The LLWebRTCDeviceInterface provides a way for the viewer
+// to enumerate, set, and get notifications of changes
+// for both capture (microphone) and render (speaker)
+// devices.
+
+class LLWebRTCDeviceInterface
+{
+  public:
+    struct AudioConfig {
+
+        bool mAGC { true };
+
+        bool mEchoCancellation { true };
+
+        // TODO: The various levels of noise suppression are configured
+        // on the APM which would require setting config on the APM.
+        // We should pipe the various values through
+        // later.
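+        // The levels below are presumably intended to map onto the audio processing
+        // module's noise suppression settings (roughly low/moderate/high/very high),
+        // with NONE meaning no suppression at all.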
+ typedef enum { + NOISE_SUPPRESSION_LEVEL_NONE = 0, + NOISE_SUPPRESSION_LEVEL_LOW, + NOISE_SUPPRESSION_LEVEL_MODERATE, + NOISE_SUPPRESSION_LEVEL_HIGH, + NOISE_SUPPRESSION_LEVEL_VERY_HIGH + } ENoiseSuppressionLevel; + ENoiseSuppressionLevel mNoiseSuppressionLevel { NOISE_SUPPRESSION_LEVEL_VERY_HIGH }; + }; + + virtual void setAudioConfig(AudioConfig config) = 0; + + // instructs webrtc to refresh the device list. + virtual void refreshDevices() = 0; + + // set the capture and render devices using the unique identifier for the device + virtual void setCaptureDevice(const std::string& id) = 0; + virtual void setRenderDevice(const std::string& id) = 0; + + // Device observers for device change callbacks. + virtual void setDevicesObserver(LLWebRTCDevicesObserver *observer) = 0; + virtual void unsetDevicesObserver(LLWebRTCDevicesObserver *observer) = 0; + + // tuning and audio levels + virtual void setTuningMode(bool enable) = 0; + virtual float getTuningAudioLevel() = 0; // for use during tuning + virtual float getPeerConnectionAudioLevel() = 0; // for use when not tuning + virtual void setPeerConnectionGain(float gain) = 0; +}; + +// LLWebRTCAudioInterface provides the viewer with a way +// to set audio characteristics (mute, send and receive volume) +class LLWebRTCAudioInterface +{ + public: + virtual void setMute(bool mute) = 0; + virtual void setReceiveVolume(float volume) = 0; // volume between 0.0 and 1.0 + virtual void setSendVolume(float volume) = 0; // volume between 0.0 and 1.0 +}; + +// LLWebRTCDataObserver allows the viewer voice module to be notified when +// data is received over the data channel. +class LLWebRTCDataObserver +{ +public: + virtual void OnDataReceived(const std::string& data, bool binary) = 0; +}; + +// LLWebRTCDataInterface allows the viewer to send data over the data channel. +class LLWebRTCDataInterface +{ +public: + + virtual void sendData(const std::string& data, bool binary=false) = 0; + + virtual void setDataObserver(LLWebRTCDataObserver *observer) = 0; + virtual void unsetDataObserver(LLWebRTCDataObserver *observer) = 0; +}; + +// LLWebRTCIceCandidate is a basic structure containing +// information needed for ICE trickling. +struct LLWebRTCIceCandidate +{ + std::string mCandidate; + std::string mSdpMid; + int mMLineIndex; +}; + +// LLWebRTCSignalingObserver provides a way for the native +// webrtc library to notify the viewer voice module of +// various state changes. +class LLWebRTCSignalingObserver +{ + public: + + typedef enum e_ice_gathering_state { + ICE_GATHERING_NEW, + ICE_GATHERING_GATHERING, + ICE_GATHERING_COMPLETE + } EIceGatheringState; + + // Called when ICE gathering states have changed. + // This may be called at any time, as ICE gathering + // can be redone while a connection is up. + virtual void OnIceGatheringState(EIceGatheringState state) = 0; + + // Called when a new ice candidate is available. + virtual void OnIceCandidate(const LLWebRTCIceCandidate& candidate) = 0; + + // Called when an offer is available after a connection is requested. + virtual void OnOfferAvailable(const std::string& sdp) = 0; + + // Called when a connection enters a failure state and renegotiation is needed. + virtual void OnRenegotiationNeeded() = 0; + + // Called when a peer connection has shut down + virtual void OnPeerConnectionClosed() = 0; + + // Called when the audio channel has been established and audio + // can begin. 
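+    // The audio_interface passed here is owned by the library and remains valid
+    // until the owning peer connection is released with freePeerConnection().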
+    virtual void OnAudioEstablished(LLWebRTCAudioInterface *audio_interface) = 0;
+
+    // Called when the data channel has been established and data
+    // transfer can begin.
+    virtual void OnDataChannelReady(LLWebRTCDataInterface *data_interface) = 0;
+};
+
+// LLWebRTCPeerConnectionInterface represents a connection to a peer,
+// in most cases a Secondlife WebRTC server.  This interface
+// allows for management of this peer connection.
+class LLWebRTCPeerConnectionInterface
+{
+  public:
+
+    struct InitOptions
+    {
+        // equivalent of PeerConnectionInterface::IceServer
+        struct IceServers {
+
+            // Valid formats are described in RFC7064 and RFC7065.
+            // URLs should contain DNS hostnames (not IP addresses)
+            // as the TLS certificate policy is 'secure,'
+            // and we do not currently support TLS extensions.
+            std::vector<std::string> mUrls;
+            std::string mUserName;
+            std::string mPassword;
+        };
+
+        std::vector<IceServers> mServers;
+    };
+
+    virtual bool initializeConnection(const InitOptions& options) = 0;
+    virtual bool shutdownConnection() = 0;
+
+    virtual void setSignalingObserver(LLWebRTCSignalingObserver* observer) = 0;
+    virtual void unsetSignalingObserver(LLWebRTCSignalingObserver* observer) = 0;
+
+    virtual void AnswerAvailable(const std::string &sdp) = 0;
+};
+
+// The following define the dynamic linked library
+// exports.
+
+// This library must be initialized before use.
+LLSYMEXPORT void init(LLWebRTCLogCallback* logSink);
+
+// And should be terminated as part of shutdown.
+LLSYMEXPORT void terminate();
+
+// Return an interface for device management.
+LLSYMEXPORT LLWebRTCDeviceInterface* getDeviceInterface();
+
+// Allocate and free peer connections.
+LLSYMEXPORT LLWebRTCPeerConnectionInterface* newPeerConnection();
+LLSYMEXPORT void freePeerConnection(LLWebRTCPeerConnectionInterface *connection);
+}
+
+#endif // LLWEBRTC_H
diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h
new file mode 100644
index 0000000000..bc9b7762dd
--- /dev/null
+++ b/indra/llwebrtc/llwebrtc_impl.h
@@ -0,0 +1,442 @@
+/**
+ * @file llwebrtc_impl.h
+ * @brief WebRTC dynamic library implementation header
+ *
+ * $LicenseInfo:firstyear=2023&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2023, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+#ifndef LLWEBRTC_IMPL_H
+#define LLWEBRTC_IMPL_H
+
+#define LL_MAKEDLL
+#if defined(_WIN32) || defined(_WIN64)
+#define WEBRTC_WIN 1
+#elif defined(__APPLE__)
+#define WEBRTC_MAC 1
+#define WEBRTC_POSIX 1
+#elif __linux__
+#define WEBRTC_LINUX 1
+#define WEBRTC_POSIX 1
+#endif
+
+#include "llwebrtc.h"
+// WebRTC Includes
+#ifdef WEBRTC_WIN
+#pragma warning(disable : 4996) // ignore 'deprecated.'
We don't use the functions marked + // deprecated in the webrtc headers, but msvc complains anyway. + // Clang doesn't, and that's generally what webrtc uses. +#pragma warning(disable : 4068) // ignore 'invalid pragma.' There are clang pragma's in + // the webrtc headers, which msvc doesn't recognize. +#endif // WEBRTC_WIN + +#include "api/scoped_refptr.h" +#include "rtc_base/ref_count.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/ssl_adapter.h" +#include "rtc_base/thread.h" +#include "api/peer_connection_interface.h" +#include "api/media_stream_interface.h" +#include "api/create_peerconnection_factory.h" +#include "modules/audio_device/include/audio_device.h" +#include "modules/audio_device/include/audio_device_data_observer.h" +#include "rtc_base/task_queue.h" +#include "api/task_queue/task_queue_factory.h" +#include "api/task_queue/default_task_queue_factory.h" +#include "modules/audio_device/include/audio_device_defines.h" + + +namespace llwebrtc +{ + +class LLWebRTCPeerConnectionImpl; + +class LLWebRTCLogSink : public rtc::LogSink { +public: + LLWebRTCLogSink(LLWebRTCLogCallback* callback) : + mCallback(callback) + { + } + + // Destructor: close the log file + ~LLWebRTCLogSink() override + { + } + + void OnLogMessage(const std::string& msg, + rtc::LoggingSeverity severity) override + { + if (mCallback) + { + switch(severity) + { + case rtc::LS_VERBOSE: + mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); + break; + case rtc::LS_INFO: + mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); + break; + case rtc::LS_WARNING: + mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); + break; + case rtc::LS_ERROR: + mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); + break; + default: + break; + } + } + } + + void OnLogMessage(const std::string& message) override + { + if (mCallback) + { + mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, message); + } + } + +private: + LLWebRTCLogCallback* mCallback; +}; + +// Implements a class allowing capture of audio data +// to determine audio level of the microphone. +class LLAudioDeviceObserver : public webrtc::AudioDeviceDataObserver +{ + public: + LLAudioDeviceObserver(); + + // Retrieve the RMS audio loudness + float getMicrophoneEnergy(); + + // Data retrieved from the caputure device is + // passed in here for processing. + void OnCaptureData(const void *audio_samples, + const size_t num_samples, + const size_t bytes_per_sample, + const size_t num_channels, + const uint32_t samples_per_sec) override; + + // This is for data destined for the render device. + // not currently used. + void OnRenderData(const void *audio_samples, + const size_t num_samples, + const size_t bytes_per_sample, + const size_t num_channels, + const uint32_t samples_per_sec) override; + + protected: + static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames) + float mSumVector[NUM_PACKETS_TO_FILTER]; + float mMicrophoneEnergy; +}; + +// Used to process/retrieve audio levels after +// all of the processing (AGC, AEC, etc.) for display in-world to the user. +class LLCustomProcessor : public webrtc::CustomProcessing +{ + public: + LLCustomProcessor(); + ~LLCustomProcessor() override {} + + // (Re-) Initializes the submodule. + void Initialize(int sample_rate_hz, int num_channels) override; + + // Analyzes the given capture or render signal. + void Process(webrtc::AudioBuffer *audio) override; + + // Returns a string representation of the module state. 
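+    // (Part of the webrtc::CustomProcessing interface; this implementation has no
+    // state to report.)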
+ std::string ToString() const override { return ""; } + + float getMicrophoneEnergy() { return mMicrophoneEnergy; } + + void setGain(float gain) { mGain = gain; } + + protected: + static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing + int mSampleRateHz; + int mNumChannels; + + float mSumVector[NUM_PACKETS_TO_FILTER]; + float mMicrophoneEnergy; + float mGain; +}; + + +// Primary singleton implementation for interfacing +// with the native webrtc library. +class LLWebRTCImpl : public LLWebRTCDeviceInterface +#if __x86_64__ && !__FreeBSD__ + , public webrtc::AudioDeviceSink +#endif +{ + public: + LLWebRTCImpl(LLWebRTCLogCallback* logCallback); + ~LLWebRTCImpl() + { + delete mLogSink; + } + + void init(); + void terminate(); + + // + // LLWebRTCDeviceInterface + // + + void setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config = LLWebRTCDeviceInterface::AudioConfig()) override; + + void refreshDevices() override; + + void setDevicesObserver(LLWebRTCDevicesObserver *observer) override; + void unsetDevicesObserver(LLWebRTCDevicesObserver *observer) override; + + void setCaptureDevice(const std::string& id) override; + void setRenderDevice(const std::string& id) override; + + void setTuningMode(bool enable) override; + float getTuningAudioLevel() override; + float getPeerConnectionAudioLevel() override; + + void setPeerConnectionGain(float gain) override; + + // + // AudioDeviceSink + // + void OnDevicesUpdated() +#if __x86_64__ && !__FreeBSD__ + override +#endif + ; + + // + // Helpers + // + + // The following thread helpers allow the + // LLWebRTCPeerConnectionImpl class to post + // tasks to the native webrtc threads. + void PostWorkerTask(absl::AnyInvocable<void() &&> task, + const webrtc::Location& location = webrtc::Location::Current()) + { + mWorkerThread->PostTask(std::move(task), location); + } + + void PostSignalingTask(absl::AnyInvocable<void() &&> task, + const webrtc::Location& location = webrtc::Location::Current()) + { + mSignalingThread->PostTask(std::move(task), location); + } + + void PostNetworkTask(absl::AnyInvocable<void() &&> task, + const webrtc::Location& location = webrtc::Location::Current()) + { + mNetworkThread->PostTask(std::move(task), location); + } + + void WorkerBlockingCall(rtc::FunctionView<void()> functor, + const webrtc::Location& location = webrtc::Location::Current()) + { + mWorkerThread->BlockingCall(std::move(functor), location); + } + + void SignalingBlockingCall(rtc::FunctionView<void()> functor, + const webrtc::Location& location = webrtc::Location::Current()) + { + mSignalingThread->BlockingCall(std::move(functor), location); + } + + void NetworkBlockingCall(rtc::FunctionView<void()> functor, + const webrtc::Location& location = webrtc::Location::Current()) + { + mNetworkThread->BlockingCall(std::move(functor), location); + } + + // Allows the LLWebRTCPeerConnectionImpl class to retrieve the + // native webrtc PeerConnectionFactory. + rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> getPeerConnectionFactory() + { + return mPeerConnectionFactory; + } + + // create or destroy a peer connection. 
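+    // Connections returned here are tracked in mPeerConnections and should be
+    // released through freePeerConnection() rather than deleted directly.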
+ LLWebRTCPeerConnectionInterface* newPeerConnection(); + void freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection); + + // enables/disables capture via the capture device + void setRecording(bool recording); + + protected: + LLWebRTCLogSink* mLogSink; + + // The native webrtc threads + std::unique_ptr<rtc::Thread> mNetworkThread; + std::unique_ptr<rtc::Thread> mWorkerThread; + std::unique_ptr<rtc::Thread> mSignalingThread; + + // The factory that allows creation of native webrtc PeerConnections. + rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory; + + rtc::scoped_refptr<webrtc::AudioProcessing> mAudioProcessingModule; + + // more native webrtc stuff + std::unique_ptr<webrtc::TaskQueueFactory> mTaskQueueFactory; + + + // Devices + void updateDevices(); + rtc::scoped_refptr<webrtc::AudioDeviceModule> mTuningDeviceModule; + rtc::scoped_refptr<webrtc::AudioDeviceModule> mPeerDeviceModule; + std::vector<LLWebRTCDevicesObserver *> mVoiceDevicesObserverList; + + // accessors in native webrtc for devices aren't apparently implemented yet. + bool mTuningMode; + int32_t mRecordingDevice; + LLWebRTCVoiceDeviceList mRecordingDeviceList; + + int32_t mPlayoutDevice; + LLWebRTCVoiceDeviceList mPlayoutDeviceList; + + bool mMute; + + LLAudioDeviceObserver * mTuningAudioDeviceObserver; + LLCustomProcessor * mPeerCustomProcessor; + + // peer connections + std::vector<rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>> mPeerConnections; +}; + + +// The implementation of a peer connection, which contains +// the various interfaces used by the viewer to interact with +// the webrtc connection. +class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface, + public LLWebRTCAudioInterface, + public LLWebRTCDataInterface, + public webrtc::PeerConnectionObserver, + public webrtc::CreateSessionDescriptionObserver, + public webrtc::SetRemoteDescriptionObserverInterface, + public webrtc::SetLocalDescriptionObserverInterface, + public webrtc::DataChannelObserver + +{ + public: + LLWebRTCPeerConnectionImpl(); + ~LLWebRTCPeerConnectionImpl(); + + void init(LLWebRTCImpl * webrtc_impl); + void terminate(); + + virtual void AddRef() const override = 0; + virtual rtc::RefCountReleaseStatus Release() const override = 0; + + // + // LLWebRTCPeerConnection + // + bool initializeConnection(const InitOptions& options) override; + bool shutdownConnection() override; + + void setSignalingObserver(LLWebRTCSignalingObserver *observer) override; + void unsetSignalingObserver(LLWebRTCSignalingObserver *observer) override; + void AnswerAvailable(const std::string &sdp) override; + + // + // LLWebRTCAudioInterface + // + void setMute(bool mute) override; + void setReceiveVolume(float volume) override; // volume between 0.0 and 1.0 + void setSendVolume(float volume) override; // volume between 0.0 and 1.0 + + // + // LLWebRTCDataInterface + // + void sendData(const std::string& data, bool binary=false) override; + void setDataObserver(LLWebRTCDataObserver *observer) override; + void unsetDataObserver(LLWebRTCDataObserver *observer) override; + + // + // PeerConnectionObserver implementation. 
+ // + + void OnSignalingChange(webrtc::PeerConnectionInterface::SignalingState new_state) override {} + void OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver, + const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) override; + void OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) override; + void OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) override; + void OnRenegotiationNeeded() override {} + void OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) override {}; + void OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) override; + void OnIceCandidate(const webrtc::IceCandidateInterface *candidate) override; + void OnIceConnectionReceivingChange(bool receiving) override {} + void OnConnectionChange(webrtc::PeerConnectionInterface::PeerConnectionState new_state) override; + + // + // CreateSessionDescriptionObserver implementation. + // + void OnSuccess(webrtc::SessionDescriptionInterface *desc) override; + void OnFailure(webrtc::RTCError error) override; + + // + // SetRemoteDescriptionObserverInterface implementation. + // + void OnSetRemoteDescriptionComplete(webrtc::RTCError error) override; + + // + // SetLocalDescriptionObserverInterface implementation. + // + void OnSetLocalDescriptionComplete(webrtc::RTCError error) override; + + // + // DataChannelObserver implementation. + // + void OnStateChange() override; + void OnMessage(const webrtc::DataBuffer& buffer) override; + + // Helpers + void resetMute(); + void enableSenderTracks(bool enable); + void enableReceiverTracks(bool enable); + + protected: + + LLWebRTCImpl * mWebRTCImpl; + + rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory; + + bool mMute; + + // signaling + std::vector<LLWebRTCSignalingObserver *> mSignalingObserverList; + std::vector<std::unique_ptr<webrtc::IceCandidateInterface>> mCachedIceCandidates; + bool mAnswerReceived; + + rtc::scoped_refptr<webrtc::PeerConnectionInterface> mPeerConnection; + rtc::scoped_refptr<webrtc::MediaStreamInterface> mLocalStream; + + // data + std::vector<LLWebRTCDataObserver *> mDataObserverList; + rtc::scoped_refptr<webrtc::DataChannelInterface> mDataChannel; +}; + +} + +#endif // LLWEBRTC_IMPL_H diff --git a/indra/newview/CMakeLists.txt b/indra/newview/CMakeLists.txt index 19ecfe425c..b79ccee934 100644 --- a/indra/newview/CMakeLists.txt +++ b/indra/newview/CMakeLists.txt @@ -718,6 +718,7 @@ set(viewer_SOURCE_FILES llvoiceclient.cpp llvoicevisualizer.cpp llvoicevivox.cpp + llvoicewebrtc.cpp llvoinventorylistener.cpp llvopartgroup.cpp llvosky.cpp @@ -745,6 +746,10 @@ set(viewer_SOURCE_FILES pipeline.cpp ) +if (CMAKE_SYSTEM_NAME MATCHES FreeBSD) + list(REMOVE_ITEM viewer_SOURCE_FILES llvoicewebrtc.cpp) +endif () + set(VIEWER_BINARY_NAME "secondlife-bin" CACHE STRING "The name of the viewer executable to create.") @@ -1375,6 +1380,7 @@ set(viewer_HEADER_FILES llvoiceclient.h llvoicevisualizer.h llvoicevivox.h + llvoicewebrtc.h llvoinventorylistener.h llvopartgroup.h llvosky.h @@ -1476,6 +1482,7 @@ if (USESYSTEMLIBS AND NOT DARWIN) endif (USESYSTEMLIBS AND NOT DARWIN) if (WINDOWS) + list(APPEND viewer_SOURCE_FILES llappviewerwin32.cpp llwindebug.cpp @@ -1755,6 +1762,7 @@ if (WINDOWS) ${SHARED_LIB_STAGING_DIR}/openjp2.dll ${SHARED_LIB_STAGING_DIR}/libhunspell.dll ${SHARED_LIB_STAGING_DIR}/uriparser.dll + ${SHARED_LIB_STAGING_DIR}/llwebrtc.dll #${SHARED_LIB_STAGING_DIR}/${LL_INTDIR}/SLVoice.exe 
#${SHARED_LIB_STAGING_DIR}/${LL_INTDIR}/libsndfile-1.dll #${SHARED_LIB_STAGING_DIR}/${LL_INTDIR}/vivoxoal.dll @@ -1821,13 +1829,14 @@ if (WINDOWS) DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/viewer_manifest.py stage_third_party_libs + llwebrtc ${COPY_INPUT_DEPENDENCIES} COMMENT "Performing viewer_manifest copy" ) add_custom_target(copy_w_viewer_manifest ALL DEPENDS ${CMAKE_CFG_INTDIR}/copy_touched.bat) - add_dependencies(${VIEWER_BINARY_NAME} stage_third_party_libs llcommon copy_w_viewer_manifest) + add_dependencies(${VIEWER_BINARY_NAME} stage_third_party_libs llcommon llwebrtc copy_w_viewer_manifest) if (EXISTS ${CMAKE_SOURCE_DIR}/copy_win_scripts) add_dependencies(${VIEWER_BINARY_NAME} copy_win_scripts) @@ -1960,6 +1969,10 @@ target_link_libraries(${VIEWER_BINARY_NAME} ll::tracy ) +if (NOT CMAKE_SYSTEM_NAME MATCHES FreeBSD) + target_link_libraries(${VIEWER_BINARY_NAME} llwebrtc ) +endif () + if (ENABLE_MEDIA_PLUGINS) target_link_libraries(${VIEWER_BINARY_NAME} ll::libvlc ) if (DARWIN OR LINUX) @@ -2278,8 +2291,6 @@ if (DARWIN) set(VIEWER_APP_BUNDLE "${CMAKE_CURRENT_BINARY_DIR}/$<IF:$<BOOL:${LL_GENERATOR_IS_MULTI_CONFIG}>,$<CONFIG>,>/${product}.app") set(VIEWER_APP_EXE "${VIEWER_APP_BUNDLE}/Contents/MacOS/${product}") - set(VIEWER_APP_DSYM "${VIEWER_APP_EXE}.dSYM") - set(VIEWER_APP_XCARCHIVE "${VIEWER_APP_BUNDLE}/../${product}.xcarchive.zip") configure_file( "${CMAKE_CURRENT_SOURCE_DIR}/Info-SecondLife.plist" @@ -2392,73 +2403,51 @@ if (PACKAGE AND USESYSTEMLIBS) include(CPack) endif (PACKAGE AND USESYSTEMLIBS) -# Note that the conventional VIEWER_SYMBOL_FILE is set by ../../build.sh if (PACKAGE AND (RELEASE_CRASH_REPORTING OR NON_RELEASE_CRASH_REPORTING) AND VIEWER_SYMBOL_FILE) if (USE_BUGSPLAT) # BugSplat symbol-file generation if (WINDOWS) - # Just pack up a tarball containing only the .pdb file for the - # executable. Because we intend to use cygwin tar, we must render - # VIEWER_SYMBOL_FILE in cygwin path syntax. - execute_process(COMMAND "cygpath" "-u" "${VIEWER_SYMBOL_FILE}" - OUTPUT_VARIABLE VIEWER_SYMBOL_FILE_CYGWIN - OUTPUT_STRIP_TRAILING_WHITESPACE) - execute_process(COMMAND "cygpath" "-u" "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}" - OUTPUT_VARIABLE PARENT_DIRECTORY_CYGWIN - OUTPUT_STRIP_TRAILING_WHITESPACE) - add_custom_command(OUTPUT "${VIEWER_SYMBOL_FILE}" - # Use of 'tar ...j' here assumes VIEWER_SYMBOL_FILE endswith .tar.xz; - # testing a string suffix is painful enough in CMake language that - # we'll continue assuming it until forced to generalize. + set(VIEWER_APP_SYMBOLS_ARCHIVE "${SYMBOLS_STAGING_DIR}.sym.tar.xz") + set_target_properties( ${VIEWER_BINARY_NAME} PROPERTIES PDB_OUTPUT_DIRECTORY "${SYMBOLS_STAGING_DIR}") + + # Just pack up a tarball containing only the .pdb files for the + # executables. + add_custom_command(OUTPUT "${VIEWER_APP_SYMBOLS_ARCHIVE}" COMMAND "tar" ARGS "cJf" - "${VIEWER_SYMBOL_FILE_CYGWIN}" - "-C" - "${PARENT_DIRECTORY_CYGWIN}" - "secondlife-bin.pdb" - DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/secondlife-bin.pdb" - COMMENT "Packing viewer PDB into ${VIEWER_SYMBOL_FILE_CYGWIN}" + "${VIEWER_CHANNEL}.sym.tar.xz" + "${VIEWER_CHANNEL}" + DEPENDS "${VIEWER_BINARY_NAME}" llwebrtc + WORKING_DIRECTORY "${SYMBOLS_STAGING_DIR}/.." 
+ COMMENT "Packing viewer PDBs into ${VIEWER_APP_SYMBOLS_ARCHIVE}" ) - add_custom_target(generate_symbols DEPENDS "${VIEWER_SYMBOL_FILE}" ${VIEWER_BINARY_NAME}) - add_dependencies(generate_symbols ${VIEWER_BINARY_NAME}) + add_custom_target(generate_symbols DEPENDS "${VIEWER_APP_SYMBOLS_ARCHIVE}") + add_dependencies(generate_symbols ${VIEWER_BINARY_NAME} llwebrtc) + endif (WINDOWS) if (DARWIN) - # Have to run dsymutil first, then pack up the resulting .dSYM directory - add_custom_command(OUTPUT "${VIEWER_APP_DSYM}" - COMMAND "dsymutil" - ARGS - ${VIEWER_APP_EXE} - COMMENT "Generating ${VIEWER_APP_DSYM}" - ) - add_custom_target(dsym_generate DEPENDS "${VIEWER_APP_DSYM}") - add_dependencies(dsym_generate ${VIEWER_BINARY_NAME}) + set(VIEWER_APP_XCARCHIVE "${SYMBOLS_STAGING_DIR}.xcarchive.zip") + + # we only need an xcarchive with dSYMs (including the application) + set_target_properties(${VIEWER_BINARY_NAME} + PROPERTIES + XCODE_ATTRIBUTE_DEBUG_INFORMATION_FORMAT "dwarf-with-dsym" + XCODE_ATTRIBUTE_DWARF_DSYM_FOLDER_PATH "${SYMBOLS_STAGING_DIR}/dSYMs") + add_custom_command(OUTPUT "${VIEWER_APP_XCARCHIVE}" - COMMAND "zip" - ARGS - "-r" - "${VIEWER_APP_XCARCHIVE}" - "." - WORKING_DIRECTORY "${VIEWER_APP_DSYM}/.." - DEPENDS "${VIEWER_APP_DSYM}" - COMMENT "Generating xcarchive.zip for upload to BugSplat" - ) - add_custom_target(dsym_xcarchive DEPENDS "${VIEWER_APP_XCARCHIVE}") - add_dependencies(dsym_xcarchive dsym_generate) - # Have to create a stamp file, and depend on it, to force CMake to run - # the cleanup step. - add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/dsym.stamp" - COMMAND rm -rf "${VIEWER_APP_DSYM}" - COMMAND touch "${CMAKE_CURRENT_BINARY_DIR}/dsym.stamp" - DEPENDS "${VIEWER_APP_XCARCHIVE}" - COMMENT "Cleaning up dSYM" - ) + COMMAND "zip" + ARGS + "-r" + "${VIEWER_APP_XCARCHIVE}" + "${VIEWER_CHANNEL}" + WORKING_DIRECTORY "${SYMBOLS_STAGING_DIR}/.." 
+ DEPENDS "${VIEWER_BINARY_NAME}" llwebrtc + COMMENT "Generating ${VIEWER_APP_XCARCHIVE} for upload to BugSplat" + ) add_custom_target(generate_symbols DEPENDS - "${VIEWER_APP_DSYM}" "${VIEWER_APP_XCARCHIVE}" - "${CMAKE_CURRENT_BINARY_DIR}/dsym.stamp" ) - add_dependencies(generate_symbols dsym_xcarchive) endif (DARWIN) if (LINUX) # TBD diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml index e03fc429bf..13253b339e 100644 --- a/indra/newview/app_settings/settings.xml +++ b/indra/newview/app_settings/settings.xml @@ -379,7 +379,7 @@ <key>Value</key> <integer>0</integer> </map> - <key>AutoAcceptNewInventory</key> + <key>AutoAcceptNewInventory</key> <map> <key>Comment</key> <string>Automatically accept new notecards/textures/landmarks</string> @@ -13232,6 +13232,39 @@ <key>Value</key> <integer>44125</integer> </map> + <key>VoiceEchoCancellation</key> + <map> + <key>Comment</key> + <string>Voice Echo Cancellation</string> + <key>Persist</key> + <integer>1</integer> + <key>Type</key> + <string>Boolean</string> + <key>Value</key> + <integer>1</integer> + </map> + <key>VoiceAutomaticGainControl</key> + <map> + <key>Comment</key> + <string>Voice Automatic Gain Control</string> + <key>Persist</key> + <integer>1</integer> + <key>Type</key> + <string>Boolean</string> + <key>Value</key> + <integer>1</integer> + </map> + <key>VoiceNoiseSuppressionLevel</key> + <map> + <key>Comment</key> + <string>Voice Noise Suppression Level</string> + <key>Persist</key> + <integer>1</integer> + <key>Type</key> + <string>U32</string> + <key>Value</key> + <integer>4</integer> + </map> <key>WarningsAsChat</key> <map> <key>Comment</key> @@ -13246,13 +13279,13 @@ <key>VoiceServerType</key> <map> <key>Comment</key> - <string>The type of voice server to connect to.</string> + <string>The type of voice server to use for group, conference, and p2p calls.</string> <key>Persist</key> <integer>0</integer> <key>Type</key> <string>String</string> <key>Value</key> - <string>vivox</string> + <string/> </map> <key>WLSkyDetail</key> <map> diff --git a/indra/newview/licenses-mac.txt b/indra/newview/licenses-mac.txt index ac06498555..a3792f0b6b 100644 --- a/indra/newview/licenses-mac.txt +++ b/indra/newview/licenses-mac.txt @@ -764,3 +764,21 @@ sse2neon * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ + +=============================== +libwebrtc binaries (unofficial) +=============================== + +Copyright 2019 Zenichi Amano + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
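For reference, the three new voice settings added to settings.xml above correspond to the fields of LLWebRTCDeviceInterface::AudioConfig declared in llwebrtc.h. The following is a minimal sketch of how viewer-side code might forward them to the library; the helper name applyVoiceAudioSettings is illustrative and not part of this change:

#include "llviewercontrol.h"   // gSavedSettings
#include "llwebrtc.h"

// Copy the saved voice debug settings into the llwebrtc audio config.
static void applyVoiceAudioSettings(llwebrtc::LLWebRTCDeviceInterface* device_interface)
{
    llwebrtc::LLWebRTCDeviceInterface::AudioConfig config;
    config.mEchoCancellation = gSavedSettings.getBOOL("VoiceEchoCancellation");
    config.mAGC              = gSavedSettings.getBOOL("VoiceAutomaticGainControl");
    config.mNoiseSuppressionLevel =
        static_cast<llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel>(
            gSavedSettings.getU32("VoiceNoiseSuppressionLevel"));
    device_interface->setAudioConfig(config);
}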
diff --git a/indra/newview/llagent.cpp b/indra/newview/llagent.cpp index 6db7088812..68136c8396 100644 --- a/indra/newview/llagent.cpp +++ b/indra/newview/llagent.cpp @@ -310,7 +310,7 @@ bool LLAgent::isActionAllowed(const LLSD& sdname) } else { - allow_agent_voice = channel->isActive() && channel->callStarted(); + allow_agent_voice = channel->isActive(); } } @@ -4081,10 +4081,6 @@ bool LLAgent::teleportCore(bool is_local) } make_ui_sound("UISndTeleportOut"); - // MBW -- Let the voice client know a teleport has begun so it can leave the existing channel. - // This was breaking the case of teleporting within a single sim. Backing it out for now. -// LLVoiceClient::getInstance()->leaveChannel(); - return true; } diff --git a/indra/newview/llappviewer.cpp b/indra/newview/llappviewer.cpp index f1c0c104db..c6a7ecdaa1 100644 --- a/indra/newview/llappviewer.cpp +++ b/indra/newview/llappviewer.cpp @@ -3404,14 +3404,14 @@ LLSD LLAppViewer::getViewerInfo() const LLVoiceVersionInfo version = LLVoiceClient::getInstance()->getVersion(); const std::string build_version = version.mBuildVersion; std::ostringstream version_string; - if (std::equal(build_version.begin(), build_version.begin() + version.serverVersion.size(), + if (std::equal(version.mBuildVersion.begin(), version.mBuildVersion.begin() + version.serverVersion.size(), version.serverVersion.begin())) { // Normal case: Show type and build version. - version_string << version.serverType << " " << build_version << std::endl; + version_string << version.voiceServerType << " " << version.mBuildVersion << std::endl; } else { // Mismatch: Show both versions. - version_string << version.serverVersion << "/" << build_version << std::endl; + version_string << version.voiceServerType << " " << version.serverVersion << "/" << version.mBuildVersion << std::endl; } info["VOICE_VERSION"] = version_string.str(); } @@ -5195,7 +5195,7 @@ void LLAppViewer::sendLogoutRequest() if(LLVoiceClient::instanceExists()) { - LLVoiceClient::getInstance()->leaveChannel(); + LLVoiceClient::getInstance()->setVoiceEnabled(false); } } } diff --git a/indra/newview/llappviewer.h b/indra/newview/llappviewer.h index 27e1d32a38..4f18e722f2 100644 --- a/indra/newview/llappviewer.h +++ b/indra/newview/llappviewer.h @@ -43,6 +43,7 @@ #ifndef LL_LLAPPVIEWER_H #define LL_LLAPPVIEWER_H +#include "llapp.h" #include "llallocator.h" #include "llapr.h" #include "llcontrol.h" diff --git a/indra/newview/llavataractions.cpp b/indra/newview/llavataractions.cpp index f9c5f42de7..2857235dd2 100644 --- a/indra/newview/llavataractions.cpp +++ b/indra/newview/llavataractions.cpp @@ -241,7 +241,7 @@ static void on_avatar_name_cache_start_call(const LLUUID& agent_id, const LLAvatarName& av_name) { std::string name = av_name.getDisplayName(); - LLUUID session_id = gIMMgr->addSession(name, IM_NOTHING_SPECIAL, agent_id, true); + LLUUID session_id = gIMMgr->addSession(name, IM_NOTHING_SPECIAL, agent_id, LLSD()); if (session_id != LLUUID::null) { gIMMgr->startCall(session_id); @@ -277,8 +277,7 @@ void LLAvatarActions::startAdhocCall(const uuid_vec_t& ids, const LLUUID& floate // create the new ad hoc voice session const std::string title = LLTrans::getString("conference-title"); - LLUUID session_id = gIMMgr->addSession(title, IM_SESSION_CONFERENCE_START, - ids[0], id_array, true, floater_id); + LLUUID session_id = gIMMgr->addSession(title, IM_SESSION_CONFERENCE_START, ids[0], id_array, LLSD(), floater_id); if (session_id == LLUUID::null) { return; @@ -322,7 +321,7 @@ void 
LLAvatarActions::startConference(const uuid_vec_t& ids, const LLUUID& float id_array.push_back(*it); } const std::string title = LLTrans::getString("conference-title"); - LLUUID session_id = gIMMgr->addSession(title, IM_SESSION_CONFERENCE_START, ids[0], id_array, false, floater_id); + LLUUID session_id = gIMMgr->addSession(title, IM_SESSION_CONFERENCE_START, ids[0], id_array, LLSD(), floater_id); if (session_id == LLUUID::null) { diff --git a/indra/newview/llconversationmodel.cpp b/indra/newview/llconversationmodel.cpp index 4335168417..4cd85ac756 100644 --- a/indra/newview/llconversationmodel.cpp +++ b/indra/newview/llconversationmodel.cpp @@ -357,22 +357,20 @@ void LLConversationItemSession::clearParticipants() void LLConversationItemSession::clearAndDeparentModels() { - std::for_each(mChildren.begin(), mChildren.end(), - [](LLFolderViewModelItem* c) + for (LLFolderViewModelItem* child : mChildren) + { + if (child->getNumRefs() == 0) { - if (c->getNumRefs() == 0) - { - // LLConversationItemParticipant can be created but not assigned to any view, - // it was waiting for an "add_participant" event to be processed - delete c; - } - else - { - // Model is still assigned to some view/widget - c->setParent(NULL); - } + // LLConversationItemParticipant can be created but not assigned to any view, + // it was waiting for an "add_participant" event to be processed + delete child; } - ); + else + { + // Model is still assigned to some view/widget + child->setParent(NULL); + } + } mChildren.clear(); } diff --git a/indra/newview/llconversationview.cpp b/indra/newview/llconversationview.cpp index 1abebc996b..03eb9f0652 100644 --- a/indra/newview/llconversationview.cpp +++ b/indra/newview/llconversationview.cpp @@ -57,7 +57,7 @@ public: : conversation(conv) {} - virtual void onChange(EStatusType status, const std::string &channelURI, bool proximal) + virtual void onChange(EStatusType status, const LLSD& channelInfo, bool proximal) { conversation->showVoiceIndicator(conversation && status != STATUS_JOINING @@ -98,10 +98,7 @@ LLConversationViewSession::~LLConversationViewSession() if (mVoiceClientObserver) { - if (LLVoiceClient::instanceExists()) - { - LLVoiceClient::getInstance()->removeObserver(mVoiceClientObserver); - } + LLVoiceClient::removeObserver(mVoiceClientObserver); delete mVoiceClientObserver; } @@ -259,16 +256,15 @@ BOOL LLConversationViewSession::postBuild() icon->setVisible(true); mSpeakingIndicator->setSpeakerId(gAgentID, LLUUID::null, true); mIsInActiveVoiceChannel = true; - if(LLVoiceClient::instanceExists()) + + if (mVoiceClientObserver) { - if (mVoiceClientObserver) - { - LLVoiceClient::getInstance()->removeObserver(mVoiceClientObserver); - delete mVoiceClientObserver; - } - mVoiceClientObserver = new LLNearbyVoiceClientStatusObserver(this); - LLVoiceClient::getInstance()->addObserver(mVoiceClientObserver); + LLVoiceClient::removeObserver(mVoiceClientObserver); + delete mVoiceClientObserver; } + mVoiceClientObserver = new LLNearbyVoiceClientStatusObserver(this); + LLVoiceClient::addObserver(mVoiceClientObserver); + break; } default: diff --git a/indra/newview/llenvironment.cpp b/indra/newview/llenvironment.cpp index 3c0a523317..5170e9a68b 100644 --- a/indra/newview/llenvironment.cpp +++ b/indra/newview/llenvironment.cpp @@ -54,8 +54,6 @@ #include "llregioninfomodel.h" -#include <boost/make_shared.hpp> - #include "llatmosphere.h" #include "llagent.h" #include "roles_constants.h" diff --git a/indra/newview/lleventpoll.cpp b/indra/newview/lleventpoll.cpp index 471bb6d478..c05a7fef44 
100644 --- a/indra/newview/lleventpoll.cpp +++ b/indra/newview/lleventpoll.cpp @@ -40,8 +40,6 @@ #include "llcorehttputil.h" #include "lleventfilter.h" -#include "boost/make_shared.hpp" - namespace LLEventPolling { namespace Details diff --git a/indra/newview/llflexibleobject.cpp b/indra/newview/llflexibleobject.cpp index 7ed62d68bd..009b9e2829 100644 --- a/indra/newview/llflexibleobject.cpp +++ b/indra/newview/llflexibleobject.cpp @@ -775,7 +775,7 @@ BOOL LLVolumeImplFlexible::doUpdateGeometry(LLDrawable *drawable) } } - if (volume->mDrawable.isNull()) + if (volume->mDrawable.isNull() || volume->mDrawable->isDead()) { return TRUE; // No update to complete } diff --git a/indra/newview/llfloatereditenvironmentbase.cpp b/indra/newview/llfloatereditenvironmentbase.cpp index 109c7a286c..e63032c0d8 100644 --- a/indra/newview/llfloatereditenvironmentbase.cpp +++ b/indra/newview/llfloatereditenvironmentbase.cpp @@ -28,8 +28,6 @@ #include "llfloatereditenvironmentbase.h" -#include <boost/make_shared.hpp> - // libs #include "llnotifications.h" #include "llnotificationsutil.h" diff --git a/indra/newview/llfloaterfixedenvironment.cpp b/indra/newview/llfloaterfixedenvironment.cpp index ad9f83701c..fb26f1b872 100644 --- a/indra/newview/llfloaterfixedenvironment.cpp +++ b/indra/newview/llfloaterfixedenvironment.cpp @@ -28,8 +28,6 @@ #include "llfloaterfixedenvironment.h" -#include <boost/make_shared.hpp> - // libs #include "llbutton.h" #include "llnotifications.h" diff --git a/indra/newview/llfloaterimsession.cpp b/indra/newview/llfloaterimsession.cpp index a2d1cb7036..f954e72376 100644 --- a/indra/newview/llfloaterimsession.cpp +++ b/indra/newview/llfloaterimsession.cpp @@ -94,6 +94,7 @@ LLFloaterIMSession::LLFloaterIMSession(const LLUUID& session_id) mEnableCallbackRegistrar.add("Avatar.EnableGearItem", boost::bind(&LLFloaterIMSession::enableGearMenuItem, this, _2)); mCommitCallbackRegistrar.add("Avatar.GearDoToSelected", boost::bind(&LLFloaterIMSession::GearDoToSelected, this, _2)); mEnableCallbackRegistrar.add("Avatar.CheckGearItem", boost::bind(&LLFloaterIMSession::checkGearMenuItem, this, _2)); + mVoiceChannelChanged = LLVoiceChannel::setCurrentVoiceChannelChangedCallback(boost::bind(&LLFloaterIMSession::onVoiceChannelChanged, this, _1)); setDocked(true); } @@ -286,12 +287,12 @@ void LLFloaterIMSession::sendMsg(const std::string& msg) LLFloaterIMSession::~LLFloaterIMSession() { mVoiceChannelStateChangeConnection.disconnect(); - if(LLVoiceClient::instanceExists()) - { - LLVoiceClient::getInstance()->removeObserver(this); - } + + LLVoiceClient::removeObserver(this); LLTransientFloaterMgr::getInstance()->removeControlView(LLTransientFloaterMgr::IM, this); + + mVoiceChannelChanged.disconnect(); } @@ -363,7 +364,7 @@ BOOL LLFloaterIMSession::postBuild() childSetAction("voice_call_btn", boost::bind(&LLFloaterIMSession::onCallButtonClicked, this)); - LLVoiceClient::getInstance()->addObserver(this); + LLVoiceClient::addObserver(this); //*TODO if session is not initialized yet, add some sort of a warning message like "starting session...blablabla" //see LLFloaterIMPanel for how it is done (IB) @@ -521,11 +522,20 @@ void LLFloaterIMSession::sendParticipantsAddedNotification(const uuid_vec_t& uui sendMsg(getString(uuids.size() > 1 ? 
"multiple_participants_added" : "participant_added", args)); } +void LLFloaterIMSession::onVoiceChannelChanged(const LLUUID &session_id) +{ + if (session_id == mSessionID) + { + boundVoiceChannel(); + } +} + void LLFloaterIMSession::boundVoiceChannel() { LLVoiceChannel* voice_channel = LLIMModel::getInstance()->getVoiceChannel(mSessionID); if(voice_channel) { + mVoiceChannelStateChangeConnection.disconnect(); mVoiceChannelStateChangeConnection = voice_channel->setStateChangedCallback( boost::bind(&LLFloaterIMSession::onVoiceChannelStateChanged, this, _1, _2)); @@ -552,7 +562,7 @@ void LLFloaterIMSession::onCallButtonClicked() } } -void LLFloaterIMSession::onChange(EStatusType status, const std::string &channelURI, bool proximal) +void LLFloaterIMSession::onChange(EStatusType status, const LLSD& channelInfo, bool proximal) { if(status != STATUS_JOINING && status != STATUS_LEFT_CHANNEL) { diff --git a/indra/newview/llfloaterimsession.h b/indra/newview/llfloaterimsession.h index 7be68ccfc7..5180b0cee1 100644 --- a/indra/newview/llfloaterimsession.h +++ b/indra/newview/llfloaterimsession.h @@ -114,8 +114,7 @@ public: // Implements LLVoiceClientStatusObserver::onChange() to enable the call // button when voice is available - void onChange(EStatusType status, const std::string &channelURI, - bool proximal); + void onChange(EStatusType status, const LLSD& channelInfo, bool proximal); virtual LLTransientFloaterMgr::ETransientGroup getGroup() { return LLTransientFloaterMgr::IM; } virtual void onVoiceChannelStateChanged( @@ -162,6 +161,8 @@ private: void onCallButtonClicked(); + void onVoiceChannelChanged(const LLUUID &session_id); + void boundVoiceChannel(); // Add the "User is typing..." indicator. @@ -196,6 +197,9 @@ private: uuid_vec_t mInvitedParticipants; uuid_vec_t mPendingParticipants; + // notification when the voice channel is swapped out from beneath us. + boost::signals2::connection mVoiceChannelChanged; + // connection to voice channel state change signal boost::signals2::connection mVoiceChannelStateChangeConnection; diff --git a/indra/newview/llfloaterimsessiontab.cpp b/indra/newview/llfloaterimsessiontab.cpp index dc64d09f9f..081288fae5 100644 --- a/indra/newview/llfloaterimsessiontab.cpp +++ b/indra/newview/llfloaterimsessiontab.cpp @@ -102,6 +102,26 @@ LLFloaterIMSessionTab::LLFloaterIMSessionTab(const LLSD& session_id) LLFloaterIMSessionTab::~LLFloaterIMSessionTab() { delete mRefreshTimer; + + LLFloaterIMContainer* im_container = LLFloaterIMContainer::findInstance(); + if (im_container) + { + LLParticipantList* session = dynamic_cast<LLParticipantList*>(im_container->getSessionModel(mSessionID)); + if (session) + { + for (const conversations_widgets_map::value_type& widget_pair : mConversationsWidgets) + { + LLFolderViewItem* widget = widget_pair.second; + LLFolderViewModelItem* item_vmi = widget->getViewModelItem(); + if (item_vmi && item_vmi->getNumRefs() == 1) + { + // This is the last pointer, remove participant from session + // before participant gets deleted on destroyView. 
+ session->removeChild(item_vmi); + } + } + } + } } // static @@ -526,6 +546,12 @@ void LLFloaterIMSessionTab::closeFloater(bool app_quitting) super::closeFloater(app_quitting); } +void LLFloaterIMSessionTab::deleteAllChildren() +{ + super::deleteAllChildren(); + mVoiceButton = NULL; +} + std::string LLFloaterIMSessionTab::appendTime() { std::string timeStr = "[" + LLTrans::getString("TimeHour") + "]:" @@ -663,6 +689,27 @@ void LLFloaterIMSessionTab::removeConversationViewParticipant(const LLUUID& part LLFolderViewItem* widget = get_ptr_in_map(mConversationsWidgets,participant_id); if (widget) { + LLFolderViewModelItem* item_vmi = widget->getViewModelItem(); + if (item_vmi && item_vmi->getNumRefs() == 1) + { + // This is the last pointer, remove participant from session + // before participant gets deleted on destroyView. + // + // Floater (widget) and participant's view can simultaneously + // co-own the model, in which case view is responsible for + // the deletion and floater is free to clear and recreate + // the list, yet there are cases where only widget owns + // the pointer so it should do the cleanup. + // See "add_participant". + // + // Todo: If it keeps causing issues turn participants + // into LLPointers in the session + LLParticipantList* session = getParticipantList(); + if (session) + { + session->removeChild(item_vmi); + } + } widget->destroyView(); } mConversationsWidgets.erase(participant_id); diff --git a/indra/newview/llfloaterimsessiontab.h b/indra/newview/llfloaterimsessiontab.h index 00b43f499b..006b92e54b 100644 --- a/indra/newview/llfloaterimsessiontab.h +++ b/indra/newview/llfloaterimsessiontab.h @@ -82,6 +82,7 @@ public: /*virtual*/ void setVisible(BOOL visible); /*virtual*/ void setFocus(BOOL focus); /*virtual*/ void closeFloater(bool app_quitting = false); + /*virtual*/ void deleteAllChildren(); // Handle the left hand participant list widgets void addConversationViewParticipant(LLConversationItem* item, bool update_view = true); diff --git a/indra/newview/llgroupactions.cpp b/indra/newview/llgroupactions.cpp index 24ae90e3ae..58cb1a614d 100644 --- a/indra/newview/llgroupactions.cpp +++ b/indra/newview/llgroupactions.cpp @@ -248,7 +248,7 @@ void LLGroupActions::startCall(const LLUUID& group_id) return; } - LLUUID session_id = gIMMgr->addSession(gdata.mName, IM_SESSION_GROUP_START, group_id, true); + LLUUID session_id = gIMMgr->addSession(gdata.mName, IM_SESSION_GROUP_START, group_id, LLSD()); if (session_id == LLUUID::null) { LL_WARNS() << "Error adding session" << LL_ENDL; @@ -370,8 +370,8 @@ void LLGroupActions::processLeaveGroupDataResponse(const LLUUID group_id) if (gdatap->mMembershipFee > 0) { args["COST"] = gdatap->mMembershipFee; - LLNotificationsUtil::add("GroupLeaveConfirmMember", args, payload, onLeaveGroup); - } + LLNotificationsUtil::add("GroupLeaveConfirmMember", args, payload, onLeaveGroup); +} else { LLNotificationsUtil::add("GroupLeaveConfirmMemberNoFee", args, payload, onLeaveGroup); diff --git a/indra/newview/llimview.cpp b/indra/newview/llimview.cpp index 1cc76d7268..c9c70c95fe 100644 --- a/indra/newview/llimview.cpp +++ b/indra/newview/llimview.cpp @@ -89,8 +89,24 @@ const S32 XL8_PADDING = 3; // XL8_START_TAG.size() + XL8_END_TAG.size() /** Timeout of outgoing session initialization (in seconds) */ const static U32 SESSION_INITIALIZATION_TIMEOUT = 30; -void startConfrenceCoro(std::string url, LLUUID tempSessionId, LLUUID creatorId, LLUUID otherParticipantId, LLSD agents); -void chatterBoxInvitationCoro(std::string url, LLUUID sessionId, 
LLIMMgr::EInvitationType invitationType); +// This enum corresponds to the sim's and adds P2P_CHAT_SESSION, +// as webrtc uses the multiagent chat mechanism for p2p calls, +// instead of relying on vivox calling. +// Don't change this without consulting a server developer. +enum EMultiAgentChatSessionType +{ + GROUP_CHAT_SESSION = 0, + CONFERENCE_SESSION = 1, + P2P_CHAT_SESSION = 2, + SESSION_TYPE_COUNT +}; + + +void startConferenceCoro(std::string url, LLUUID tempSessionId, LLUUID creatorId, LLUUID otherParticipantId, LLSD agents); + +void startP2PVoiceCoro(std::string url, LLUUID tempSessionId, LLUUID creatorId, LLUUID otherParticipantId); + +void chatterBoxInvitationCoro(std::string url, LLUUID sessionId, LLIMMgr::EInvitationType invitationType, const LLSD& voiceChannelInfo); void chatterBoxHistoryCoro(std::string url, LLUUID sessionId, std::string from, std::string message, U32 timestamp); void start_deprecated_conference_chat(const LLUUID& temp_session_id, const LLUUID& creator_id, const LLUUID& other_participant_id, const LLSD& agents_to_invite); @@ -139,7 +155,7 @@ void process_dnd_im(const LLSD& notification) name, IM_NOTHING_SPECIAL, fromID, - false, + LLSD(), false); //will need slight refactor to retrieve whether offline message or not (assume online for now) } @@ -398,7 +414,7 @@ void on_new_message(const LLSD& msg) notify_of_message(msg, false); } -void startConfrenceCoro(std::string url, +void startConferenceCoro(std::string url, LLUUID tempSessionId, LLUUID creatorId, LLUUID otherParticipantId, LLSD agents) { LLCore::HttpRequest::policy_t httpPolicy(LLCore::HttpRequest::DEFAULT_POLICY_ID); @@ -410,6 +426,16 @@ void startConfrenceCoro(std::string url, postData["method"] = "start conference"; postData["session-id"] = tempSessionId; postData["params"] = agents; + LLSD altParams; + std::string voice_server_type = gSavedSettings.getString("VoiceServerType"); + if (voice_server_type.empty()) + { + // default to the server type associated with the region we're on. + LLVoiceVersionInfo versionInfo = LLVoiceClient::getInstance()->getVersion(); + voice_server_type = versionInfo.internalVoiceServerType; + } + altParams["voice_server_type"] = voice_server_type; + postData["alt_params"] = altParams; LLSD result = httpAdapter->postAndSuspend(httpRequest, url, postData); @@ -439,7 +465,46 @@ void startConfrenceCoro(std::string url, } } -void chatterBoxInvitationCoro(std::string url, LLUUID sessionId, LLIMMgr::EInvitationType invitationType) +void startP2PVoiceCoro(std::string url, LLUUID sessionID, LLUUID creatorId, LLUUID otherParticipantId) +{ + LLCore::HttpRequest::policy_t httpPolicy(LLCore::HttpRequest::DEFAULT_POLICY_ID); + LLCoreHttpUtil::HttpCoroutineAdapter::ptr_t httpAdapter(new LLCoreHttpUtil::HttpCoroutineAdapter("StartP2PVoiceCoro", httpPolicy)); + LLCore::HttpRequest::ptr_t httpRequest(new LLCore::HttpRequest); + + LLSD postData; + postData["method"] = "start p2p voice"; + postData["session-id"] = sessionID; + postData["params"] = otherParticipantId; + LLSD altParams; + std::string voice_server_type = gSavedSettings.getString("VoiceServerType"); + if (voice_server_type.empty()) + { + // default to the server type associated with the region we're on. 
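+        // (Same fallback as in startConferenceCoro: an empty VoiceServerType setting
+        // means "use whichever voice server type the current region reports".)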
+ LLVoiceVersionInfo versionInfo = LLVoiceClient::getInstance()->getVersion(); + voice_server_type = versionInfo.internalVoiceServerType; + } + altParams["voice_server_type"] = voice_server_type; + postData["alt_params"] = altParams; + + LLSD result = httpAdapter->postAndSuspend(httpRequest, url, postData); + + LLSD httpResults = result[LLCoreHttpUtil::HttpCoroutineAdapter::HTTP_RESULTS]; + LLCore::HttpStatus status = LLCoreHttpUtil::HttpCoroutineAdapter::getStatusFromLLSD(httpResults); + + if (!status) + { + LL_WARNS("LLIMModel") << "Failed to start p2p session:" << postData << "->" << result << LL_ENDL; + // try an "old school" way. + // *TODO: What about other error status codes? 4xx 5xx? + if (status == LLCore::HttpStatus(HTTP_BAD_REQUEST)) + { + static const std::string error_string("session_does_not_exist_error"); + gIMMgr->showSessionStartError(error_string, sessionID); + } + } +} + +void chatterBoxInvitationCoro(std::string url, LLUUID sessionId, LLIMMgr::EInvitationType invitationType, const LLSD& voiceChannelInfo) { LLCore::HttpRequest::policy_t httpPolicy(LLCore::HttpRequest::DEFAULT_POLICY_ID); LLCoreHttpUtil::HttpCoroutineAdapter::ptr_t @@ -505,7 +570,7 @@ void chatterBoxInvitationCoro(std::string url, LLUUID sessionId, LLIMMgr::EInvit if (LLIMMgr::INVITATION_TYPE_VOICE == invitationType) { - gIMMgr->startCall(sessionId, LLVoiceChannel::INCOMING_CALL); + gIMMgr->startCall(sessionId, LLVoiceChannel::INCOMING_CALL, voiceChannelInfo); } if ((invitationType == LLIMMgr::INVITATION_TYPE_VOICE @@ -651,7 +716,13 @@ LLIMModel::LLIMModel() LLCallDialogManager::instance(); } -LLIMModel::LLIMSession::LLIMSession(const LLUUID& session_id, const std::string& name, const EInstantMessage& type, const LLUUID& other_participant_id, const uuid_vec_t& ids, bool voice, bool has_offline_msg) +LLIMModel::LLIMSession::LLIMSession(const LLUUID& session_id, + const std::string& name, + const EInstantMessage& type, + const LLUUID& other_participant_id, + const LLSD& voice_channel_info, + const uuid_vec_t& ids, + bool has_offline_msg) : mSessionID(session_id), mName(name), mType(type), @@ -661,43 +732,30 @@ LLIMModel::LLIMSession::LLIMSession(const LLUUID& session_id, const std::string& mOtherParticipantID(other_participant_id), mInitialTargetIDs(ids), mVoiceChannel(NULL), + mP2PAsAdhocCall(false), mSpeakers(NULL), mSessionInitialized(false), mCallBackEnabled(true), mTextIMPossible(true), mStartCallOnInitialize(false), - mStartedAsIMCall(voice), + mStartedAsIMCall(!voice_channel_info.isUndefined()), mIsDNDsend(false), mAvatarNameCacheConnection() { // set P2P type by default - mSessionType = P2P_SESSION; + mSessionType = P2P_SESSION; if (IM_NOTHING_SPECIAL == mType || IM_SESSION_P2P_INVITE == mType) { - mVoiceChannel = new LLVoiceChannelP2P(session_id, name, other_participant_id); + mP2PAsAdhocCall = (LLVoiceClient::getInstance()->getOutgoingCallInterface(voice_channel_info) == NULL); } else { - mVoiceChannel = new LLVoiceChannelGroup(session_id, name); - // determine whether it is group or conference session - if (gAgent.isInGroup(mSessionID)) - { - mSessionType = GROUP_SESSION; - } - else - { - mSessionType = ADHOC_SESSION; - } + mSessionType = gAgent.isInGroup(mSessionID) ? 
GROUP_SESSION : ADHOC_SESSION; } - if(mVoiceChannel) - { - mVoiceChannelStateChangeConnection = mVoiceChannel->setStateChangedCallback(boost::bind(&LLIMSession::onVoiceChannelStateChanged, this, _1, _2, _3)); - } - - mSpeakers = new LLIMSpeakerMgr(mVoiceChannel); + initVoiceChannel(voice_channel_info); // All participants will be added to the list of people we've recently interacted with. @@ -707,8 +765,7 @@ LLIMModel::LLIMSession::LLIMSession(const LLUUID& session_id, const std::string& //we need to wait for session initialization for outgoing ad-hoc and group chat session //correct session id for initiated ad-hoc chat will be received from the server - if (!LLIMModel::getInstance()->sendStartSession(mSessionID, mOtherParticipantID, - mInitialTargetIDs, mType)) + if (!LLIMModel::getInstance()->sendStartSession(mSessionID, mOtherParticipantID, mInitialTargetIDs, mType, mP2PAsAdhocCall)) { //we don't need to wait for any responses //so we're already initialized @@ -738,6 +795,73 @@ LLIMModel::LLIMSession::LLIMSession(const LLUUID& session_id, const std::string& } } +void LLIMModel::LLIMSession::initVoiceChannel(const LLSD& voiceChannelInfo) +{ + + if (mVoiceChannel) + { + if (mVoiceChannel->isThisVoiceChannel(voiceChannelInfo)) + { + return; + } + mVoiceChannelStateChangeConnection.disconnect(); + + mVoiceChannel->deactivate(); + + delete mVoiceChannel; + mVoiceChannel = NULL; + } + mP2PAsAdhocCall = false; + if (IM_NOTHING_SPECIAL == mType || IM_SESSION_P2P_INVITE == mType) + { + LLVoiceP2POutgoingCallInterface *outgoingInterface = LLVoiceClient::getInstance()->getOutgoingCallInterface(voiceChannelInfo); + + if (outgoingInterface) + { + // only use LLVoiceChannelP2P if the provider can handle the special P2P interface, + // which uses the voice server to relay calls and invites. Otherwise, + // we use the group voice provider. 
+ mVoiceChannel = new LLVoiceChannelP2P(mSessionID, mName, mOtherParticipantID, outgoingInterface); + } + else + { + mP2PAsAdhocCall = true; + mVoiceChannel = new LLVoiceChannelGroup(mSessionID, mName, true); + } + } + else + { + // determine whether it is group or conference session + if (mSessionType == GROUP_SESSION) + { + mSessionType = GROUP_SESSION; + mVoiceChannel = new LLVoiceChannelGroup(mSessionID, mName, false); + } + else if (mSessionType == ADHOC_SESSION) + { + mSessionType = ADHOC_SESSION; + mVoiceChannel = new LLVoiceChannelGroup(mSessionID, mName, false); + } + else + { + LL_WARNS("Voice") << "Invalid Session Type when initializing voice channel: " << mSessionType << LL_ENDL; + return; + } + } + + mVoiceChannelStateChangeConnection = + mVoiceChannel->setStateChangedCallback(boost::bind(&LLIMSession::onVoiceChannelStateChanged, this, _1, _2, _3)); + + if (!mSpeakers) + { + mSpeakers = new LLIMSpeakerMgr(mVoiceChannel); + } + else + { + mSpeakers->setVoiceChannel(mVoiceChannel); + } +} + void LLIMModel::LLIMSession::onAdHocNameCache(const LLAvatarName& av_name) { mAvatarNameCacheConnection.disconnect(); @@ -841,7 +965,7 @@ void LLIMModel::LLIMSession::onVoiceChannelStateChanged(const LLVoiceChannel::ES break; } // Update speakers list when connected - if (LLVoiceChannel::STATE_CONNECTED == new_state) + if (mSpeakers && LLVoiceChannel::STATE_CONNECTED == new_state) { mSpeakers->update(true); } @@ -857,22 +981,6 @@ LLIMModel::LLIMSession::~LLIMSession() delete mSpeakers; mSpeakers = NULL; - // End the text IM session if necessary - if(LLVoiceClient::getInstance() && mOtherParticipantID.notNull()) - { - switch(mType) - { - case IM_NOTHING_SPECIAL: - case IM_SESSION_P2P_INVITE: - LLVoiceClient::getInstance()->endUserIMSession(mOtherParticipantID); - break; - - default: - // Appease the linux compiler - break; - } - } - mVoiceChannelStateChangeConnection.disconnect(); // HAVE to do this here -- if it happens in the LLVoiceChannel destructor it will call the wrong version (since the object's partially deconstructed at that point). 
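The initVoiceChannel() hunk above is where a P2P IM decides what kind of voice channel it gets: it asks the active voice module for an outgoing P2P call interface, and only when one exists (the Vivox-style relay path) does it build an LLVoiceChannelP2P; otherwise it sets mP2PAsAdhocCall and runs the call as a two-person ad-hoc conference over LLVoiceChannelGroup, which is what later routes session start through the "start p2p voice" ChatSessionRequest method with alt_params["voice_server_type"]. A rough standalone sketch of that selection, using stand-in names rather than the viewer's real classes (pick_p2p_channel, OutgoingP2PInterface and the channel structs are illustrative only):

#include <iostream>
#include <memory>

// Simplified stand-ins for the viewer types used above; the real classes
// (LLVoiceClient, LLVoiceChannelP2P, LLVoiceChannelGroup) carry far more state.
struct VoiceChannel { virtual ~VoiceChannel() = default; };
struct P2PChannel   : VoiceChannel {};   // provider-native p2p calling (Vivox-style relay)
struct GroupChannel : VoiceChannel { bool hosted_p2p; explicit GroupChannel(bool p) : hosted_p2p(p) {} };
struct OutgoingP2PInterface {};          // only returned by providers that relay p2p calls themselves

// Mirrors the branch in LLIMSession::initVoiceChannel(): with no native p2p
// interface (the WebRTC case) the call becomes a two-person ad-hoc conference,
// and p2p_as_adhoc later routes session start through "start p2p voice".
std::unique_ptr<VoiceChannel> pick_p2p_channel(OutgoingP2PInterface* native, bool& p2p_as_adhoc)
{
    if (native)
    {
        p2p_as_adhoc = false;
        return std::make_unique<P2PChannel>();
    }
    p2p_as_adhoc = true;
    return std::make_unique<GroupChannel>(true);
}

int main()
{
    bool p2p_as_adhoc = false;
    auto channel = pick_p2p_channel(nullptr, p2p_as_adhoc); // nullptr ~ a WebRTC-style provider
    std::cout << "p2p handled as adhoc: " << std::boolalpha << p2p_as_adhoc << '\n';
}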
@@ -889,7 +997,10 @@ void LLIMModel::LLIMSession::sessionInitReplyReceived(const LLUUID& new_session_ if (new_session_id != mSessionID) { mSessionID = new_session_id; - mVoiceChannel->updateSessionID(new_session_id); + if (mVoiceChannel) + { + mVoiceChannel->updateSessionID(new_session_id); + } } } @@ -1447,7 +1558,7 @@ void LLIMModel::testMessages() //session name should not be empty bool LLIMModel::newSession(const LLUUID& session_id, const std::string& name, const EInstantMessage& type, - const LLUUID& other_participant_id, const uuid_vec_t& ids, bool voice, bool has_offline_msg) + const LLUUID& other_participant_id, const uuid_vec_t& ids, const LLSD& voiceChannelInfo, bool has_offline_msg) { if (name.empty()) { @@ -1461,7 +1572,7 @@ bool LLIMModel::newSession(const LLUUID& session_id, const std::string& name, co return false; } - LLIMSession* session = new LLIMSession(session_id, name, type, other_participant_id, ids, voice, has_offline_msg); + LLIMSession *session = new LLIMSession(session_id, name, type, other_participant_id, voiceChannelInfo, ids, has_offline_msg); mId2SessionMap[session_id] = session; // When notifying observer, name of session is used instead of "name", because they may not be the @@ -1473,11 +1584,11 @@ bool LLIMModel::newSession(const LLUUID& session_id, const std::string& name, co } -bool LLIMModel::newSession(const LLUUID& session_id, const std::string& name, const EInstantMessage& type, const LLUUID& other_participant_id, bool voice, bool has_offline_msg) +bool LLIMModel::newSession(const LLUUID& session_id, const std::string& name, const EInstantMessage& type, const LLUUID& other_participant_id, const LLSD& voiceChannelInfo, bool has_offline_msg) { uuid_vec_t ids; ids.push_back(other_participant_id); - return newSession(session_id, name, type, other_participant_id, ids, voice, has_offline_msg); + return newSession(session_id, name, type, other_participant_id, ids, voiceChannelInfo, has_offline_msg); } bool LLIMModel::clearSession(const LLUUID& session_id) @@ -1725,7 +1836,7 @@ EInstantMessage LLIMModel::getType(const LLUUID& session_id) const return session->mType; } -LLVoiceChannel* LLIMModel::getVoiceChannel( const LLUUID& session_id ) const +LLVoiceChannel* LLIMModel::getVoiceChannel( const LLUUID& session_id) const { LLIMSession* session = findIMSession(session_id); if (!session) @@ -2026,7 +2137,8 @@ bool LLIMModel::sendStartSession( const LLUUID& temp_session_id, const LLUUID& other_participant_id, const uuid_vec_t& ids, - EInstantMessage dialog) + EInstantMessage dialog, + bool p2p_as_adhoc_call) { if ( dialog == IM_SESSION_GROUP_START ) { @@ -2042,7 +2154,7 @@ bool LLIMModel::sendStartSession( return true; } - else if ( dialog == IM_SESSION_CONFERENCE_START ) + else if (dialog == IM_SESSION_CONFERENCE_START ) { LLSD agents; for (int i = 0; i < (S32) ids.size(); i++) @@ -2057,8 +2169,8 @@ bool LLIMModel::sendStartSession( std::string url = region->getCapability( "ChatSessionRequest"); - LLCoros::instance().launch("startConfrenceCoro", - boost::bind(&startConfrenceCoro, url, + LLCoros::instance().launch("startConferenceCoro", + boost::bind(&startConferenceCoro, url, temp_session_id, gAgent.getID(), other_participant_id, agents)); } else @@ -2073,7 +2185,16 @@ bool LLIMModel::sendStartSession( //we also need to wait for reply from the server in case of ad-hoc chat (we'll get new session id) return true; } - + else if (p2p_as_adhoc_call && ((dialog == IM_SESSION_P2P_INVITE) || (dialog == IM_NOTHING_SPECIAL))) + { + LLViewerRegion *region = 
gAgent.getRegion(); + if (region) + { + std::string url = region->getCapability("ChatSessionRequest"); + LLCoros::instance().launch("startP2PVoiceCoro", boost::bind(&startP2PVoiceCoro, url, temp_session_id, gAgent.getID(), other_participant_id)); + } + return true; + } return false; } @@ -2317,6 +2438,12 @@ void LLCallDialogManager::onVoiceChannelStateChangedInt(const LLVoiceChannel::ES } break; + case LLVoiceChannel::STATE_NO_CHANNEL_INFO : + // This will happen in p2p calls using the adhoc + // infrastructure, which marks the channel as no channel info + // after the call is closed, which forces a dialogue. + return; + case LLVoiceChannel::STATE_HUNG_UP: // this state is coming before session is changed break; @@ -2659,15 +2786,15 @@ bool is_voice_call_type(const std::string &value) } LLIncomingCallDialog::LLIncomingCallDialog(const LLSD& payload) : -LLCallDialog(payload), -mAvatarNameCacheConnection() + LLCallDialog(payload), + mAvatarNameCacheConnection() { } void LLIncomingCallDialog::onLifetimeExpired() { - std::string session_handle = mPayload["session_handle"].asString(); - if (LLVoiceClient::getInstance()->isValidChannel(session_handle)) + LLVoiceP2PIncomingCallInterfacePtr call = LLVoiceClient::getInstance()->getIncomingCallInterface(mPayload["voice_channel_info"]); + if (call) { // restart notification's timer if call is still valid mLifetimeTimer.start(); @@ -2679,7 +2806,7 @@ void LLIncomingCallDialog::onLifetimeExpired() LLUUID session_id = mPayload["session_id"].asUUID(); gIMMgr->clearPendingAgentListUpdates(session_id); gIMMgr->clearPendingInvitation(session_id); - closeFloater(); + LLIncomingCallDialog::onReject(this); } } @@ -2858,16 +2985,17 @@ void LLIncomingCallDialog::processCallResponse(S32 response, const LLSD &payload { if (type == IM_SESSION_P2P_INVITE) { + if (session_name.empty()) + { + session_name = payload["caller_name"].asString(); + } // create a normal IM session session_id = gIMMgr->addP2PSession( - session_name, - caller_id, - payload["session_handle"].asString(), - payload["session_uri"].asString()); + session_name, caller_id, payload["voice_channel_info"]); if (voice) { - gIMMgr->startCall(session_id, LLVoiceChannel::INCOMING_CALL); + gIMMgr->startCall(session_id, LLVoiceChannel::INCOMING_CALL, payload["voice_channel_info"]); } else { @@ -2913,7 +3041,7 @@ void LLIncomingCallDialog::processCallResponse(S32 response, const LLSD &payload } } - gIMMgr->addSession(correct_session_name, type, session_id, true); + gIMMgr->addSession(correct_session_name, type, session_id, payload["voice_channel_info"]); std::string url = gAgent.getRegion()->getCapability( "ChatSessionRequest"); @@ -2921,8 +3049,7 @@ void LLIncomingCallDialog::processCallResponse(S32 response, const LLSD &payload if (voice) { LLCoros::instance().launch("chatterBoxInvitationCoro", - boost::bind(&chatterBoxInvitationCoro, url, - session_id, inv_type)); + boost::bind(&chatterBoxInvitationCoro, url, session_id, inv_type, payload["voice_channel_info"])); // send notification message to the corresponding chat if (payload["notify_box_type"].asString() == "VoiceInviteGroup" || payload["notify_box_type"].asString() == "VoiceInviteAdHoc") @@ -2943,114 +3070,48 @@ void LLIncomingCallDialog::processCallResponse(S32 response, const LLSD &payload { if (type == IM_SESSION_P2P_INVITE) { - if(LLVoiceClient::getInstance()) - { - std::string s = payload["session_handle"].asString(); - LLVoiceClient::getInstance()->declineInvite(s); - } - } - else - { - std::string url = gAgent.getRegion()->getCapability( - 
"ChatSessionRequest"); - - LLSD data; - data["method"] = "decline invitation"; - data["session-id"] = session_id; - - LLCoreHttpUtil::HttpCoroutineAdapter::messageHttpPost(url, data, - "Invitation declined", - "Invitation decline failed."); - } - } - - gIMMgr->clearPendingAgentListUpdates(session_id); - gIMMgr->clearPendingInvitation(session_id); - } -} - -bool inviteUserResponse(const LLSD& notification, const LLSD& response) -{ - if (!gIMMgr) - return false; - - const LLSD& payload = notification["payload"]; - LLUUID session_id = payload["session_id"].asUUID(); - EInstantMessage type = (EInstantMessage)payload["type"].asInteger(); - LLIMMgr::EInvitationType inv_type = (LLIMMgr::EInvitationType)payload["inv_type"].asInteger(); - S32 option = LLNotificationsUtil::getSelectedOption(notification, response); - switch(option) - { - case 0: // accept - { - if (type == IM_SESSION_P2P_INVITE) + // decline p2p voice, either via the vivox-style call mechanism + // or via the webrtc-style "decline p2p" mechanism. + LLVoiceP2PIncomingCallInterfacePtr call = LLVoiceClient::getInstance()->getIncomingCallInterface(payload["voice_channel_info"]); + if (call) { - // create a normal IM session - session_id = gIMMgr->addP2PSession( - payload["session_name"].asString(), - payload["caller_id"].asUUID(), - payload["session_handle"].asString(), - payload["session_uri"].asString()); - - gIMMgr->startCall(session_id); - - gIMMgr->clearPendingAgentListUpdates(session_id); - gIMMgr->clearPendingInvitation(session_id); + call->declineInvite(); } else { - gIMMgr->addSession( - payload["session_name"].asString(), - type, - session_id, true); + // webrtc-style decline. + LLViewerRegion *region = gAgent.getRegion(); + if (region) + { + std::string url = region->getCapability("ChatSessionRequest"); - std::string url = gAgent.getRegion()->getCapability( - "ChatSessionRequest"); + LLSD data; + data["method"] = "decline p2p voice"; + data["session-id"] = session_id; - LLCoros::instance().launch("chatterBoxInvitationCoro", - boost::bind(&chatterBoxInvitationCoro, url, - session_id, inv_type)); + LLCoreHttpUtil::HttpCoroutineAdapter::messageHttpPost(url, data, "P2P declined", "P2P decline failed."); + } } } - break; - case 2: // mute (also implies ignore, so this falls through to the "ignore" case below) - { - // mute the sender of this invite - if (!LLMuteList::getInstance()->isMuted(payload["caller_id"].asUUID())) - { - LLMute mute(payload["caller_id"].asUUID(), payload["caller_name"].asString(), LLMute::AGENT); - LLMuteList::getInstance()->add(mute); - } - } - /* FALLTHROUGH */ - - case 1: // decline - { - if (type == IM_SESSION_P2P_INVITE) - { - std::string s = payload["session_handle"].asString(); - LLVoiceClient::getInstance()->declineInvite(s); - } else { - std::string url = gAgent.getRegion()->getCapability( - "ChatSessionRequest"); + LLViewerRegion *region = gAgent.getRegion(); + if (region) + { + std::string url = region->getCapability("ChatSessionRequest"); + + LLSD data; + data["method"] = "decline invitation"; + data["session-id"] = session_id; - LLSD data; - data["method"] = "decline invitation"; - data["session-id"] = session_id; - LLCoreHttpUtil::HttpCoroutineAdapter::messageHttpPost(url, data, - "Invitation declined.", - "Invitation decline failed."); + LLCoreHttpUtil::HttpCoroutineAdapter::messageHttpPost(url, data, "Invitation declined", "Invitation decline failed."); + } } } gIMMgr->clearPendingAgentListUpdates(session_id); gIMMgr->clearPendingInvitation(session_id); - break; } - - return false; } // @@ 
-3124,7 +3185,7 @@ void LLIMMgr::addMessage( { fixed_session_name = av_name.getDisplayName(); } - LLIMModel::getInstance()->newSession(new_session_id, fixed_session_name, dialog, other_participant_id, false, is_offline_msg); + LLIMModel::getInstance()->newSession(new_session_id, fixed_session_name, dialog, other_participant_id, LLSD(), is_offline_msg); LLIMModel::LLIMSession* session = LLIMModel::instance().findIMSession(new_session_id); if (session) @@ -3286,22 +3347,11 @@ void LLIMMgr::autoStartCallOnStartup(const LLUUID& session_id) } LLUUID LLIMMgr::addP2PSession(const std::string& name, - const LLUUID& other_participant_id, - const std::string& voice_session_handle, - const std::string& caller_uri) + const LLUUID& other_participant_id, + const LLSD& voice_channel_info) { - LLUUID session_id = addSession(name, IM_NOTHING_SPECIAL, other_participant_id, true); - - LLIMSpeakerMgr* speaker_mgr = LLIMModel::getInstance()->getSpeakerManager(session_id); - if (speaker_mgr) - { - LLVoiceChannelP2P* voice_channel = dynamic_cast<LLVoiceChannelP2P*>(speaker_mgr->getVoiceChannel()); - if (voice_channel) - { - voice_channel->setSessionHandle(voice_session_handle, caller_uri); - } - } - return session_id; + LL_DEBUGS("Voice") << "Add p2p voice channel info: " << voice_channel_info << LL_ENDL; + return addSession(name, IM_NOTHING_SPECIAL, other_participant_id, voice_channel_info); } // This adds a session to the talk view. The name is the local name of @@ -3311,11 +3361,12 @@ LLUUID LLIMMgr::addP2PSession(const std::string& name, LLUUID LLIMMgr::addSession( const std::string& name, EInstantMessage dialog, - const LLUUID& other_participant_id, bool voice) + const LLUUID& other_participant_id, + const LLSD& voiceChannelInfo) { std::vector<LLUUID> ids; ids.push_back(other_participant_id); - LLUUID session_id = addSession(name, dialog, other_participant_id, ids, voice); + LLUUID session_id = addSession(name, dialog, other_participant_id, ids, voiceChannelInfo); return session_id; } @@ -3325,7 +3376,8 @@ LLUUID LLIMMgr::addSession( const std::string& name, EInstantMessage dialog, const LLUUID& other_participant_id, - const std::vector<LLUUID>& ids, bool voice, + const std::vector<LLUUID>& ids, + const LLSD& voiceChannelInfo, const LLUUID& floater_id) { if (ids.empty()) @@ -3354,28 +3406,32 @@ LLUUID LLIMMgr::addSession( im_floater->reloadMessages(); } } + LLIMModel::LLIMSession *session = LLIMModel::getInstance()->findIMSession(session_id); - bool new_session = (LLIMModel::getInstance()->findIMSession(session_id) == NULL); + bool new_session = (session == NULL); //works only for outgoing ad-hoc sessions - if (new_session && IM_SESSION_CONFERENCE_START == dialog && ids.size()) + if (new_session && + ((IM_NOTHING_SPECIAL == dialog) || (IM_SESSION_P2P_INVITE == dialog) || (IM_SESSION_CONFERENCE_START == dialog)) && + ids.size()) { - LLIMModel::LLIMSession* ad_hoc_found = LLIMModel::getInstance()->findAdHocIMSession(ids); - if (ad_hoc_found) + session = LLIMModel::getInstance()->findAdHocIMSession(ids); + if (session) { new_session = false; - session_id = ad_hoc_found->mSessionID; + session_id = session->mSessionID; } } //Notify observers that a session was added if (new_session) { - LLIMModel::getInstance()->newSession(session_id, name, dialog, other_participant_id, ids, voice); + LLIMModel::getInstance()->newSession(session_id, name, dialog, other_participant_id, ids, voiceChannelInfo); } //Notifies observers that the session was already added else { + session->initVoiceChannel(voiceChannelInfo); 
std::string session_name = LLIMModel::getInstance()->getName(session_id); LLIMMgr::getInstance()->notifyObserverSessionActivated(session_id, session_name, other_participant_id); } @@ -3432,9 +3488,15 @@ void LLIMMgr::inviteToSession( const std::string& caller_name, EInstantMessage type, EInvitationType inv_type, - const std::string& session_handle, - const std::string& session_uri) + const LLSD& voice_channel_info) { + + if (caller_id == gAgentID) + { + // ignore invites from ourself. + return; + } + std::string notify_box_type; // voice invite question is different from default only for group call (EXT-7118) std::string question_type = "VoiceInviteQuestionDefault"; @@ -3475,11 +3537,11 @@ void LLIMMgr::inviteToSession( payload["caller_name"] = caller_name; payload["type"] = type; payload["inv_type"] = inv_type; - payload["session_handle"] = session_handle; - payload["session_uri"] = session_uri; payload["notify_box_type"] = notify_box_type; payload["question_type"] = question_type; + LL_WARNS("Voice") << "INVITE PAYLOAD: " << payload << LL_ENDL; + //ignore invites from muted residents if (!is_linden) { @@ -3504,7 +3566,6 @@ void LLIMMgr::inviteToSession( LLIncomingCallDialog::processCallResponse(0, payload); return; } - if (voice_invite) { bool isRejectGroupCall = (gSavedSettings.getBOOL("VoiceCallsRejectGroup") && (notify_box_type == "VoiceInviteGroup")); @@ -3528,7 +3589,7 @@ void LLIMMgr::inviteToSession( fixed_session_name = av_name.getDisplayName(); } } - LLIMModel::getInstance()->newSession(session_id, fixed_session_name, IM_NOTHING_SPECIAL, caller_id, false, false); + LLIMModel::getInstance()->newSession(session_id, fixed_session_name, IM_NOTHING_SPECIAL, caller_id, LLSD(), false); } LLSD args; @@ -3543,6 +3604,9 @@ void LLIMMgr::inviteToSession( if ( !mPendingInvitations.has(session_id.asString()) ) { + // we're throwing up a dialogue, so we're using the voice channel passed to us, + // save it in the payload. + payload["voice_channel_info"] = voice_channel_info; if (caller_name.empty()) { LLAvatarNameCache::get(caller_id, @@ -3596,6 +3660,40 @@ void LLIMMgr::clearPendingInvitation(const LLUUID& session_id) void LLIMMgr::processAgentListUpdates(const LLUUID& session_id, const LLSD& body) { + if (body.isMap() && body.has("agent_updates") && body["agent_updates"].isMap()) + { + LLSD::map_const_iterator update_it; + for (update_it = body["agent_updates"].beginMap(); update_it != body["agent_updates"].endMap(); ++update_it) + { + LLUUID agent_id = LLUUID(update_it->first); + LLSD agent_data = update_it->second; + if (agent_data.has("transition") && agent_data["transition"].asString() == "LEAVE") + { + // ignore actual leaves as those will be handled separately. + continue; + } + + if (agent_id != gAgentID && agent_data.isMap() && agent_data.has("info") && agent_data["info"].isMap()) + { + // Is one of the participants leaving a P2P Chat? + if (agent_data["info"].has("can_voice_chat") && !agent_data["info"]["can_voice_chat"].asBoolean()) + { + LLVoiceChannelGroup *channelp = dynamic_cast < LLVoiceChannelGroup*>(LLVoiceChannel::getChannelByID(session_id)); + if (channelp && channelp->isP2P()) + { + // it's an adhoc-style P2P channel, and the peer has declined voice. notify the user + // and shut down the voice channel. 
+ LLSD notifyArgs = LLSD::emptyMap(); + notifyArgs["VOICE_CHANNEL_NAME"] = channelp->getSessionName(); + LLNotificationsUtil::add("P2PCallDeclined", notifyArgs); + endCall(session_id); + break; + } + } + } + } + } + LLFloaterIMSession* im_floater = LLFloaterIMSession::findInstance(session_id); if ( im_floater ) { @@ -3753,11 +3851,19 @@ void LLIMMgr::removeSessionObserver(LLIMSessionObserver *observer) mSessionObservers.remove(observer); } -bool LLIMMgr::startCall(const LLUUID& session_id, LLVoiceChannel::EDirection direction) +bool LLIMMgr::startCall(const LLUUID& session_id, LLVoiceChannel::EDirection direction, const LLSD& voice_channel_info) { LLVoiceChannel* voice_channel = LLIMModel::getInstance()->getVoiceChannel(session_id); if (!voice_channel) return false; - + if (voice_channel_info.isDefined() && voice_channel_info.isMap() && voice_channel_info.size() > 0) + { + voice_channel->setChannelInfo(voice_channel_info); + } + else if (voice_channel->getState() < LLVoiceChannel::STATE_READY) + { + // restart if there was an error or the call was hung up + voice_channel->resetChannelInfo(); + } voice_channel->setCallDirection(direction); voice_channel->activate(); return true; @@ -4065,6 +4171,15 @@ public: { im_mgr->processSessionUpdate(input["body"]["info"]); } + if (input["body"]["info"].has("voice_channel_info")) + { + LLIMModel::LLIMSession* session = LLIMModel::getInstance()->findIMSession(session_id); + if (session) + { + session->initVoiceChannel(input["body"]["info"]["voice_channel_info"]); + session->mVoiceChannel->activate(); + } + } } }; @@ -4149,7 +4264,7 @@ public: { LLCoros::instance().launch("chatterBoxInvitationCoro", boost::bind(&chatterBoxInvitationCoro, url, - session_id, LLIMMgr::INVITATION_TYPE_INSTANT_MESSAGE)); + session_id, LLIMMgr::INVITATION_TYPE_INSTANT_MESSAGE, LLSD())); } } //end if invitation has instant message else if ( input["body"].has("voice") ) { @@ -4160,13 +4275,16 @@ public: return; } + BOOL session_type_p2p = input["body"]["voice"].get("invitation_type").asInteger() == EMultiAgentChatSessionType::P2P_CHAT_SESSION; + LL_DEBUGS("Voice") << "Received voice information from the server: " << input["body"]<< LL_ENDL; gIMMgr->inviteToSession( input["body"]["session_id"].asUUID(), input["body"]["session_name"].asString(), input["body"]["from_id"].asUUID(), input["body"]["from_name"].asString(), - IM_SESSION_INVITE, - LLIMMgr::INVITATION_TYPE_VOICE); + session_type_p2p ? 
IM_SESSION_P2P_INVITE : IM_SESSION_INVITE, + LLIMMgr::INVITATION_TYPE_VOICE, + input["body"]["voice"]); } else if ( input["body"].has("immediate") ) { diff --git a/indra/newview/llimview.h b/indra/newview/llimview.h index 8d1bc1c76a..9fd5e9d5c4 100644 --- a/indra/newview/llimview.h +++ b/indra/newview/llimview.h @@ -80,9 +80,11 @@ public: } SType; LLIMSession(const LLUUID& session_id, const std::string& name, - const EInstantMessage& type, const LLUUID& other_participant_id, const uuid_vec_t& ids, bool voice, bool has_offline_msg); + const EInstantMessage& type, const LLUUID& other_participant_id, const LLSD& voiceChannelInfo, const uuid_vec_t& ids, bool has_offline_msg); virtual ~LLIMSession(); + void initVoiceChannel(const LLSD &voiceChannelInfo = LLSD()); + void sessionInitReplyReceived(const LLUUID& new_session_id); void addMessagesFromHistoryCache(const std::list<LLSD>& history); // From local file void addMessagesFromServerHistory(const LLSD& history, const std::string& target_from, const std::string& target_message, U32 timestamp); // From chat server @@ -141,6 +143,7 @@ public: LLVoiceChannel* mVoiceChannel; LLIMSpeakerMgr* mSpeakers; + bool mP2PAsAdhocCall; bool mSessionInitialized; @@ -199,10 +202,10 @@ public: * @param name session name should not be empty, will return false if empty */ bool newSession(const LLUUID& session_id, const std::string& name, const EInstantMessage& type, const LLUUID& other_participant_id, - const uuid_vec_t& ids, bool voice = false, bool has_offline_msg = false); + const uuid_vec_t& ids, const LLSD& voiceChannelInfo = LLSD(), bool has_offline_msg = false); - bool newSession(const LLUUID& session_id, const std::string& name, const EInstantMessage& type, - const LLUUID& other_participant_id, bool voice = false, bool has_offline_msg = false); + bool newSession(const LLUUID& session_id, const std::string& name, const EInstantMessage& type, const LLUUID &other_participant_id, + const LLSD &voiceChannelInfo = LLSD(), bool has_offline_msg = false); /** * Remove all session data associated with a session specified by session_id @@ -296,7 +299,7 @@ public: static void sendLeaveSession(const LLUUID& session_id, const LLUUID& other_participant_id); static bool sendStartSession(const LLUUID& temp_session_id, const LLUUID& other_participant_id, - const uuid_vec_t& ids, EInstantMessage dialog); + const uuid_vec_t& ids, EInstantMessage dialog, bool p2p_as_adhoc_call); static void sendTypingState(LLUUID session_id, LLUUID other_participant_id, BOOL typing); static void sendMessage(const std::string& utf8_text, const LLUUID& im_session_id, const LLUUID& other_participant_id, EInstantMessage dialog); @@ -379,7 +382,8 @@ public: // session. LLUUID addSession(const std::string& name, EInstantMessage dialog, - const LLUUID& other_participant_id, bool voice = false); + const LLUUID& other_participant_id, + const LLSD& voiceChannelInfo = LLSD()); // Adds a session using a specific group of starting agents // the dialog type is assumed correct. Returns the uuid of the session. @@ -387,7 +391,8 @@ public: LLUUID addSession(const std::string& name, EInstantMessage dialog, const LLUUID& other_participant_id, - const std::vector<LLUUID>& ids, bool voice = false, + const std::vector<LLUUID> &ids, + const LLSD& voiceChannelInfo = LLSD(), const LLUUID& floater_id = LLUUID::null); /** @@ -397,10 +402,7 @@ public: * @param caller_uri - sip URI of caller. It should be always be passed into the method to avoid * incorrect working of LLVoiceChannel instances. See EXT-2985. 
*/ - LLUUID addP2PSession(const std::string& name, - const LLUUID& other_participant_id, - const std::string& voice_session_handle, - const std::string& caller_uri); + LLUUID addP2PSession(const std::string &name, const LLUUID &other_participant_id, const LLSD &voice_call_info); /** * Leave the session with session id. Send leave session notification @@ -416,8 +418,8 @@ public: const std::string& caller_name, EInstantMessage type, EInvitationType inv_type, - const std::string& session_handle = LLStringUtil::null, - const std::string& session_uri = LLStringUtil::null); + const LLSD &voice_channel_info = LLSD() + ); void processIMTypingStart(const LLUUID& from_id, const EInstantMessage im_type); void processIMTypingStop(const LLUUID& from_id, const EInstantMessage im_type); @@ -465,7 +467,7 @@ public: * Start call in a session * @return false if voice channel doesn't exist **/ - bool startCall(const LLUUID& session_id, LLVoiceChannel::EDirection direction = LLVoiceChannel::OUTGOING_CALL); + bool startCall(const LLUUID& session_id, LLVoiceChannel::EDirection direction = LLVoiceChannel::OUTGOING_CALL, const LLSD& voice_channel_info = LLSD()); /** * End call in a session diff --git a/indra/newview/lloutputmonitorctrl.cpp b/indra/newview/lloutputmonitorctrl.cpp index db43c57139..28433b36f4 100644 --- a/indra/newview/lloutputmonitorctrl.cpp +++ b/indra/newview/lloutputmonitorctrl.cpp @@ -126,29 +126,31 @@ void LLOutputMonitorCtrl::draw() const F32 LEVEL_1 = LLVoiceClient::OVERDRIVEN_POWER_LEVEL * 2.f / 3.f; const F32 LEVEL_2 = LLVoiceClient::OVERDRIVEN_POWER_LEVEL; + LLVoiceClient* voice_client = LLVoiceClient::getInstance(); + if (getVisible() && mAutoUpdate && !getIsMuted() && mSpeakerId.notNull()) { - setPower(LLVoiceClient::getInstance()->getCurrentPower(mSpeakerId)); + setPower(voice_client->getCurrentPower(mSpeakerId)); if(mIsAgentControl) { - setIsTalking(LLVoiceClient::getInstance()->getUserPTTState()); + setIsTalking(voice_client->getUserPTTState()); } else { - setIsTalking(LLVoiceClient::getInstance()->getIsSpeaking(mSpeakerId)); + setIsTalking(voice_client->getIsSpeaking(mSpeakerId)); } } if ((mPower == 0.f && !mIsTalking) && mShowParticipantsSpeaking) { std::set<LLUUID> participant_uuids; - LLVoiceClient::instance().getParticipantList(participant_uuids); + voice_client->getParticipantList(participant_uuids); std::set<LLUUID>::const_iterator part_it = participant_uuids.begin(); F32 power = 0; for (; part_it != participant_uuids.end(); ++part_it) { - power = LLVoiceClient::instance().getCurrentPower(*part_it); + power = voice_client->getCurrentPower(*part_it); if (power) { mPower = power; diff --git a/indra/newview/llpanelgroup.cpp b/indra/newview/llpanelgroup.cpp index 6b7680d19d..1ab8265d83 100644 --- a/indra/newview/llpanelgroup.cpp +++ b/indra/newview/llpanelgroup.cpp @@ -98,10 +98,7 @@ LLPanelGroup::LLPanelGroup() LLPanelGroup::~LLPanelGroup() { LLGroupMgr::getInstance()->removeObserver(this); - if(LLVoiceClient::instanceExists()) - { - LLVoiceClient::getInstance()->removeObserver(this); - } + LLVoiceClient::removeObserver(this); } void LLPanelGroup::onOpen(const LLSD& key) @@ -194,7 +191,7 @@ BOOL LLPanelGroup::postBuild() mJoinText = panel_general->getChild<LLUICtrl>("join_cost_text"); } - LLVoiceClient::getInstance()->addObserver(this); + LLVoiceClient::addObserver(this); return TRUE; } @@ -279,9 +276,9 @@ void LLPanelGroup::onBtnJoin() } else { - LL_DEBUGS() << "joining group: " << mID << LL_ENDL; - LLGroupActions::join(mID); - } + LL_DEBUGS() << "joining group: " << mID << 
LL_ENDL; + LLGroupActions::join(mID); +} } void LLPanelGroup::changed(LLGroupChange gc) @@ -292,7 +289,7 @@ void LLPanelGroup::changed(LLGroupChange gc) } // virtual -void LLPanelGroup::onChange(EStatusType status, const std::string &channelURI, bool proximal) +void LLPanelGroup::onChange(EStatusType status, const LLSD& channelInfo, bool proximal) { if(status == STATUS_JOINING || status == STATUS_LEFT_CHANNEL) { diff --git a/indra/newview/llpanelgroup.h b/indra/newview/llpanelgroup.h index d329a9561f..ddc1613fb6 100644 --- a/indra/newview/llpanelgroup.h +++ b/indra/newview/llpanelgroup.h @@ -62,7 +62,7 @@ public: // Implements LLVoiceClientStatusObserver::onChange() to enable the call // button when voice is available - /*virtual*/ void onChange(EStatusType status, const std::string &channelURI, bool proximal); + /*virtual*/ void onChange(EStatusType status, const LLSD& channelInfo, bool proximal); void showNotice(const std::string& subject, const std::string& message, diff --git a/indra/newview/llpanelpeople.cpp b/indra/newview/llpanelpeople.cpp index 6bd4e53fcf..7ab4930c0b 100644 --- a/indra/newview/llpanelpeople.cpp +++ b/indra/newview/llpanelpeople.cpp @@ -338,7 +338,7 @@ public: LLAvatarTracker::instance().addObserver(this); // For notification when SIP online status changes. - LLVoiceClient::getInstance()->addObserver(this); + LLVoiceClient::addObserver(this); mInvObserver = new LLInventoryFriendCardObserver(this); } @@ -346,10 +346,7 @@ public: { // will be deleted by ~LLInventoryModel //delete mInvObserver; - if (LLVoiceClient::instanceExists()) - { - LLVoiceClient::getInstance()->removeObserver(this); - } + LLVoiceClient::removeObserver(this); LLAvatarTracker::instance().removeObserver(this); } @@ -577,10 +574,7 @@ LLPanelPeople::~LLPanelPeople() delete mFriendListUpdater; delete mRecentListUpdater; - if(LLVoiceClient::instanceExists()) - { - LLVoiceClient::getInstance()->removeObserver(this); - } + LLVoiceClient::removeObserver(this); } void LLPanelPeople::onFriendsAccordionExpandedCollapsed(LLUICtrl* ctrl, const LLSD& param, LLAvatarList* avatar_list) @@ -721,7 +715,7 @@ BOOL LLPanelPeople::postBuild() // Must go after setting commit callback and initializing all pointers to children. 
mTabContainer->selectTabByName(NEARBY_TAB_NAME); - LLVoiceClient::getInstance()->addObserver(this); + LLVoiceClient::addObserver(this); // call this method in case some list is empty and buttons can be in inconsistent state updateButtons(); @@ -733,7 +727,7 @@ BOOL LLPanelPeople::postBuild() } // virtual -void LLPanelPeople::onChange(EStatusType status, const std::string &channelURI, bool proximal) +void LLPanelPeople::onChange(EStatusType status, const LLSD& channelInfo, bool proximal) { if(status == STATUS_JOINING || status == STATUS_LEFT_CHANNEL) { diff --git a/indra/newview/llpanelpeople.h b/indra/newview/llpanelpeople.h index f38c71e0b2..a00700700c 100644 --- a/indra/newview/llpanelpeople.h +++ b/indra/newview/llpanelpeople.h @@ -55,7 +55,7 @@ public: /*virtual*/ bool notifyChildren(const LLSD& info); // Implements LLVoiceClientStatusObserver::onChange() to enable call buttons // when voice is available - /*virtual*/ void onChange(EStatusType status, const std::string &channelURI, bool proximal); + /*virtual*/ void onChange(EStatusType status, const LLSD& channelInfo, bool proximal); // internals class Updater; diff --git a/indra/newview/llpanelprofile.cpp b/indra/newview/llpanelprofile.cpp index b184668290..628ae46a00 100644 --- a/indra/newview/llpanelprofile.cpp +++ b/indra/newview/llpanelprofile.cpp @@ -702,10 +702,7 @@ LLPanelProfileSecondLife::~LLPanelProfileSecondLife() LLAvatarTracker::instance().removeParticularFriendObserver(getAvatarId(), this); } - if (LLVoiceClient::instanceExists()) - { - LLVoiceClient::getInstance()->removeObserver((LLVoiceClientStatusObserver*)this); - } + LLVoiceClient::removeObserver((LLVoiceClientStatusObserver*)this); if (mAvatarNameCacheConnection.connected()) { @@ -974,7 +971,7 @@ void LLPanelProfileSecondLife::fillCommonData(const LLAvatarData* avatar_data) setDescriptionText(avatar_data->about_text); - mSecondLifePic->setValue(avatar_data->image_id); + mSecondLifePic->setValue(avatar_data->image_id); if (getSelfProfile()) { @@ -1122,10 +1119,10 @@ void LLPanelProfileSecondLife::fillAgeData(const LLAvatarData* avatar_data) } else { - std::string register_date = getString("age_format"); - LLSD args_age; + std::string register_date = getString("age_format"); + LLSD args_age; args_age["[AGE]"] = LLDateUtil::ageFromDate(avatar_data->born_on, LLDate::now()); - LLStringUtil::format(register_date, args_age); + LLStringUtil::format(register_date, args_age); userAgeCtrl->setValue(register_date); } @@ -1172,7 +1169,7 @@ void LLPanelProfileSecondLife::changed(U32 mask) } // virtual, called by LLVoiceClient -void LLPanelProfileSecondLife::onChange(EStatusType status, const std::string &channelURI, bool proximal) +void LLPanelProfileSecondLife::onChange(EStatusType status, const LLSD& channelInfo, bool proximal) { if(status == STATUS_JOINING || status == STATUS_LEFT_CHANNEL) { @@ -1575,10 +1572,10 @@ void LLPanelProfileSecondLife::onShowInSearchCallback() mAllowPublish = value; saveAgentUserInfoCoro("allow_publish", value); -} + } void LLPanelProfileSecondLife::onHideAgeCallback() -{ + { bool value = mHideAgeCombo->getValue().asInteger(); if (value == mHideAge) return; @@ -1728,34 +1725,34 @@ void LLPanelProfileSecondLife::onCommitProfileImage(const LLUUID& id) return; std::function<void(bool)> callback = [id](bool result) - { - if (result) { - LLAvatarIconIDCache::getInstance()->add(gAgentID, id); + if (result) + { + LLAvatarIconIDCache::getInstance()->add(gAgentID, id); // Should trigger callbacks in icon controls (or request Legacy) - 
LLAvatarPropertiesProcessor::getInstance()->sendAvatarPropertiesRequest(gAgentID); - } - }; + LLAvatarPropertiesProcessor::getInstance()->sendAvatarPropertiesRequest(gAgentID); + } + }; if (!saveAgentUserInfoCoro("sl_image_id", id, callback)) return; mSecondLifePic->setValue(id); - LLFloater *floater = mFloaterProfileTextureHandle.get(); - if (floater) - { - LLFloaterProfileTexture * texture_view = dynamic_cast<LLFloaterProfileTexture*>(floater); - if (id == LLUUID::null) - { - texture_view->resetAsset(); - } - else + LLFloater *floater = mFloaterProfileTextureHandle.get(); + if (floater) { + LLFloaterProfileTexture * texture_view = dynamic_cast<LLFloaterProfileTexture*>(floater); + if (id == LLUUID::null) + { + texture_view->resetAsset(); + } + else + { texture_view->loadAsset(id); + } } } -} ////////////////////////////////////////////////////////////////////////// // LLPanelProfileWeb diff --git a/indra/newview/llpanelprofile.h b/indra/newview/llpanelprofile.h index 1afc248073..f0c0565628 100644 --- a/indra/newview/llpanelprofile.h +++ b/indra/newview/llpanelprofile.h @@ -85,7 +85,7 @@ public: // Implements LLVoiceClientStatusObserver::onChange() to enable the call // button when voice is available - void onChange(EStatusType status, const std::string &channelURI, bool proximal) override; + void onChange(EStatusType status, const LLSD& channelInfo, bool proximal) override; void setAvatarId(const LLUUID& avatar_id) override; diff --git a/indra/newview/llpanelvoicedevicesettings.cpp b/indra/newview/llpanelvoicedevicesettings.cpp index cac5421ffc..1a9c5716dc 100644 --- a/indra/newview/llpanelvoicedevicesettings.cpp +++ b/indra/newview/llpanelvoicedevicesettings.cpp @@ -256,43 +256,46 @@ void LLPanelVoiceDeviceSettings::refresh() if(mCtrlInputDevices) { - mCtrlInputDevices->removeall(); - mCtrlInputDevices->add(getLocalizedDeviceName(DEFAULT_DEVICE), DEFAULT_DEVICE, ADD_BOTTOM); - - for(device=LLVoiceClient::getInstance()->getCaptureDevices().begin(); - device != LLVoiceClient::getInstance()->getCaptureDevices().end(); - device++) + LLVoiceDeviceList devices = LLVoiceClient::getInstance()->getCaptureDevices(); + if (devices.size() > 0) // if zero, we've not received our devices yet { - mCtrlInputDevices->add(getLocalizedDeviceName(device->display_name), device->full_name, ADD_BOTTOM); - } + mCtrlInputDevices->removeall(); + mCtrlInputDevices->add(getLocalizedDeviceName(DEFAULT_DEVICE), DEFAULT_DEVICE, ADD_BOTTOM); + for (auto& device : devices) + { + mCtrlInputDevices->add(getLocalizedDeviceName(device.display_name), device.full_name, ADD_BOTTOM); + } - // Fix invalid input audio device preference. - if (!mCtrlInputDevices->setSelectedByValue(mInputDevice, TRUE)) - { - mCtrlInputDevices->setValue(DEFAULT_DEVICE); - gSavedSettings.setString("VoiceInputAudioDevice", DEFAULT_DEVICE); - mInputDevice = DEFAULT_DEVICE; + // Fix invalid input audio device preference. 
+ if (!mCtrlInputDevices->setSelectedByValue(mInputDevice, TRUE)) + { + mCtrlInputDevices->setValue(DEFAULT_DEVICE); + gSavedSettings.setString("VoiceInputAudioDevice", DEFAULT_DEVICE); + mInputDevice = DEFAULT_DEVICE; + } } } if(mCtrlOutputDevices) { - mCtrlOutputDevices->removeall(); - mCtrlOutputDevices->add(getLocalizedDeviceName(DEFAULT_DEVICE), DEFAULT_DEVICE, ADD_BOTTOM); - - for(device = LLVoiceClient::getInstance()->getRenderDevices().begin(); - device != LLVoiceClient::getInstance()->getRenderDevices().end(); - device++) + LLVoiceDeviceList devices = LLVoiceClient::getInstance()->getRenderDevices(); + if (devices.size() > 0) // if zero, we've not received our devices yet { - mCtrlOutputDevices->add(getLocalizedDeviceName(device->display_name), device->full_name, ADD_BOTTOM); - } + mCtrlOutputDevices->removeall(); + mCtrlOutputDevices->add(getLocalizedDeviceName(DEFAULT_DEVICE), DEFAULT_DEVICE, ADD_BOTTOM); - // Fix invalid output audio device preference. - if (!mCtrlOutputDevices->setSelectedByValue(mOutputDevice, TRUE)) - { - mCtrlOutputDevices->setValue(DEFAULT_DEVICE); - gSavedSettings.setString("VoiceOutputAudioDevice", DEFAULT_DEVICE); - mOutputDevice = DEFAULT_DEVICE; + for (auto& device : devices) + { + mCtrlOutputDevices->add(getLocalizedDeviceName(device.display_name), device.full_name, ADD_BOTTOM); + } + + // Fix invalid output audio device preference. + if (!mCtrlOutputDevices->setSelectedByValue(mOutputDevice, TRUE)) + { + mCtrlOutputDevices->setValue(DEFAULT_DEVICE); + gSavedSettings.setString("VoiceOutputAudioDevice", DEFAULT_DEVICE); + mOutputDevice = DEFAULT_DEVICE; + } } } } @@ -336,8 +339,11 @@ void LLPanelVoiceDeviceSettings::onCommitInputDevice() if(LLVoiceClient::getInstance()) { mInputDevice = mCtrlInputDevices->getValue().asString(); - LLVoiceClient::getInstance()->setRenderDevice(mInputDevice); + LLVoiceClient::getInstance()->setCaptureDevice(mInputDevice); } + // the preferences floater stuff is a mess, hence apply will never + // be called when 'ok' is pressed, so just force it for now. + apply(); } void LLPanelVoiceDeviceSettings::onCommitOutputDevice() @@ -348,6 +354,9 @@ void LLPanelVoiceDeviceSettings::onCommitOutputDevice() mOutputDevice = mCtrlOutputDevices->getValue().asString(); LLVoiceClient::getInstance()->setRenderDevice(mOutputDevice); } + // the preferences floater stuff is a mess, hence apply will never + // be called when 'ok' is pressed, so just force it for now. + apply(); } void LLPanelVoiceDeviceSettings::onOutputDevicesClicked() diff --git a/indra/newview/llsculptidsize.cpp b/indra/newview/llsculptidsize.cpp index 3bc5ad7616..6be05fb003 100644 --- a/indra/newview/llsculptidsize.cpp +++ b/indra/newview/llsculptidsize.cpp @@ -29,8 +29,6 @@ #include "llvovolume.h" #include "lldrawable.h" #include "llvoavatar.h" -//boost -#include "boost/make_shared.hpp" //........... 
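The llpanelvoicedevicesettings.cpp hunk above changes refresh() so the input and output combo boxes are only rebuilt once the voice module has actually reported a device list; as the new comment notes, an empty list just means the devices have not been received yet, and repopulating from it would wipe the user's saved choice. The same hunk fixes onCommitInputDevice() to call setCaptureDevice() (it previously called setRenderDevice()) and forces apply() on commit. A small sketch of the guard pattern, with illustrative names rather than the viewer's LLComboBox plumbing:

#include <algorithm>
#include <string>
#include <vector>

// Illustrative stand-in for one combo box refresh; Device and
// refresh_device_choices are made-up names, only the guard pattern matches.
struct Device { std::string display_name; std::string full_name; };

void refresh_device_choices(const std::vector<Device>& devices,   // reported by the voice module
                            std::vector<std::string>& choices,    // combo box contents
                            std::string& selected,                // saved preference
                            const std::string& default_device)
{
    if (devices.empty())
        return;   // enumeration still pending -- keep the current list and selection

    choices.clear();
    choices.push_back(default_device);
    for (const Device& d : devices)
        choices.push_back(d.full_name);

    // Fall back to the default device if the saved preference is no longer valid.
    if (std::find(choices.begin(), choices.end(), selected) == choices.end())
        selected = default_device;
}

int main()
{
    std::vector<Device> enumerated;                        // nothing received yet
    std::vector<std::string> choices{"Default", "USB Mic"};
    std::string selected = "USB Mic";
    refresh_device_choices(enumerated, choices, selected, "Default");  // no-op: list kept
}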
diff --git a/indra/newview/llselectmgr.h b/indra/newview/llselectmgr.h index 02c74d0ab0..0f9c76a2a1 100644 --- a/indra/newview/llselectmgr.h +++ b/indra/newview/llselectmgr.h @@ -48,7 +48,6 @@ #include <deque> #include <boost/iterator/filter_iterator.hpp> #include <boost/signals2.hpp> -#include <boost/make_shared.hpp> // boost::make_shared class LLMessageSystem; class LLViewerTexture; diff --git a/indra/newview/llsettingsvo.cpp b/indra/newview/llsettingsvo.cpp index 76632a83ae..5008061ec8 100644 --- a/indra/newview/llsettingsvo.cpp +++ b/indra/newview/llsettingsvo.cpp @@ -33,7 +33,6 @@ #include <algorithm> #include <cstdio> -#include <boost/make_shared.hpp> #include "lltrace.h" #include "llfasttimer.h" #include "v3colorutil.h" diff --git a/indra/newview/llspeakers.cpp b/indra/newview/llspeakers.cpp index 2d8163d9e1..cbccc79f4e 100644 --- a/indra/newview/llspeakers.cpp +++ b/indra/newview/llspeakers.cpp @@ -288,6 +288,10 @@ LLSpeakerMgr::~LLSpeakerMgr() LLPointer<LLSpeaker> LLSpeakerMgr::setSpeaker(const LLUUID& id, const std::string& name, LLSpeaker::ESpeakerStatus status, LLSpeaker::ESpeakerType type) { + if (!mVoiceChannel) + { + return NULL; + } LLUUID session_id = getSessionID(); if (id.isNull() || (id == session_id)) { @@ -373,16 +377,17 @@ void LLSpeakerMgr::update(BOOL resort_ok) } // update status of all current speakers - BOOL voice_channel_active = (!mVoiceChannel && LLVoiceClient::getInstance()->inProximalChannel()) || (mVoiceChannel && mVoiceChannel->isActive()); + LLVoiceClient* voice_client = LLVoiceClient::getInstance(); + bool voice_channel_active = (!mVoiceChannel && voice_client->inProximalChannel()) || (mVoiceChannel && mVoiceChannel->isActive()); for (speaker_map_t::iterator speaker_it = mSpeakers.begin(); speaker_it != mSpeakers.end(); speaker_it++) { LLUUID speaker_id = speaker_it->first; LLSpeaker* speakerp = speaker_it->second; - if (voice_channel_active && LLVoiceClient::getInstance()->getVoiceEnabled(speaker_id)) + if (voice_channel_active && voice_client->getVoiceEnabled(speaker_id)) { - speakerp->mSpeechVolume = LLVoiceClient::getInstance()->getCurrentPower(speaker_id); - BOOL moderator_muted_voice = LLVoiceClient::getInstance()->getIsModeratorMuted(speaker_id); + speakerp->mSpeechVolume = voice_client->getCurrentPower(speaker_id); + BOOL moderator_muted_voice = voice_client->getIsModeratorMuted(speaker_id); if (moderator_muted_voice != speakerp->mModeratorMutedVoice) { speakerp->mModeratorMutedVoice = moderator_muted_voice; @@ -390,11 +395,11 @@ void LLSpeakerMgr::update(BOOL resort_ok) speakerp->fireEvent(new LLSpeakerVoiceModerationEvent(speakerp)); } - if (LLVoiceClient::getInstance()->getOnMuteList(speaker_id) || speakerp->mModeratorMutedVoice) + if (voice_client->getOnMuteList(speaker_id) || speakerp->mModeratorMutedVoice) { speakerp->mStatus = LLSpeaker::STATUS_MUTED; } - else if (LLVoiceClient::getInstance()->getIsSpeaking(speaker_id)) + else if (voice_client->getIsSpeaking(speaker_id)) { // reset inactivity expiration if (speakerp->mStatus != LLSpeaker::STATUS_SPEAKING) @@ -477,20 +482,21 @@ void LLSpeakerMgr::update(BOOL resort_ok) void LLSpeakerMgr::updateSpeakerList() { // Are we bound to the currently active voice channel? 
- if ((!mVoiceChannel && LLVoiceClient::getInstance()->inProximalChannel()) || (mVoiceChannel && mVoiceChannel->isActive())) + LLVoiceClient* voice_client = LLVoiceClient::getInstance(); + if ((!mVoiceChannel && voice_client->inProximalChannel()) || (mVoiceChannel && mVoiceChannel->isActive())) { std::set<LLUUID> participants; - LLVoiceClient::getInstance()->getParticipantList(participants); + voice_client->getParticipantList(participants); // If we are, add all voice client participants to our list of known speakers for (std::set<LLUUID>::iterator participant_it = participants.begin(); participant_it != participants.end(); ++participant_it) { setSpeaker(*participant_it, - LLVoiceClient::getInstance()->getDisplayName(*participant_it), + voice_client->getDisplayName(*participant_it), LLSpeaker::STATUS_VOICE_ACTIVE, - (LLVoiceClient::getInstance()->isParticipantAvatar(*participant_it)?LLSpeaker::SPEAKER_AGENT:LLSpeaker::SPEAKER_EXTERNAL)); + (voice_client->isParticipantAvatar(*participant_it)?LLSpeaker::SPEAKER_AGENT:LLSpeaker::SPEAKER_EXTERNAL)); } } - else + else if (mVoiceChannel) { // If not, check if the list is empty, except if it's Nearby Chat (session_id NULL). LLUUID session_id = getSessionID(); @@ -618,7 +624,7 @@ void LLSpeakerMgr::getSpeakerList(speaker_list_t* speaker_list, BOOL include_tex const LLUUID LLSpeakerMgr::getSessionID() { - return mVoiceChannel->getSessionID(); + return mVoiceChannel ? mVoiceChannel->getSessionID() : LLUUID(); } bool LLSpeakerMgr::isSpeakerToBeRemoved(const LLUUID& speaker_id) @@ -816,7 +822,7 @@ void LLIMSpeakerMgr::updateSpeakers(const LLSD& update) void LLIMSpeakerMgr::toggleAllowTextChat(const LLUUID& speaker_id) { LLPointer<LLSpeaker> speakerp = findSpeaker(speaker_id); - if (!speakerp) return; + if (!speakerp || !mVoiceChannel) return; std::string url = gAgent.getRegionCapability("ChatSessionRequest"); LLSD data; @@ -835,7 +841,7 @@ void LLIMSpeakerMgr::toggleAllowTextChat(const LLUUID& speaker_id) void LLIMSpeakerMgr::moderateVoiceParticipant(const LLUUID& avatar_id, bool unmute) { LLPointer<LLSpeaker> speakerp = findSpeaker(avatar_id); - if (!speakerp) return; + if (!speakerp || !mVoiceChannel) return; // *NOTE: mantipov: probably this condition will be incorrect when avatar will be blocked for // text chat via moderation (LLSpeaker::mModeratorMutedText == TRUE) diff --git a/indra/newview/llspeakers.h b/indra/newview/llspeakers.h index eb86fadea1..c16da269e4 100644 --- a/indra/newview/llspeakers.h +++ b/indra/newview/llspeakers.h @@ -241,6 +241,7 @@ public: typedef std::vector<LLPointer<LLSpeaker> > speaker_list_t; void getSpeakerList(speaker_list_t* speaker_list, BOOL include_text); LLVoiceChannel* getVoiceChannel() { return mVoiceChannel; } + void setVoiceChannel(LLVoiceChannel *voiceChannel) { mVoiceChannel = voiceChannel; } const LLUUID getSessionID(); bool isSpeakerToBeRemoved(const LLUUID& speaker_id); diff --git a/indra/newview/llspeakingindicatormanager.cpp b/indra/newview/llspeakingindicatormanager.cpp index f16ab3b25a..34bed8a269 100644 --- a/indra/newview/llspeakingindicatormanager.cpp +++ b/indra/newview/llspeakingindicatormanager.cpp @@ -182,7 +182,7 @@ void SpeakingIndicatorManager::unregisterSpeakingIndicator(const LLUUID& speaker SpeakingIndicatorManager::SpeakingIndicatorManager() { LLVoiceChannel::setCurrentVoiceChannelChangedCallback(boost::bind(&SpeakingIndicatorManager::sOnCurrentChannelChanged, this, _1)); - LLVoiceClient::getInstance()->addObserver(this); + LLVoiceClient::addObserver(this); } 
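Several call sites in this patch (llpanelgroup.cpp, llpanelpeople.cpp, llpanelprofile.cpp, llspeakingindicatormanager.cpp) replace the old destructor-side guard, if (LLVoiceClient::instanceExists()) { ... removeObserver(this); }, with a bare LLVoiceClient::removeObserver(this). That only works if the observer helpers are static and do the existence check internally; those statics are presumably declared in llvoiceclient.h, which is not part of this section, so the sketch below only shows the shape of such a wrapper with made-up names:

#include <set>

// Shape of a static observer wrapper that tolerates singleton teardown; a sketch
// only -- the real helpers would live in llvoiceclient.h, not shown in this hunk.
class VoiceService
{
public:
    struct Observer { virtual ~Observer() = default; };

    static void addObserver(Observer* obs)    { if (sInstance) sInstance->mObservers.insert(obs); }
    static void removeObserver(Observer* obs) { if (sInstance) sInstance->mObservers.erase(obs); }  // safe after shutdown

private:
    std::set<Observer*> mObservers;
    static VoiceService* sInstance;   // owned elsewhere; null once the service is destroyed
};

VoiceService* VoiceService::sInstance = nullptr;

int main()
{
    VoiceService::Observer obs;
    VoiceService::removeObserver(&obs);   // no-op when the instance is already gone
}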
SpeakingIndicatorManager::~SpeakingIndicatorManager() @@ -193,10 +193,7 @@ void SpeakingIndicatorManager::cleanupSingleton() { // Don't use LLVoiceClient::getInstance() here without a check, // singleton MAY have already been destroyed. - if (LLVoiceClient::instanceExists()) - { - LLVoiceClient::getInstance()->removeObserver(this); - } + LLVoiceClient::removeObserver(this); } void SpeakingIndicatorManager::sOnCurrentChannelChanged(const LLUUID& /*session_id*/) diff --git a/indra/newview/llviewercontrol.cpp b/indra/newview/llviewercontrol.cpp index b69f1aa41b..7997bcfbdb 100644 --- a/indra/newview/llviewercontrol.cpp +++ b/indra/newview/llviewercontrol.cpp @@ -844,6 +844,9 @@ void settings_setup_listeners() setting_setup_signal_listener(gSavedSettings, "PushToTalkButton", handleVoiceClientPrefsChanged); setting_setup_signal_listener(gSavedSettings, "PushToTalkToggle", handleVoiceClientPrefsChanged); setting_setup_signal_listener(gSavedSettings, "VoiceEarLocation", handleVoiceClientPrefsChanged); + setting_setup_signal_listener(gSavedSettings, "VoiceEchoCancellation", handleVoiceClientPrefsChanged); + setting_setup_signal_listener(gSavedSettings, "VoiceAutomaticGainControl", handleVoiceClientPrefsChanged); + setting_setup_signal_listener(gSavedSettings, "VoiceNoiseSuppressionLevel", handleVoiceClientPrefsChanged); setting_setup_signal_listener(gSavedSettings, "VoiceInputAudioDevice", handleVoiceClientPrefsChanged); setting_setup_signal_listener(gSavedSettings, "VoiceOutputAudioDevice", handleVoiceClientPrefsChanged); setting_setup_signal_listener(gSavedSettings, "AudioLevelMic", handleVoiceClientPrefsChanged); diff --git a/indra/newview/llviewerhelp.cpp b/indra/newview/llviewerhelp.cpp index e7b150965d..bca1c4166a 100644 --- a/indra/newview/llviewerhelp.cpp +++ b/indra/newview/llviewerhelp.cpp @@ -45,7 +45,7 @@ public: // requests will be throttled from a non-trusted browser LLHelpHandler() : LLCommandHandler("help", UNTRUSTED_CLICK_ONLY) {} - bool handle(const LLSD& params, const LLSD& query_map, const std::string& grid, LLMediaCtrl* web) + bool handle(const LLSD& params, const LLSD& query_map, const std::string& grid, LLMediaCtrl* web) override { LLViewerHelp* vhelp = LLViewerHelp::getInstance(); if (! 
vhelp) diff --git a/indra/newview/llviewermedia.cpp b/indra/newview/llviewermedia.cpp index 7b5eb581fe..6ab702ba58 100644 --- a/indra/newview/llviewermedia.cpp +++ b/indra/newview/llviewermedia.cpp @@ -1757,9 +1757,7 @@ LLPluginClassMedia* LLViewerMediaImpl::newSourceFromMediaType(std::string media_ bool javascript_enabled = gSavedSettings.getBOOL("BrowserJavascriptEnabled"); media_source->setJavascriptEnabled(javascript_enabled || clean_browser); - // collect 'web security disabled' (see Chrome --web-security-disabled) setting from prefs and send to embedded browser - bool web_security_disabled = gSavedSettings.getBOOL("BrowserWebSecurityDisabled"); - media_source->setWebSecurityDisabled(web_security_disabled || clean_browser); + media_source->setWebSecurityDisabled(clean_browser); // collect setting indicates if local file access from file URLs is allowed from prefs and send to embedded browser bool file_access_from_file_urls = gSavedSettings.getBOOL("BrowserFileAccessFromFileUrls"); diff --git a/indra/newview/llviewermenu.cpp b/indra/newview/llviewermenu.cpp index 88ff6a28b4..7f6c6e2571 100644 --- a/indra/newview/llviewermenu.cpp +++ b/indra/newview/llviewermenu.cpp @@ -9363,15 +9363,6 @@ class LLUpdateMembershipLabel : public view_listener_t } }; -void handle_voice_morphing_subscribe() -{ - LLWeb::loadURL(LLTrans::getString("voice_morphing_url")); -} - -void handle_premium_voice_morphing_subscribe() -{ - LLWeb::loadURL(LLTrans::getString("premium_voice_morphing_url")); -} class LLToggleUIHints : public view_listener_t { @@ -9572,16 +9563,6 @@ void initialize_menus() //Communicate Nearby chat view_listener_t::addMenu(new LLCommunicateNearbyChat(), "Communicate.NearbyChat"); - // Communicate > Voice morphing > Subscribe... - commit.add("Communicate.VoiceMorphing.Subscribe", boost::bind(&handle_voice_morphing_subscribe)); - // Communicate > Voice morphing > Premium perk... 
- commit.add("Communicate.VoiceMorphing.PremiumPerk", boost::bind(&handle_premium_voice_morphing_subscribe)); - LLVivoxVoiceClient * voice_clientp = LLVivoxVoiceClient::getInstance(); - enable.add("Communicate.VoiceMorphing.NoVoiceMorphing.Check" - , boost::bind(&LLVivoxVoiceClient::onCheckVoiceEffect, voice_clientp, "NoVoiceMorphing")); - commit.add("Communicate.VoiceMorphing.NoVoiceMorphing.Click" - , boost::bind(&LLVivoxVoiceClient::onClickVoiceEffect, voice_clientp, "NoVoiceMorphing")); - // World menu view_listener_t::addMenu(new LLWorldAlwaysRun(), "World.AlwaysRun"); view_listener_t::addMenu(new LLWorldCreateLandmark(), "World.CreateLandmark"); diff --git a/indra/newview/llviewerregion.cpp b/indra/newview/llviewerregion.cpp index a71746f3d3..4c6c76bdd8 100755 --- a/indra/newview/llviewerregion.cpp +++ b/indra/newview/llviewerregion.cpp @@ -3233,6 +3233,7 @@ void LLViewerRegionImpl::buildCapabilityNames(LLSD& capabilityNames) capabilityNames.append("ParcelVoiceInfoRequest"); capabilityNames.append("ProductInfoRequest"); capabilityNames.append("ProvisionVoiceAccountRequest"); + capabilityNames.append("VoiceSignalingRequest"); capabilityNames.append("ReadOfflineMsgs"); // Requires to respond reliably: AcceptFriendship, AcceptGroupInvite, DeclineFriendship, DeclineGroupInvite capabilityNames.append("RegionObjects"); capabilityNames.append("RemoteParcelRequest"); diff --git a/indra/newview/llviewerregion.h b/indra/newview/llviewerregion.h index b88d68fdf8..d598406952 100644 --- a/indra/newview/llviewerregion.h +++ b/indra/newview/llviewerregion.h @@ -43,6 +43,7 @@ #include "m4math.h" // LLMatrix4 #include "llframetimer.h" #include "llreflectionmap.h" +#include "llpointer.h" // Surface id's #define LAND 1 diff --git a/indra/newview/llviewertexture.cpp b/indra/newview/llviewertexture.cpp index 9aee1c0caf..03dca1e5f3 100644 --- a/indra/newview/llviewertexture.cpp +++ b/indra/newview/llviewertexture.cpp @@ -834,7 +834,7 @@ S32 LLViewerTexture::getTotalNumFaces() const S32 LLViewerTexture::getNumFaces(U32 ch) const { llassert(ch < LLRender::NUM_TEXTURE_CHANNELS); - return mNumFaces[ch]; + return ch < LLRender::NUM_TEXTURE_CHANNELS ? 
mNumFaces[ch] : 0; } diff --git a/indra/newview/llvoavatar.cpp b/indra/newview/llvoavatar.cpp index d7382eaac6..26adcf59cd 100644 --- a/indra/newview/llvoavatar.cpp +++ b/indra/newview/llvoavatar.cpp @@ -614,7 +614,8 @@ BOOL LLVOAvatar::sShowAnimationDebug = FALSE; BOOL LLVOAvatar::sVisibleInFirstPerson = FALSE; F32 LLVOAvatar::sLODFactor = 1.f; F32 LLVOAvatar::sPhysicsLODFactor = 1.f; -BOOL LLVOAvatar::sJointDebug = FALSE; +BOOL LLVOAvatar::sJointDebug = FALSE; +BOOL LLVOAvatar::sLipSyncEnabled = FALSE; F32 LLVOAvatar::sUnbakedTime = 0.f; F32 LLVOAvatar::sUnbakedUpdateTime = 0.f; F32 LLVOAvatar::sGreyTime = 0.f; @@ -1155,6 +1156,7 @@ void LLVOAvatar::initClass() LLControlAvatar::sRegionChangedSlot = gAgent.addRegionChangedCallback(&LLControlAvatar::onRegionChanged); sCloudTexture = LLViewerTextureManager::getFetchedTextureFromFile("cloud-particle.j2c"); + gSavedSettings.getControl("LipSyncEnabled")->getSignal()->connect(boost::bind(&LLVOAvatar::handleVOAvatarPrefsChanged, _2)); } @@ -1162,6 +1164,12 @@ void LLVOAvatar::cleanupClass() { } +bool LLVOAvatar::handleVOAvatarPrefsChanged(const LLSD &newvalue) +{ + sLipSyncEnabled = gSavedSettings.getBOOL("LipSyncEnabled"); + return true; +} + // virtual void LLVOAvatar::initInstance() { @@ -2763,7 +2771,7 @@ void LLVOAvatar::idleUpdateVoiceVisualizer(bool voice_enabled, const LLVector3 & // Notice the calls to "gAwayTimer.reset()". This resets the timer that determines how long the avatar has been // "away", so that the avatar doesn't lapse into away-mode (and slump over) while the user is still talking. //----------------------------------------------------------------------------------------------------------------- - if (LLVoiceClient::getInstance()->getIsSpeaking( mID )) + if (LLVoiceClient::getInstance()->getIsSpeaking( mID ) && (!isInMuteList() || isSelf())) { if (!mVoiceVisualizer->getCurrentlySpeaking()) { @@ -3057,7 +3065,7 @@ void LLVOAvatar::idleUpdateLipSync(bool voice_enabled) // Use the Lipsync_Ooh and Lipsync_Aah morphs for lip sync if ( voice_enabled && mLastRezzedStatus > 0 // no point updating lip-sync for clouds - && (LLVoiceClient::getInstance()->lipSyncEnabled()) + && sLipSyncEnabled && LLVoiceClient::getInstance()->getIsSpeaking( mID ) ) { F32 ooh_morph_amount = 0.0f; @@ -9341,8 +9349,8 @@ void LLVOAvatar::parseAppearanceMessage(LLMessageSystem* mesgsys, LLAppearanceMe } else { - LL_DEBUGS("Avatar") << "AvatarAppearance msg received without any parameters, object: " << getID() << LL_ENDL; - } + LL_DEBUGS("Avatar") << "AvatarAppearance msg received without any parameters, object: " << getID() << LL_ENDL; + } LLVisualParam* appearance_version_param = getVisualParam(11000); if (appearance_version_param) diff --git a/indra/newview/llvoavatar.h b/indra/newview/llvoavatar.h index c14784cc6d..1ccefa9c1e 100644 --- a/indra/newview/llvoavatar.h +++ b/indra/newview/llvoavatar.h @@ -109,6 +109,7 @@ public: virtual void initInstance(); // Called after construction to initialize the class. 
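The llvoavatar change above stops querying LLVoiceClient::lipSyncEnabled() on the per-frame lip-sync path and instead caches the LipSyncEnabled setting in the static sLipSyncEnabled, refreshed by handleVOAvatarPrefsChanged() through the control-changed signal registered in initClass(). A generic sketch of that cache-on-change pattern, with illustrative types standing in for gSavedSettings and its control signals:

#include <functional>
#include <iostream>
#include <vector>

// Generic cache-on-change pattern: subscribe once, copy into a static,
// read the static on the hot path. BoolSetting is an illustrative stand-in.
struct BoolSetting
{
    bool value = false;
    std::vector<std::function<void(bool)>> listeners;
    void set(bool v) { value = v; for (auto& l : listeners) l(v); }
};

static bool sLipSyncEnabled = false;   // read every frame instead of re-querying settings

int main()
{
    BoolSetting lip_sync;
    lip_sync.listeners.push_back([](bool v) { sLipSyncEnabled = v; });
    lip_sync.set(true);
    std::cout << "lip sync cached as: " << std::boolalpha << sLipSyncEnabled << '\n';
}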
protected: virtual ~LLVOAvatar(); + static bool handleVOAvatarPrefsChanged(const LLSD &newvalue); /** Initialization ** ** @@ -366,6 +367,7 @@ public: static F32 sLODFactor; // user-settable LOD factor static F32 sPhysicsLODFactor; // user-settable physics LOD factor static BOOL sJointDebug; // output total number of joints being touched for each avatar + static BOOL sLipSyncEnabled; static LLPointer<LLViewerTexture> sCloudTexture; diff --git a/indra/newview/llvocache.cpp b/indra/newview/llvocache.cpp index bfac68b68f..a65e49a724 100644 --- a/indra/newview/llvocache.cpp +++ b/indra/newview/llvocache.cpp @@ -529,6 +529,16 @@ F32 LLVOCacheEntry::getSquaredPixelThreshold(bool is_front) bool LLVOCacheEntry::isAnyVisible(const LLVector4a& camera_origin, const LLVector4a& local_camera_origin, F32 dist_threshold) { +#if 0 + // this is ill-conceived and should be removed pending QA + // In the name of saving memory, we evict objects that are still within view distance from memory + // This results in constant paging of objects in and out of memory, leading to poor performance + // and many unacceptable visual glitches when rotating the camera + + // Honestly, the entire VOCache partition system needs to be removed since it doubles the overhead of + // the spatial partition system and is redundant to the object cache, but this is a start + // - davep 2024.06.07 + LLOcclusionCullingGroup* group = (LLOcclusionCullingGroup*)getGroup(); if(!group) { @@ -565,6 +575,9 @@ bool LLVOCacheEntry::isAnyVisible(const LLVector4a& camera_origin, const LLVecto } return vis; +#else + return true; +#endif } void LLVOCacheEntry::calcSceneContribution(const LLVector4a& camera_origin, bool needs_update, U32 last_update, F32 max_dist) diff --git a/indra/newview/llvoicechannel.cpp b/indra/newview/llvoicechannel.cpp index 7ca73a24bf..7f578171af 100644 --- a/indra/newview/llvoicechannel.cpp +++ b/indra/newview/llvoicechannel.cpp @@ -39,7 +39,6 @@ #include "llcorehttputil.h" LLVoiceChannel::voice_channel_map_t LLVoiceChannel::sVoiceChannelMap; -LLVoiceChannel::voice_channel_map_uri_t LLVoiceChannel::sVoiceChannelURIMap; LLVoiceChannel* LLVoiceChannel::sCurrentVoiceChannel = NULL; LLVoiceChannel* LLVoiceChannel::sSuspendedVoiceChannel = NULL; LLVoiceChannel::channel_changed_signal_t LLVoiceChannel::sCurrentVoiceChannelChangedSignal; @@ -82,36 +81,22 @@ LLVoiceChannel::~LLVoiceChannel() { sCurrentVoiceChannel = NULL; // Must check instance exists here, the singleton MAY have already been destroyed. 
- if(LLVoiceClient::instanceExists()) - { - LLVoiceClient::getInstance()->removeObserver(this); - } + LLVoiceClient::removeObserver(this); } sVoiceChannelMap.erase(mSessionID); - sVoiceChannelURIMap.erase(mURI); } -void LLVoiceChannel::setChannelInfo( - const std::string& uri, - const std::string& credentials) +void LLVoiceChannel::setChannelInfo(const LLSD &channelInfo) { - setURI(uri); - - mCredentials = credentials; + mChannelInfo = channelInfo; if (mState == STATE_NO_CHANNEL_INFO) { - if (mURI.empty()) + if (mChannelInfo.isUndefined() || !mChannelInfo.isMap() || mChannelInfo.size() == 0) { LLNotificationsUtil::add("VoiceChannelJoinFailed", mNotifyArgs); - LL_WARNS("Voice") << "Received empty URI for channel " << mSessionName << LL_ENDL; - deactivate(); - } - else if (mCredentials.empty()) - { - LLNotificationsUtil::add("VoiceChannelJoinFailed", mNotifyArgs); - LL_WARNS("Voice") << "Received empty credentials for channel " << mSessionName << LL_ENDL; + LL_WARNS("Voice") << "Received empty channel info for channel " << mSessionName << LL_ENDL; deactivate(); } else @@ -130,9 +115,21 @@ void LLVoiceChannel::setChannelInfo( } } -void LLVoiceChannel::onChange(EStatusType type, const std::string &channelURI, bool proximal) +void LLVoiceChannel::resetChannelInfo() +{ + mChannelInfo = LLSD(); + mState = STATE_NO_CHANNEL_INFO; +} + +void LLVoiceChannel::onChange(EStatusType type, const LLSD& channelInfo, bool proximal) { - if (channelURI != mURI) + LL_DEBUGS("Voice") << "Incoming channel info: " << channelInfo << LL_ENDL; + LL_DEBUGS("Voice") << "Current channel info: " << mChannelInfo << LL_ENDL; + if (mChannelInfo.isUndefined() || (mChannelInfo.isMap() && mChannelInfo.size() == 0)) + { + mChannelInfo = channelInfo; + } + if (!LLVoiceClient::getInstance()->compareChannels(mChannelInfo, channelInfo)) { return; } @@ -158,13 +155,16 @@ void LLVoiceChannel::handleStatusChange(EStatusType type) case STATUS_LOGGED_IN: break; case STATUS_LEFT_CHANNEL: - if (callStarted() && !mIgnoreNextSessionLeave && !sSuspended) + if (callStarted() && !sSuspended) { // if forceably removed from channel // update the UI and revert to default channel + // deactivate will set the State to STATE_HUNG_UP + // so when handleStatusChange is called again during + // shutdown callStarted will return false and deactivate + // won't be called again. 
deactivate(); } - mIgnoreNextSessionLeave = FALSE; break; case STATUS_JOINING: if (callStarted()) @@ -190,13 +190,13 @@ void LLVoiceChannel::handleError(EStatusType type) setState(STATE_ERROR); } -BOOL LLVoiceChannel::isActive() +bool LLVoiceChannel::isActive() const { // only considered active when currently bound channel matches what our channel - return callStarted() && LLVoiceClient::getInstance()->getCurrentChannel() == mURI; + return callStarted() && LLVoiceClient::getInstance()->isCurrentChannel(mChannelInfo); } -BOOL LLVoiceChannel::callStarted() +bool LLVoiceChannel::callStarted() const { return mState >= STATE_CALL_STARTED; } @@ -222,7 +222,7 @@ void LLVoiceChannel::deactivate() LLVoiceClient::getInstance()->setUserPTTState(false); } } - LLVoiceClient::getInstance()->removeObserver(this); + LLVoiceClient::removeObserver(this); if (sCurrentVoiceChannel == this) { @@ -246,10 +246,8 @@ void LLVoiceChannel::activate() // activating the proximal channel between IM calls LLVoiceChannel* old_channel = sCurrentVoiceChannel; sCurrentVoiceChannel = this; - mCallDialogPayload["old_channel_name"] = ""; if (old_channel) { - mCallDialogPayload["old_channel_name"] = old_channel->getSessionName(); old_channel->deactivate(); } } @@ -257,20 +255,20 @@ void LLVoiceChannel::activate() if (mState == STATE_NO_CHANNEL_INFO) { // responsible for setting status to active - getChannelInfo(); + requestChannelInfo(); } else { setState(STATE_CALL_STARTED); } - LLVoiceClient::getInstance()->addObserver(this); + LLVoiceClient::addObserver(this); //do not send earlier, channel should be initialized, should not be in STATE_NO_CHANNEL_INFO state sCurrentVoiceChannelChangedSignal(this->mSessionID); } -void LLVoiceChannel::getChannelInfo() +void LLVoiceChannel::requestChannelInfo() { // pretend we have everything we need if (sCurrentVoiceChannel == this) @@ -293,20 +291,6 @@ LLVoiceChannel* LLVoiceChannel::getChannelByID(const LLUUID& session_id) } } -//static -LLVoiceChannel* LLVoiceChannel::getChannelByURI(std::string uri) -{ - voice_channel_map_uri_t::iterator found_it = sVoiceChannelURIMap.find(uri); - if (found_it == sVoiceChannelURIMap.end()) - { - return NULL; - } - else - { - return found_it->second; - } -} - LLVoiceChannel* LLVoiceChannel::getCurrentVoiceChannel() { return sCurrentVoiceChannel; @@ -319,13 +303,6 @@ void LLVoiceChannel::updateSessionID(const LLUUID& new_session_id) sVoiceChannelMap.insert(std::make_pair(mSessionID, this)); } -void LLVoiceChannel::setURI(std::string uri) -{ - sVoiceChannelURIMap.erase(mURI); - mURI = uri; - sVoiceChannelURIMap.insert(std::make_pair(mURI, this)); -} - void LLVoiceChannel::setState(EState state) { switch(state) @@ -410,8 +387,11 @@ boost::signals2::connection LLVoiceChannel::setCurrentVoiceChannelChangedCallbac // LLVoiceChannelGroup // -LLVoiceChannelGroup::LLVoiceChannelGroup(const LLUUID& session_id, const std::string& session_name) : - LLVoiceChannel(session_id, session_name) +LLVoiceChannelGroup::LLVoiceChannelGroup(const LLUUID &session_id, + const std::string &session_name, + bool is_p2p) : + LLVoiceChannel(session_id, session_name), + mIsP2P(is_p2p) { mRetries = DEFAULT_RETRIES_COUNT; mIsRetrying = FALSE; @@ -424,7 +404,15 @@ void LLVoiceChannelGroup::deactivate() LLVoiceClient::getInstance()->leaveNonSpatialChannel(); } LLVoiceChannel::deactivate(); -} + + if (mIsP2P) + { + // void the channel info for p2p adhoc channels + // so we request it again, hence throwing up the + // connect dialogue on the other side. 
+ setState(STATE_NO_CHANNEL_INFO); + } + } void LLVoiceChannelGroup::activate() { @@ -435,43 +423,46 @@ void LLVoiceChannelGroup::activate() if (callStarted()) { // we have the channel info, just need to use it now - LLVoiceClient::getInstance()->setNonSpatialChannel( - mURI, - mCredentials); + LLVoiceClient::getInstance()->setNonSpatialChannel(mChannelInfo, + mIsP2P && (mCallDirection == OUTGOING_CALL), + mIsP2P); - if (!gAgent.isInGroup(mSessionID)) // ad-hoc channel + if (mIsP2P) + { + LLIMModel::addSpeakersToRecent(mSessionID); + } + else { - LLIMModel::LLIMSession* session = LLIMModel::getInstance()->findIMSession(mSessionID); - // Adding ad-hoc call participants to Recent People List. - // If it's an outgoing ad-hoc, we can use mInitialTargetIDs that holds IDs of people we - // called(both online and offline) as source to get people for recent (STORM-210). - if (session->isOutgoingAdHoc()) + if (!gAgent.isInGroup(mSessionID)) // ad-hoc channel { - for (uuid_vec_t::iterator it = session->mInitialTargetIDs.begin(); - it!=session->mInitialTargetIDs.end();++it) + LLIMModel::LLIMSession *session = LLIMModel::getInstance()->findIMSession(mSessionID); + // Adding ad-hoc call participants to Recent People List. + // If it's an outgoing ad-hoc, we can use mInitialTargetIDs that holds IDs of people we + // called(both online and offline) as source to get people for recent (STORM-210). + if (session && session->isOutgoingAdHoc()) { - const LLUUID id = *it; - LLRecentPeople::instance().add(id); + for (uuid_vec_t::iterator it = session->mInitialTargetIDs.begin(); it != session->mInitialTargetIDs.end(); ++it) + { + const LLUUID id = *it; + LLRecentPeople::instance().add(id); + } + } + // If this ad-hoc is incoming then trying to get ids of people from mInitialTargetIDs + // would lead to EXT-8246. So in this case we get them from speakers list. + else + { + LLIMModel::addSpeakersToRecent(mSessionID); } - } - // If this ad-hoc is incoming then trying to get ids of people from mInitialTargetIDs - // would lead to EXT-8246. So in this case we get them from speakers list. - else - { - LLIMModel::addSpeakersToRecent(mSessionID); } } - //Mic default state is OFF on initiating/joining Ad-Hoc/Group calls - if (LLVoiceClient::getInstance()->getUserPTTState() && LLVoiceClient::getInstance()->getPTTIsToggle()) - { - LLVoiceClient::getInstance()->inputUserControlState(true); - } + // Mic default state is OFF on initiating/joining Ad-Hoc/Group calls. It's on for P2P using the AdHoc infra. 
+ LLVoiceClient::getInstance()->setUserPTTState(mIsP2P); } } -void LLVoiceChannelGroup::getChannelInfo() +void LLVoiceChannelGroup::requestChannelInfo() { LLViewerRegion* region = gAgent.getRegion(); if (region) @@ -483,17 +474,13 @@ void LLVoiceChannelGroup::getChannelInfo() } } -void LLVoiceChannelGroup::setChannelInfo( - const std::string& uri, - const std::string& credentials) +void LLVoiceChannelGroup::setChannelInfo(const LLSD& channelInfo) { - setURI(uri); - - mCredentials = credentials; + mChannelInfo = channelInfo; if (mState == STATE_NO_CHANNEL_INFO) { - if(!mURI.empty() && !mCredentials.empty()) + if(mChannelInfo.isDefined() && mChannelInfo.isMap()) { setState(STATE_READY); @@ -516,9 +503,9 @@ void LLVoiceChannelGroup::setChannelInfo( else if ( mIsRetrying ) { // we have the channel info, just need to use it now - LLVoiceClient::getInstance()->setNonSpatialChannel( - mURI, - mCredentials); + LLVoiceClient::getInstance()->setNonSpatialChannel(channelInfo, + mCallDirection == OUTGOING_CALL, + mIsP2P); } } @@ -556,7 +543,7 @@ void LLVoiceChannelGroup::handleError(EStatusType status) mIsRetrying = TRUE; mIgnoreNextSessionLeave = TRUE; - getChannelInfo(); + requestChannelInfo(); return; } else @@ -612,6 +599,16 @@ void LLVoiceChannelGroup::voiceCallCapCoro(std::string url) LLSD postData; postData["method"] = "call"; postData["session-id"] = mSessionID; + LLSD altParams; + std::string preferred_voice_server_type = gSavedSettings.getString("VoiceServerType"); + if (preferred_voice_server_type.empty()) + { + // default to the server type associated with the region we're on. + LLVoiceVersionInfo versionInfo = LLVoiceClient::getInstance()->getVersion(); + preferred_voice_server_type = versionInfo.internalVoiceServerType; + } + altParams["preferred_voice_server_type"] = preferred_voice_server_type; + postData["alt_params"] = altParams; LL_INFOS("Voice", "voiceCallCapCoro") << "Generic POST for " << url << LL_ENDL; @@ -651,14 +648,12 @@ void LLVoiceChannelGroup::voiceCallCapCoro(std::string url) LLSD::map_const_iterator iter; for (iter = result.beginMap(); iter != result.endMap(); ++iter) { - LL_DEBUGS("Voice") << "LLVoiceCallCapResponder::result got " + LL_DEBUGS("Voice") << "LLVoiceChannelGroup::voiceCallCapCoro got " << iter->first << LL_ENDL; } + LL_INFOS("Voice") << "LLVoiceChannelGroup::voiceCallCapCoro got " << result << LL_ENDL; - channelp->setChannelInfo( - result["voice_credentials"]["channel_uri"].asString(), - result["voice_credentials"]["channel_credentials"].asString()); - + channelp->setChannelInfo(result["voice_credentials"]); } @@ -670,7 +665,7 @@ LLVoiceChannelProximal::LLVoiceChannelProximal() : { } -BOOL LLVoiceChannelProximal::isActive() +bool LLVoiceChannelProximal::isActive() const { return callStarted() && LLVoiceClient::getInstance()->inProximalChannel(); } @@ -684,11 +679,12 @@ void LLVoiceChannelProximal::activate() // we're connected to a non-spatial channel, so disconnect. 
LLVoiceClient::getInstance()->leaveNonSpatialChannel(); } - LLVoiceChannel::activate(); + LLVoiceClient::getInstance()->activateSpatialChannel(true); + LLVoiceChannel::activate(); } -void LLVoiceChannelProximal::onChange(EStatusType type, const std::string &channelURI, bool proximal) +void LLVoiceChannelProximal::onChange(EStatusType type, const LLSD& channelInfo, bool proximal) { if (!proximal) { @@ -749,7 +745,7 @@ void LLVoiceChannelProximal::handleError(EStatusType status) LLNotificationsUtil::add(notify, mNotifyArgs); } - LLVoiceChannel::handleError(status); + // proximal voice remains up and the provider will try to reconnect. } void LLVoiceChannelProximal::deactivate() @@ -758,19 +754,24 @@ void LLVoiceChannelProximal::deactivate() { setState(STATE_HUNG_UP); } + LLVoiceClient::removeObserver(this); + LLVoiceClient::getInstance()->activateSpatialChannel(false); } // // LLVoiceChannelP2P // -LLVoiceChannelP2P::LLVoiceChannelP2P(const LLUUID& session_id, const std::string& session_name, const LLUUID& other_user_id) : - LLVoiceChannelGroup(session_id, session_name), - mOtherUserID(other_user_id), - mReceivedCall(FALSE) +LLVoiceChannelP2P::LLVoiceChannelP2P(const LLUUID &session_id, + const std::string &session_name, + const LLUUID &other_user_id, + LLVoiceP2POutgoingCallInterface* outgoing_call_interface) : + LLVoiceChannelGroup(session_id, session_name, true), + mOtherUserID(other_user_id), + mReceivedCall(FALSE), + mOutgoingCallInterface(outgoing_call_interface) { - // make sure URI reflects encoded version of other user's agent id - setURI(LLVoiceClient::getInstance()->sipURIFromID(other_user_id)); + mChannelInfo = LLVoiceClient::getInstance()->getP2PChannelInfoTemplate(other_user_id); } void LLVoiceChannelP2P::handleStatusChange(EStatusType type) @@ -837,23 +838,23 @@ void LLVoiceChannelP2P::activate() if (callStarted()) { // no session handle yet, we're starting the call - if (mSessionHandle.empty()) + if (mIncomingCallInterface == nullptr) { mReceivedCall = FALSE; - LLVoiceClient::getInstance()->callUser(mOtherUserID); + mOutgoingCallInterface->callUser(mOtherUserID); } // otherwise answering the call else { - if (!LLVoiceClient::getInstance()->answerInvite(mSessionHandle)) + if (!mIncomingCallInterface->answerInvite()) { mCallEndedByAgent = false; - mSessionHandle.clear(); + mIncomingCallInterface.reset(); handleError(ERROR_UNKNOWN); return; } - // using the session handle invalidates it. Clear it out here so we can't reuse it by accident. - mSessionHandle.clear(); + // using the incoming call interface invalidates it. Clear it out here so we can't reuse it by accident. + mIncomingCallInterface.reset(); } // Add the party to the list of people with which we've recently interacted. 
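The hunks above replace LLVoiceChannelP2P's old session-handle/URI plumbing with the provider-neutral P2P call interfaces declared later in llvoiceclient.h. A minimal sketch of how a caller might drive those interfaces, using only the methods this diff introduces; the free function, its parameters, and the log text are illustrative, not part of the change:

    #include "llvoiceclient.h"

    // Illustrative only: start or answer a P2P call through the new interfaces.
    // channel_info is the LLSD blob LLVoiceChannelP2P keeps in mChannelInfo
    // (getP2PChannelInfoTemplate() for outgoing calls, the peer's offer for
    // incoming ones).
    static void drive_p2p_call(const LLUUID& other_user, const LLSD& channel_info, bool incoming)
    {
        LLVoiceClient* client = LLVoiceClient::getInstance();
        if (incoming)
        {
            // the incoming interface is one-shot: answering consumes it, which is
            // why activate() above resets mIncomingCallInterface afterwards
            LLVoiceP2PIncomingCallInterfacePtr call = client->getIncomingCallInterface(channel_info);
            if (!call || !call->answerInvite())
            {
                LL_WARNS("Voice") << "failed to answer P2P invite" << LL_ENDL;
            }
        }
        else if (LLVoiceP2POutgoingCallInterface* out = client->getOutgoingCallInterface(channel_info))
        {
            out->callUser(other_user);  // hang up later with out->hangup(), as deactivate() does
        }
    }
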
@@ -867,7 +868,17 @@ void LLVoiceChannelP2P::activate() } } -void LLVoiceChannelP2P::getChannelInfo() +void LLVoiceChannelP2P::deactivate() +{ + if (callStarted()) + { + mOutgoingCallInterface->hangup(); + } + LLVoiceChannel::deactivate(); +} + + +void LLVoiceChannelP2P::requestChannelInfo() { // pretend we have everything we need, since P2P doesn't use channel info if (sCurrentVoiceChannel == this) @@ -877,8 +888,9 @@ void LLVoiceChannelP2P::getChannelInfo() } // receiving session from other user who initiated call -void LLVoiceChannelP2P::setSessionHandle(const std::string& handle, const std::string &inURI) +void LLVoiceChannelP2P::setChannelInfo(const LLSD& channel_info) { + mChannelInfo = channel_info; BOOL needs_activate = FALSE; if (callStarted()) { @@ -893,34 +905,28 @@ void LLVoiceChannelP2P::setSessionHandle(const std::string& handle, const std::s { // we are active and have priority, invite the other user again // under the assumption they will join this new session - mSessionHandle.clear(); - LLVoiceClient::getInstance()->callUser(mOtherUserID); + mOutgoingCallInterface->callUser(mOtherUserID); return; } } - mSessionHandle = handle; - - // The URI of a p2p session should always be the other end's SIP URI. - if(!inURI.empty()) - { - setURI(inURI); - } - else + mReceivedCall = TRUE; + if (channel_info.isDefined() && channel_info.isMap()) { - LL_WARNS("Voice") << "incoming SIP URL is not provided. Channel may not work properly." << LL_ENDL; - // See LLVoiceClient::sessionAddedEvent() - setURI(LLVoiceClient::getInstance()->sipURIFromID(mOtherUserID)); + mIncomingCallInterface = LLVoiceClient::getInstance()->getIncomingCallInterface(channel_info); } - - mReceivedCall = TRUE; - if (needs_activate) { activate(); } } +void LLVoiceChannelP2P::resetChannelInfo() +{ + mChannelInfo = LLVoiceClient::getInstance()->getP2PChannelInfoTemplate(mOtherUserID); + mState = STATE_NO_CHANNEL_INFO; // we have template, not full info +} + void LLVoiceChannelP2P::setState(EState state) { LL_INFOS("Voice") << "P2P CALL STATE CHANGE: incoming=" << int(mReceivedCall) << " oldstate=" << mState << " newstate=" << state << LL_ENDL; @@ -932,7 +938,7 @@ void LLVoiceChannelP2P::setState(EState state) if (mReceivedCall && state == STATE_RINGING) { //TODO: remove or redirect this call status notification -// LLCallInfoDialog::show("answering", mNotifyArgs); + // LLCallInfoDialog::show("answering", mNotifyArgs); doSetState(state); return; } diff --git a/indra/newview/llvoicechannel.h b/indra/newview/llvoicechannel.h index 28651b73ec..95fd247ca6 100644 --- a/indra/newview/llvoicechannel.h +++ b/indra/newview/llvoicechannel.h @@ -66,18 +66,17 @@ public: LLVoiceChannel(const LLUUID& session_id, const std::string& session_name); virtual ~LLVoiceChannel(); - /*virtual*/ void onChange(EStatusType status, const std::string &channelURI, bool proximal); + virtual void onChange(EStatusType status, const LLSD& channelInfo, bool proximal); virtual void handleStatusChange(EStatusType status); virtual void handleError(EStatusType status); virtual void deactivate(); virtual void activate(); - virtual void setChannelInfo( - const std::string& uri, - const std::string& credentials); - virtual void getChannelInfo(); - virtual BOOL isActive(); - virtual BOOL callStarted(); + virtual void setChannelInfo(const LLSD& channelInfo); + virtual void resetChannelInfo(); + virtual void requestChannelInfo(); + virtual bool isActive() const; + virtual bool callStarted() const; // Session name is a UI label used for feedback about which person, 
// group, or phone number you are talking to @@ -95,8 +94,9 @@ public: void setCallDirection(EDirection direction) {mCallDirection = direction;} EDirection getCallDirection() {return mCallDirection;} + bool isThisVoiceChannel(const LLSD &voiceChannelInfo) { return LLVoiceClient::getInstance()->compareChannels(mChannelInfo, voiceChannelInfo); } + static LLVoiceChannel* getChannelByID(const LLUUID& session_id); - static LLVoiceChannel* getChannelByURI(std::string uri); static LLVoiceChannel* getCurrentVoiceChannel(); static void initClass(); @@ -104,35 +104,30 @@ public: static void suspend(); static void resume(); -protected: + protected: virtual void setState(EState state); /** * Use this method if you want mStateChangedCallback to be executed while state is changed */ void doSetState(const EState& state); - void setURI(std::string uri); // there can be two directions INCOMING and OUTGOING EDirection mCallDirection; - std::string mURI; - std::string mCredentials; LLUUID mSessionID; EState mState; std::string mSessionName; - LLSD mNotifyArgs; - LLSD mCallDialogPayload; + LLSD mNotifyArgs; + LLSD mChannelInfo; // true if call was ended by agent bool mCallEndedByAgent; - BOOL mIgnoreNextSessionLeave; + bool mIgnoreNextSessionLeave; LLHandle<LLPanel> mLoginNotificationHandle; typedef std::map<LLUUID, LLVoiceChannel*> voice_channel_map_t; static voice_channel_map_t sVoiceChannelMap; - typedef std::map<std::string, LLVoiceChannel*> voice_channel_map_uri_t; - static voice_channel_map_uri_t sVoiceChannelURIMap; - + static LLVoiceChannel* sProximalVoiceChannel; static LLVoiceChannel* sCurrentVoiceChannel; static LLVoiceChannel* sSuspendedVoiceChannel; static BOOL sSuspended; @@ -144,55 +139,61 @@ private: class LLVoiceChannelGroup : public LLVoiceChannel { public: - LLVoiceChannelGroup(const LLUUID& session_id, const std::string& session_name); + LLVoiceChannelGroup(const LLUUID& session_id, + const std::string& session_name, + bool is_p2p); + + void handleStatusChange(EStatusType status) override; + void handleError(EStatusType status) override; + void activate() override; + void deactivate() override; + void setChannelInfo(const LLSD &channelInfo) override; + void requestChannelInfo() override; - /*virtual*/ void handleStatusChange(EStatusType status); - /*virtual*/ void handleError(EStatusType status); - /*virtual*/ void activate(); - /*virtual*/ void deactivate(); - /*vritual*/ void setChannelInfo( - const std::string& uri, - const std::string& credentials); - /*virtual*/ void getChannelInfo(); + bool isP2P() { return mIsP2P; } protected: - virtual void setState(EState state); + void setState(EState state) override; private: void voiceCallCapCoro(std::string url); U32 mRetries; BOOL mIsRetrying; + bool mIsP2P; }; class LLVoiceChannelProximal : public LLVoiceChannel, public LLSingleton<LLVoiceChannelProximal> { - LLSINGLETON(LLVoiceChannelProximal); -public: - - /*virtual*/ void onChange(EStatusType status, const std::string &channelURI, bool proximal) override; - /*virtual*/ void handleStatusChange(EStatusType status) override; - /*virtual*/ void handleError(EStatusType status) override; - /*virtual*/ BOOL isActive() override; - /*virtual*/ void activate() override; - /*virtual*/ void deactivate() override; - + LLSINGLETON_C11(LLVoiceChannelProximal); + public: + + void onChange(EStatusType status, const LLSD &channelInfo, bool proximal) override; + void handleStatusChange(EStatusType status) override; + void handleError(EStatusType status) override; + bool isActive() const override; + void 
activate() override; + void deactivate() override; }; class LLVoiceChannelP2P : public LLVoiceChannelGroup { -public: - LLVoiceChannelP2P(const LLUUID& session_id, const std::string& session_name, const LLUUID& other_user_id); - - /*virtual*/ void handleStatusChange(EStatusType status) override; - /*virtual*/ void handleError(EStatusType status) override; - /*virtual*/ void activate() override; - /*virtual*/ void getChannelInfo() override; - - void setSessionHandle(const std::string& handle, const std::string &inURI); - -protected: - virtual void setState(EState state) override; + public: + LLVoiceChannelP2P(const LLUUID &session_id, + const std::string &session_name, + const LLUUID &other_user_id, + LLVoiceP2POutgoingCallInterface * outgoing_call_interface); + + void handleStatusChange(EStatusType status) override; + void handleError(EStatusType status) override; + void activate() override; + void requestChannelInfo() override; + void deactivate() override; + void setChannelInfo(const LLSD& channel_info) override; + void resetChannelInfo() override; + + protected: + void setState(EState state) override; private: @@ -201,10 +202,10 @@ private: * **/ void addToTheRecentPeopleList(); - - std::string mSessionHandle; LLUUID mOtherUserID; BOOL mReceivedCall; + LLVoiceP2POutgoingCallInterface *mOutgoingCallInterface; + LLVoiceP2PIncomingCallInterfacePtr mIncomingCallInterface; }; #endif // LL_VOICECHANNEL_H diff --git a/indra/newview/llvoiceclient.cpp b/indra/newview/llvoiceclient.cpp index 0d882cf49b..a914c4e6c9 100644 --- a/indra/newview/llvoiceclient.cpp +++ b/indra/newview/llvoiceclient.cpp @@ -24,19 +24,22 @@ * $/LicenseInfo$ */ -#include "llviewerprecompiledheaders.h" #include "llvoiceclient.h" -#include "llviewercontrol.h" -#include "llviewerwindow.h" #include "llvoicevivox.h" +#if !__FreeBSD__ +#include "llvoicewebrtc.h" +#endif #include "llviewernetwork.h" +#include "llviewercontrol.h" #include "llcommandhandler.h" +#include "lldir.h" #include "llhttpnode.h" #include "llnotificationsutil.h" #include "llsdserialize.h" #include "llui.h" #include "llkeyboard.h" #include "llagent.h" +#include "lltrans.h" #include "lluiusage.h" const F32 LLVoiceClient::OVERDRIVEN_POWER_LEVEL = 0.7f; @@ -113,16 +116,35 @@ std::string LLVoiceClientStatusObserver::status2string(LLVoiceClientStatusObserv return result; } - +LLVoiceModuleInterface *getVoiceModule(const std::string &voice_server_type) +{ + if (voice_server_type == VIVOX_VOICE_SERVER_TYPE || voice_server_type.empty()) + { + return (LLVoiceModuleInterface *) LLVivoxVoiceClient::getInstance(); + } +#if !__FreeBSD__ + else if (voice_server_type == WEBRTC_VOICE_SERVER_TYPE) + { + return (LLVoiceModuleInterface *) LLWebRTCVoiceClient::getInstance(); + } +#endif + else + { + LLNotificationsUtil::add("VoiceVersionMismatch"); + return nullptr; + } +} /////////////////////////////////////////////////////////////////////////////////////////////// LLVoiceClient::LLVoiceClient(LLPumpIO *pump) : - mVoiceModule(NULL), + mSpatialVoiceModule(NULL), + mNonSpatialVoiceModule(NULL), m_servicePump(NULL), mVoiceEffectEnabled(LLCachedControl<bool>(gSavedSettings, "VoiceMorphingEnabled", true)), mVoiceEffectDefault(LLCachedControl<std::string>(gSavedPerAccountSettings, "VoiceEffectDefault", "00000000-0000-0000-0000-000000000000")), + mVoiceEffectSupportNotified(false), mPTTDirty(true), mPTT(true), mUsePTT(true), @@ -133,7 +155,6 @@ LLVoiceClient::LLVoiceClient(LLPumpIO *pump) mMuteMic(false), mDisableMic(false) { - updateSettings(); init(pump); } @@ -142,46 +163,142 @@ 
LLVoiceClient::LLVoiceClient(LLPumpIO *pump) LLVoiceClient::~LLVoiceClient() { - llassert(!mVoiceModule); } void LLVoiceClient::init(LLPumpIO *pump) { // Initialize all of the voice modules m_servicePump = pump; +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->init(pump); +#endif + LLVivoxVoiceClient::getInstance()->init(pump); } void LLVoiceClient::userAuthorized(const std::string& user_id, const LLUUID &agentID) { - // In the future, we should change this to allow voice module registration - // with a table lookup of sorts. - std::string voice_server = gSavedSettings.getString("VoiceServerType"); - LL_DEBUGS("Voice") << "voice server type " << voice_server << LL_ENDL; - if(voice_server == "vivox") + gAgent.addRegionChangedCallback(boost::bind(&LLVoiceClient::onRegionChanged, this)); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->userAuthorized(user_id, agentID); +#endif + LLVivoxVoiceClient::getInstance()->userAuthorized(user_id, agentID); +} + +void LLVoiceClient::handleSimulatorFeaturesReceived(const LLSD &simulatorFeatures) +{ + std::string voiceServerType = simulatorFeatures["VoiceServerType"].asString(); + if (voiceServerType.empty()) { - mVoiceModule = (LLVoiceModuleInterface *)LLVivoxVoiceClient::getInstance(); + voiceServerType = VIVOX_VOICE_SERVER_TYPE; } - else + + if (mSpatialVoiceModule && !mNonSpatialVoiceModule) + { + // stop processing if we're going to change voice modules + // and we're not currently in non-spatial. + LLVoiceVersionInfo version = mSpatialVoiceModule->getVersion(); + if (version.internalVoiceServerType != voiceServerType) + { + mSpatialVoiceModule->processChannels(false); + } + } + setSpatialVoiceModule(simulatorFeatures["VoiceServerType"].asString()); + + // if we should be in spatial voice, switch to it and set the creds + if (mSpatialVoiceModule && !mNonSpatialVoiceModule) + { + if (!mSpatialCredentials.isUndefined()) + { + mSpatialVoiceModule->setSpatialChannel(mSpatialCredentials); + } + mSpatialVoiceModule->processChannels(true); + } +} + +static void simulator_features_received_callback(const LLUUID& region_id) +{ + LLViewerRegion *region = gAgent.getRegion(); + if (region && (region->getRegionID() == region_id)) + { + LLSD simulatorFeatures; + region->getSimulatorFeatures(simulatorFeatures); + if (LLVoiceClient::getInstance()) + { + LLVoiceClient::getInstance()->handleSimulatorFeaturesReceived(simulatorFeatures); + } + } +} + +void LLVoiceClient::onRegionChanged() +{ + LLViewerRegion *region = gAgent.getRegion(); + if (region && region->simulatorFeaturesReceived()) + { + LLSD simulatorFeatures; + region->getSimulatorFeatures(simulatorFeatures); + if (LLVoiceClient::getInstance()) + { + LLVoiceClient::getInstance()->handleSimulatorFeaturesReceived(simulatorFeatures); + } + } + else if (region) + { + if (mSimulatorFeaturesReceivedSlot.connected()) + { + mSimulatorFeaturesReceivedSlot.disconnect(); + } + mSimulatorFeaturesReceivedSlot = + region->setSimulatorFeaturesReceivedCallback(boost::bind(&simulator_features_received_callback, _1)); + } +} + +void LLVoiceClient::setSpatialVoiceModule(const std::string &voice_server_type) +{ + LLVoiceModuleInterface *module = getVoiceModule(voice_server_type); + if (!module) { - mVoiceModule = NULL; return; } - mVoiceModule->init(m_servicePump); - mVoiceModule->userAuthorized(user_id, agentID); + if (module != mSpatialVoiceModule) + { + if (inProximalChannel()) + { + mSpatialVoiceModule->processChannels(false); + } + module->processChannels(true); + mSpatialVoiceModule = module; + 
mSpatialVoiceModule->updateSettings(); + } } -void LLVoiceClient::setHidden(bool hidden) +void LLVoiceClient::setNonSpatialVoiceModule(const std::string &voice_server_type) { - if (mVoiceModule) + mNonSpatialVoiceModule = getVoiceModule(voice_server_type); + if (!mNonSpatialVoiceModule) { - mVoiceModule->setHidden(hidden); + // we don't have a non-spatial voice module, + // so revert to spatial. + if (mSpatialVoiceModule) + { + mSpatialVoiceModule->processChannels(true); + } + return; } + mNonSpatialVoiceModule->updateSettings(); +} + +void LLVoiceClient::setHidden(bool hidden) +{ +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->setHidden(hidden); +#endif + LLVivoxVoiceClient::getInstance()->setHidden(hidden); } void LLVoiceClient::terminate() { - if (mVoiceModule) mVoiceModule->terminate(); - mVoiceModule = NULL; + if (mSpatialVoiceModule) mSpatialVoiceModule->terminate(); + mSpatialVoiceModule = NULL; m_servicePump = NULL; // Shutdown speaker volume storage before LLSingletonBase::deleteAll() does it @@ -193,15 +310,15 @@ void LLVoiceClient::terminate() const LLVoiceVersionInfo LLVoiceClient::getVersion() { - if (mVoiceModule) + if (mSpatialVoiceModule) { - return mVoiceModule->getVersion(); + return mSpatialVoiceModule->getVersion(); } else { LLVoiceVersionInfo result; result.serverVersion = std::string(); - result.serverType = std::string(); + result.voiceServerType = std::string(); result.mBuildVersion = std::string(); return result; } @@ -215,10 +332,10 @@ void LLVoiceClient::updateSettings() updateMicMuteLogic(); - if (mVoiceModule) - { - mVoiceModule->updateSettings(); - } +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->updateSettings(); +#endif + LLVivoxVoiceClient::getInstance()->updateSettings(); } //-------------------------------------------------- @@ -226,193 +343,156 @@ void LLVoiceClient::updateSettings() void LLVoiceClient::tuningStart() { - if (mVoiceModule) mVoiceModule->tuningStart(); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->tuningStart(); +#endif + LLVivoxVoiceClient::getInstance()->tuningStart(); } void LLVoiceClient::tuningStop() { - if (mVoiceModule) mVoiceModule->tuningStop(); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->tuningStop(); +#endif + LLVivoxVoiceClient::getInstance()->tuningStop(); } bool LLVoiceClient::inTuningMode() { - if (mVoiceModule) - { - return mVoiceModule->inTuningMode(); - } - else - { - return false; - } +#if __FreeBSD__ + return LLVivoxVoiceClient::getInstance()->inTuningMode(); +#else + return LLWebRTCVoiceClient::getInstance()->inTuningMode(); +#endif } void LLVoiceClient::tuningSetMicVolume(float volume) { - if (mVoiceModule) mVoiceModule->tuningSetMicVolume(volume); +#if __FreeBSD__ + LLVivoxVoiceClient::getInstance()->tuningSetMicVolume(volume); +#else + LLWebRTCVoiceClient::getInstance()->tuningSetMicVolume(volume); +#endif } void LLVoiceClient::tuningSetSpeakerVolume(float volume) { - if (mVoiceModule) mVoiceModule->tuningSetSpeakerVolume(volume); +#if __FreeBSD__ + LLVivoxVoiceClient::getInstance()->tuningSetSpeakerVolume(volume); +#else + LLWebRTCVoiceClient::getInstance()->tuningSetSpeakerVolume(volume); +#endif } float LLVoiceClient::tuningGetEnergy(void) { - if (mVoiceModule) - { - return mVoiceModule->tuningGetEnergy(); - } - else - { - return 0.0; - } +#if __FreeBSD__ + return LLVivoxVoiceClient::getInstance()->tuningGetEnergy(); +#else + return LLWebRTCVoiceClient::getInstance()->tuningGetEnergy(); +#endif } - //------------------------------------------------ // devices bool 
LLVoiceClient::deviceSettingsAvailable() { - if (mVoiceModule) - { - return mVoiceModule->deviceSettingsAvailable(); - } - else - { - return false; - } +#if __FreeBSD__ + return LLVivoxVoiceClient::getInstance()->deviceSettingsAvailable(); +#else + return LLWebRTCVoiceClient::getInstance()->deviceSettingsAvailable(); +#endif } bool LLVoiceClient::deviceSettingsUpdated() { - if (mVoiceModule) - { - return mVoiceModule->deviceSettingsUpdated(); - } - else - { - return false; - } +#if __FreeBSD__ + return LLVivoxVoiceClient::getInstance()->deviceSettingsUpdated(); +#else + return LLWebRTCVoiceClient::getInstance()->deviceSettingsUpdated(); +#endif } void LLVoiceClient::refreshDeviceLists(bool clearCurrentList) { - if (mVoiceModule) mVoiceModule->refreshDeviceLists(clearCurrentList); +#if __FreeBSD__ + LLVivoxVoiceClient::getInstance()->refreshDeviceLists(clearCurrentList); +#else + LLWebRTCVoiceClient::getInstance()->refreshDeviceLists(clearCurrentList); +#endif } void LLVoiceClient::setCaptureDevice(const std::string& name) { - if (mVoiceModule) mVoiceModule->setCaptureDevice(name); - + LLVivoxVoiceClient::getInstance()->setCaptureDevice(name); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->setCaptureDevice(name); +#endif } void LLVoiceClient::setRenderDevice(const std::string& name) { - if (mVoiceModule) mVoiceModule->setRenderDevice(name); + LLVivoxVoiceClient::getInstance()->setRenderDevice(name); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->setRenderDevice(name); +#endif } const LLVoiceDeviceList& LLVoiceClient::getCaptureDevices() { - static LLVoiceDeviceList nullCaptureDevices; - if (mVoiceModule) - { - return mVoiceModule->getCaptureDevices(); - } - else - { - return nullCaptureDevices; - } +#if __FreeBSD__ + return LLVivoxVoiceClient::getInstance()->getCaptureDevices(); +#else + return LLWebRTCVoiceClient::getInstance()->getCaptureDevices(); +#endif } const LLVoiceDeviceList& LLVoiceClient::getRenderDevices() { - static LLVoiceDeviceList nullRenderDevices; - if (mVoiceModule) - { - return mVoiceModule->getRenderDevices(); - } - else - { - return nullRenderDevices; - } +#if __FreeBSD__ + return LLVivoxVoiceClient::getInstance()->getRenderDevices(); +#else + return LLWebRTCVoiceClient::getInstance()->getRenderDevices(); +#endif } //-------------------------------------------------- // participants -void LLVoiceClient::getParticipantList(std::set<LLUUID> &participants) +void LLVoiceClient::getParticipantList(std::set<LLUUID> &participants) const { - if (mVoiceModule) - { - mVoiceModule->getParticipantList(participants); - } - else - { - participants = std::set<LLUUID>(); - } +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->getParticipantList(participants); +#endif + LLVivoxVoiceClient::getInstance()->getParticipantList(participants); } -bool LLVoiceClient::isParticipant(const LLUUID &speaker_id) +bool LLVoiceClient::isParticipant(const LLUUID &speaker_id) const { - if(mVoiceModule) - { - return mVoiceModule->isParticipant(speaker_id); - } - return false; + return +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->isParticipant(speaker_id) || +#endif + LLVivoxVoiceClient::getInstance()->isParticipant(speaker_id); } //-------------------------------------------------- // text chat - BOOL LLVoiceClient::isSessionTextIMPossible(const LLUUID& id) { - if (mVoiceModule) - { - return mVoiceModule->isSessionTextIMPossible(id); - } - else - { - return FALSE; - } + // all sessions can do TextIM, as we no longer support PSTN + return TRUE; } BOOL 
LLVoiceClient::isSessionCallBackPossible(const LLUUID& id) { - if (mVoiceModule) - { - return mVoiceModule->isSessionCallBackPossible(id); - } - else - { - return FALSE; - } -} - -/* obsolete -BOOL LLVoiceClient::sendTextMessage(const LLUUID& participant_id, const std::string& message) -{ - if (mVoiceModule) - { - return mVoiceModule->sendTextMessage(participant_id, message); - } - else - { - return FALSE; - } -} -*/ - -void LLVoiceClient::endUserIMSession(const LLUUID& participant_id) -{ - if (mVoiceModule) - { - // mVoiceModule->endUserIMSession(participant_id); // A SLim leftover - } + // we don't support PSTN calls anymore. (did we ever?) + return TRUE; } //---------------------------------------------- @@ -420,9 +500,9 @@ void LLVoiceClient::endUserIMSession(const LLUUID& participant_id) bool LLVoiceClient::inProximalChannel() { - if (mVoiceModule) + if (mSpatialVoiceModule) { - return mVoiceModule->inProximalChannel(); + return mSpatialVoiceModule->inProximalChannel(); } else { @@ -431,128 +511,178 @@ bool LLVoiceClient::inProximalChannel() } void LLVoiceClient::setNonSpatialChannel( - const std::string &uri, - const std::string &credentials) + const LLSD& channelInfo, + bool notify_on_first_join, + bool hangup_on_last_leave) { - if (mVoiceModule) + setNonSpatialVoiceModule(channelInfo["voice_server_type"].asString()); + if (mSpatialVoiceModule && mSpatialVoiceModule != mNonSpatialVoiceModule) { - mVoiceModule->setNonSpatialChannel(uri, credentials); + mSpatialVoiceModule->processChannels(false); + } + if (mNonSpatialVoiceModule) + { + mNonSpatialVoiceModule->processChannels(true); + mNonSpatialVoiceModule->setNonSpatialChannel(channelInfo, notify_on_first_join, hangup_on_last_leave); } } -void LLVoiceClient::setSpatialChannel( - const std::string &uri, - const std::string &credentials) +void LLVoiceClient::setSpatialChannel(const LLSD &channelInfo) { - if (mVoiceModule) + mSpatialCredentials = channelInfo; + LLViewerRegion *region = gAgent.getRegion(); + if (region && region->simulatorFeaturesReceived()) { - mVoiceModule->setSpatialChannel(uri, credentials); + LLSD simulatorFeatures; + region->getSimulatorFeatures(simulatorFeatures); + setSpatialVoiceModule(simulatorFeatures["VoiceServerType"].asString()); + } + else + { + return; } -} -void LLVoiceClient::leaveNonSpatialChannel() -{ - if (mVoiceModule) + if (mSpatialVoiceModule) { - mVoiceModule->leaveNonSpatialChannel(); + mSpatialVoiceModule->setSpatialChannel(channelInfo); } } -void LLVoiceClient::leaveChannel(void) +void LLVoiceClient::leaveNonSpatialChannel() { - if (mVoiceModule) + if (mNonSpatialVoiceModule) { - mVoiceModule->leaveChannel(); + mNonSpatialVoiceModule->leaveNonSpatialChannel(); + mNonSpatialVoiceModule->processChannels(false); + mNonSpatialVoiceModule = nullptr; } } -std::string LLVoiceClient::getCurrentChannel() +void LLVoiceClient::activateSpatialChannel(bool activate) { - if (mVoiceModule) + if (mSpatialVoiceModule) { - return mVoiceModule->getCurrentChannel(); - } - else - { - return std::string(); + mSpatialVoiceModule->processChannels(activate); } } +bool LLVoiceClient::isCurrentChannel(const LLSD& channelInfo) +{ + return +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->isCurrentChannel(channelInfo) || +#endif + LLVivoxVoiceClient::getInstance()->isCurrentChannel(channelInfo); +} -//--------------------------------------- -// invitations - -void LLVoiceClient::callUser(const LLUUID &uuid) +bool LLVoiceClient::compareChannels(const LLSD &channelInfo1, const LLSD &channelInfo2) { - if 
(mVoiceModule) mVoiceModule->callUser(uuid); + return +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->compareChannels(channelInfo1, channelInfo2) || +#endif + LLVivoxVoiceClient::getInstance()->compareChannels(channelInfo1, channelInfo2); } -bool LLVoiceClient::isValidChannel(std::string &session_handle) +LLVoiceP2PIncomingCallInterfacePtr LLVoiceClient::getIncomingCallInterface(const LLSD& voice_call_info) { - if (mVoiceModule) + LLVoiceModuleInterface *module = getVoiceModule(voice_call_info["voice_server_type"]); + if (module) { - return mVoiceModule->isValidChannel(session_handle); - } - else - { - return false; + return module->getIncomingCallInterface(voice_call_info); } + return nullptr; + } -bool LLVoiceClient::answerInvite(std::string &channelHandle) +//--------------------------------------- +// outgoing calls +LLVoiceP2POutgoingCallInterface *LLVoiceClient::getOutgoingCallInterface(const LLSD& voiceChannelInfo) { - if (mVoiceModule) + std::string voice_server_type = gSavedSettings.getString("VoiceServerType"); + if (voice_server_type.empty()) { - return mVoiceModule->answerInvite(channelHandle); + // default to the server type associated with the region we're on. + LLVoiceVersionInfo versionInfo = LLVoiceClient::getInstance()->getVersion(); + voice_server_type = versionInfo.internalVoiceServerType; } - else + if (voiceChannelInfo.has("voice_server_type") && voiceChannelInfo["voice_server_type"] != voice_server_type) { - return false; + // there's a mismatch between what the peer is offering and what our server + // can handle, so downgrade to vivox + voice_server_type = VIVOX_VOICE_SERVER_TYPE; } + LLVoiceModuleInterface *module = getVoiceModule(voice_server_type); + return dynamic_cast<LLVoiceP2POutgoingCallInterface *>(module); } -void LLVoiceClient::declineInvite(std::string &channelHandle) -{ - if (mVoiceModule) mVoiceModule->declineInvite(channelHandle); -} - - //------------------------------------------ // Volume/gain void LLVoiceClient::setVoiceVolume(F32 volume) { - if (mVoiceModule) mVoiceModule->setVoiceVolume(volume); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->setVoiceVolume(volume); +#endif + LLVivoxVoiceClient::getInstance()->setVoiceVolume(volume); } -void LLVoiceClient::setMicGain(F32 volume) +void LLVoiceClient::setMicGain(F32 gain) { - if (mVoiceModule) mVoiceModule->setMicGain(volume); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->setMicGain(gain); +#endif + LLVivoxVoiceClient::getInstance()->setMicGain(gain); } //------------------------------------------ // enable/disable voice features -bool LLVoiceClient::voiceEnabled() +// static +bool LLVoiceClient::onVoiceEffectsNotSupported(const LLSD ¬ification, const LLSD &response) { - if (mVoiceModule) + S32 option = LLNotificationsUtil::getSelectedOption(notification, response); + switch (option) { - return mVoiceModule->voiceEnabled(); + case 0: // "Okay" + gSavedPerAccountSettings.setString("VoiceEffectDefault", LLUUID::null.asString()); + break; + + case 1: // "Cancel" + break; + + default: + llassert(0); + break; } - else + return false; +} + +bool LLVoiceClient::voiceEnabled() +{ + static LLCachedControl<bool> enable_voice_chat(gSavedSettings, "EnableVoiceChat"); + static LLCachedControl<bool> cmd_line_disable_voice(gSavedSettings, "CmdLineDisableVoice"); + bool enabled = enable_voice_chat && !cmd_line_disable_voice && !gNonInteractive; + if (enabled && !mVoiceEffectSupportNotified && getVoiceEffectEnabled() && !getVoiceEffectDefault().isNull()) { - return false; + static const 
LLSD args = llsd::map( + "FAQ_URL", LLTrans::getString("no_voice_morphing_faq_url") + ); + + LLNotificationsUtil::add("VoiceEffectsNotSupported", args, LLSD(), &LLVoiceClient::onVoiceEffectsNotSupported); + mVoiceEffectSupportNotified = true; } + return enabled; } void LLVoiceClient::setVoiceEnabled(bool enabled) { - if (mVoiceModule) - { - mVoiceModule->setVoiceEnabled(enabled); - } +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->setVoiceEnabled(enabled); +#endif + LLVivoxVoiceClient::getInstance()->setVoiceEnabled(enabled); } void LLVoiceClient::updateMicMuteLogic() @@ -571,34 +701,22 @@ void LLVoiceClient::updateMicMuteLogic() // Either of these always overrides any other PTT setting. new_mic_mute = true; } - - if (mVoiceModule) mVoiceModule->setMuteMic(new_mic_mute); -} - -void LLVoiceClient::setLipSyncEnabled(BOOL enabled) -{ - if (mVoiceModule) mVoiceModule->setLipSyncEnabled(enabled); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->setMuteMic(new_mic_mute); +#endif + LLVivoxVoiceClient::getInstance()->setMuteMic(new_mic_mute); } -BOOL LLVoiceClient::lipSyncEnabled() +void LLVoiceClient::setMuteMic(bool muted) { - if (mVoiceModule) - { - return mVoiceModule->lipSyncEnabled(); - } - else + if (mMuteMic != muted) { - return false; + mMuteMic = muted; + updateMicMuteLogic(); + mMicroChangedSignal(); } } -void LLVoiceClient::setMuteMic(bool muted) -{ - mMuteMic = muted; - updateMicMuteLogic(); - mMicroChangedSignal(); -} - // ---------------------------------------------- // PTT @@ -673,119 +791,97 @@ void LLVoiceClient::toggleUserPTTState(void) //------------------------------------------- // nearby speaker accessors -BOOL LLVoiceClient::getVoiceEnabled(const LLUUID& id) +bool LLVoiceClient::getVoiceEnabled(const LLUUID& id) const { - if (mVoiceModule) - { - return mVoiceModule->getVoiceEnabled(id); - } - else - { - return FALSE; - } + return isParticipant(id); } -std::string LLVoiceClient::getDisplayName(const LLUUID& id) +std::string LLVoiceClient::getDisplayName(const LLUUID& id) const { - if (mVoiceModule) - { - return mVoiceModule->getDisplayName(id); - } - else +#if __FreeBSD__ + return LLVivoxVoiceClient::getInstance()->getDisplayName(id); +#else + std::string result = LLWebRTCVoiceClient::getInstance()->getDisplayName(id); + if (result.empty()) { - return std::string(); + result = LLVivoxVoiceClient::getInstance()->getDisplayName(id); } + return result; +#endif } bool LLVoiceClient::isVoiceWorking() const { - if (mVoiceModule) - { - return mVoiceModule->isVoiceWorking(); - } - return false; +#if __FreeBSD__ + return LLVivoxVoiceClient::getInstance()->isVoiceWorking(); +#else + return LLVivoxVoiceClient::getInstance()->isVoiceWorking() || + LLWebRTCVoiceClient::getInstance()->isVoiceWorking(); +#endif } BOOL LLVoiceClient::isParticipantAvatar(const LLUUID& id) { - if (mVoiceModule) - { - return mVoiceModule->isParticipantAvatar(id); - } - else - { - return FALSE; - } + return TRUE; } BOOL LLVoiceClient::isOnlineSIP(const LLUUID& id) { - return FALSE; + return FALSE; } BOOL LLVoiceClient::getIsSpeaking(const LLUUID& id) { - if (mVoiceModule) - { - return mVoiceModule->getIsSpeaking(id); - } - else - { - return FALSE; - } + return +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->getIsSpeaking(id) || +#endif + LLVivoxVoiceClient::getInstance()->getIsSpeaking(id); } BOOL LLVoiceClient::getIsModeratorMuted(const LLUUID& id) { - if (mVoiceModule) - { - return mVoiceModule->getIsModeratorMuted(id); - } - else - { - return FALSE; - } + // don't bother worrying 
about p2p calls, as + // p2p calls don't have mute. + return +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->getIsModeratorMuted(id) || +#endif + LLVivoxVoiceClient::getInstance()->getIsModeratorMuted(id); } F32 LLVoiceClient::getCurrentPower(const LLUUID& id) { - if (mVoiceModule) - { - return mVoiceModule->getCurrentPower(id); - } - else - { - return 0.0; - } +#if __FreeBSD__ + return LLVivoxVoiceClient::getInstance()->getCurrentPower(id); +#else + return std::fmax(LLVivoxVoiceClient::getInstance()->getCurrentPower(id), + LLWebRTCVoiceClient::getInstance()->getCurrentPower(id)); +#endif } BOOL LLVoiceClient::getOnMuteList(const LLUUID& id) { - if (mVoiceModule) - { - return mVoiceModule->getOnMuteList(id); - } - else - { - return FALSE; - } + // don't bother worrying about p2p calls, as + // p2p calls don't have mute. + return LLMuteList::getInstance()->isMuted(id, LLMute::flagVoiceChat); } F32 LLVoiceClient::getUserVolume(const LLUUID& id) { - if (mVoiceModule) - { - return mVoiceModule->getUserVolume(id); - } - else - { - return 0.0; - } +#if __FreeBSD__ + return LLVivoxVoiceClient::getInstance()->getUserVolume(id); +#else + return std::fmax(LLVivoxVoiceClient::getInstance()->getUserVolume(id), LLWebRTCVoiceClient::getInstance()->getUserVolume(id)); +#endif } void LLVoiceClient::setUserVolume(const LLUUID& id, F32 volume) { - if (mVoiceModule) mVoiceModule->setUserVolume(id, volume); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->setUserVolume(id, volume); +#endif + LLVivoxVoiceClient::getInstance()->setUserVolume(id, volume); } //-------------------------------------------------- @@ -793,48 +889,79 @@ void LLVoiceClient::setUserVolume(const LLUUID& id, F32 volume) void LLVoiceClient::addObserver(LLVoiceClientStatusObserver* observer) { - if (mVoiceModule) mVoiceModule->addObserver(observer); + LLVivoxVoiceClient::getInstance()->addObserver(observer); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->addObserver(observer); +#endif } void LLVoiceClient::removeObserver(LLVoiceClientStatusObserver* observer) { - if (mVoiceModule) + if (LLVivoxVoiceClient::instanceExists()) + { + LLVivoxVoiceClient::getInstance()->removeObserver(observer); + } +#if !__FreeBSD__ + if (LLWebRTCVoiceClient::instanceExists()) { - mVoiceModule->removeObserver(observer); + LLWebRTCVoiceClient::getInstance()->removeObserver(observer); } +#endif } void LLVoiceClient::addObserver(LLFriendObserver* observer) { - if (mVoiceModule) mVoiceModule->addObserver(observer); + LLVivoxVoiceClient::getInstance()->addObserver(observer); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->addObserver(observer); +#endif } void LLVoiceClient::removeObserver(LLFriendObserver* observer) { - if (mVoiceModule) + if (LLVivoxVoiceClient::instanceExists()) { - mVoiceModule->removeObserver(observer); + LLVivoxVoiceClient::getInstance()->removeObserver(observer); } +#if !__FreeBSD__ + if (LLWebRTCVoiceClient::instanceExists()) + { + LLWebRTCVoiceClient::getInstance()->removeObserver(observer); + } +#endif } void LLVoiceClient::addObserver(LLVoiceClientParticipantObserver* observer) { - if (mVoiceModule) mVoiceModule->addObserver(observer); + LLVivoxVoiceClient::getInstance()->addObserver(observer); +#if !__FreeBSD__ + LLWebRTCVoiceClient::getInstance()->addObserver(observer); +#endif } void LLVoiceClient::removeObserver(LLVoiceClientParticipantObserver* observer) { - if (mVoiceModule) + if (LLVivoxVoiceClient::instanceExists()) + { + LLVivoxVoiceClient::getInstance()->removeObserver(observer); + } +#if 
!__FreeBSD__ + if (LLWebRTCVoiceClient::instanceExists()) { - mVoiceModule->removeObserver(observer); + LLWebRTCVoiceClient::getInstance()->removeObserver(observer); } +#endif } -std::string LLVoiceClient::sipURIFromID(const LLUUID &id) +std::string LLVoiceClient::sipURIFromID(const LLUUID &id) const { - if (mVoiceModule) + if (mNonSpatialVoiceModule) { - return mVoiceModule->sipURIFromID(id); + return mNonSpatialVoiceModule->sipURIFromID(id); + } + else if (mSpatialVoiceModule) + { + return mSpatialVoiceModule->sipURIFromID(id); } else { @@ -842,9 +969,25 @@ std::string LLVoiceClient::sipURIFromID(const LLUUID &id) } } +LLSD LLVoiceClient::getP2PChannelInfoTemplate(const LLUUID& id) const +{ + if (mNonSpatialVoiceModule) + { + return mNonSpatialVoiceModule->getP2PChannelInfoTemplate(id); + } + else if (mSpatialVoiceModule) + { + return mSpatialVoiceModule->getP2PChannelInfoTemplate(id); + } + else + { + return LLSD(); + } +} + LLVoiceEffectInterface* LLVoiceClient::getVoiceEffectInterface() const { - return getVoiceEffectEnabled() ? dynamic_cast<LLVoiceEffectInterface*>(mVoiceModule) : NULL; + return NULL; } /////////////////// @@ -852,31 +995,54 @@ LLVoiceEffectInterface* LLVoiceClient::getVoiceEffectInterface() const class LLViewerRequiredVoiceVersion : public LLHTTPNode { - static BOOL sAlertedUser; + static bool sAlertedUser; virtual void post( LLHTTPNode::ResponsePtr response, const LLSD& context, const LLSD& input) const { - //You received this messsage (most likely on region cross or - //teleport) - if ( input.has("body") && input["body"].has("major_version") ) + std::string voice_server_type = "vivox"; + if (input.has("body") && input["body"].has("voice_server_type")) + { + voice_server_type = input["body"]["voice_server_type"].asString(); + } + + LLVoiceModuleInterface *voiceModule = NULL; + + if (voice_server_type == "vivox" || voice_server_type.empty()) { - int major_voice_version = - input["body"]["major_version"].asInteger(); - // int minor_voice_version = - // input["body"]["minor_version"].asInteger(); - LLVoiceVersionInfo versionInfo = LLVoiceClient::getInstance()->getVersion(); + voiceModule = (LLVoiceModuleInterface *) LLVivoxVoiceClient::getInstance(); + } +#if !__FreeBSD__ + else if (voice_server_type == "webrtc") + { + voiceModule = (LLVoiceModuleInterface *) LLWebRTCVoiceClient::getInstance(); + } +#endif + else + { + LL_WARNS("Voice") << "Unknown voice server type " << voice_server_type << LL_ENDL; + if (!sAlertedUser) + { + // sAlertedUser = true; + LLNotificationsUtil::add("VoiceVersionMismatch"); + } + return; + } - if (major_voice_version > 1) + LLVoiceVersionInfo versionInfo = voiceModule->getVersion(); + if (input.has("body") && input["body"].has("major_version") && + input["body"]["major_version"].asInteger() > versionInfo.majorVersion) + { + if (!sAlertedUser) { - if (!sAlertedUser) - { - //sAlertedUser = TRUE; - LLNotificationsUtil::add("VoiceVersionMismatch"); - gSavedSettings.setBOOL("EnableVoiceChat", FALSE); // toggles listener - } + // sAlertedUser = true; + LLNotificationsUtil::add("VoiceVersionMismatch"); + LL_WARNS("Voice") << "Voice server version mismatch " << input["body"]["major_version"].asInteger() << "/" + << versionInfo.majorVersion + << LL_ENDL; } + return; } } }; @@ -910,21 +1076,7 @@ class LLViewerParcelVoiceInfo : public LLHTTPNode //we believe we're in if ( body.has("voice_credentials") ) { - LLSD voice_credentials = body["voice_credentials"]; - std::string uri; - std::string credentials; - - if ( voice_credentials.has("channel_uri") ) 
- { - uri = voice_credentials["channel_uri"].asString(); - } - if ( voice_credentials.has("channel_credentials") ) - { - credentials = - voice_credentials["channel_credentials"].asString(); - } - - LLVoiceClient::getInstance()->setSpatialChannel(uri, credentials); + LLVoiceClient::getInstance()->setSpatialChannel(body["voice_credentials"]); } } } @@ -1087,7 +1239,7 @@ void LLSpeakerVolumeStorage::save() } } -BOOL LLViewerRequiredVoiceVersion::sAlertedUser = FALSE; +bool LLViewerRequiredVoiceVersion::sAlertedUser = false; LLHTTPRegistration<LLViewerParcelVoiceInfo> gHTTPRegistrationMessageParcelVoiceInfo( diff --git a/indra/newview/llvoiceclient.h b/indra/newview/llvoiceclient.h index e0ca739034..19dd34f5e0 100644 --- a/indra/newview/llvoiceclient.h +++ b/indra/newview/llvoiceclient.h @@ -34,9 +34,11 @@ class LLVOAvatar; #include "lliosocket.h" #include "v3math.h" #include "llframetimer.h" +#include "llsingleton.h" #include "llcallingcard.h" // for LLFriendObserver #include "llsecapi.h" #include "llcontrol.h" +#include <boost/shared_ptr.hpp> // devices @@ -86,19 +88,54 @@ public: } EStatusType; virtual ~LLVoiceClientStatusObserver() { } - virtual void onChange(EStatusType status, const std::string &channelURI, bool proximal) = 0; + virtual void onChange(EStatusType status, const LLSD& channelInfo, bool proximal) = 0; static std::string status2string(EStatusType inStatus); }; struct LLVoiceVersionInfo { - std::string serverType; + std::string voiceServerType; + std::string internalVoiceServerType; + int majorVersion; + int minorVersion; std::string serverVersion; std::string mBuildVersion; }; ////////////////////////////////// +/// @class LLVoiceP2POutgoingCallInterface +/// @brief Outgoing call interface +/// +/// For providers that support P2P signaling (vivox) +///////////////////////////////// + +class LLVoiceP2POutgoingCallInterface +{ + public: + // initiate an outgoing call to a user + virtual void callUser(const LLUUID &agentID) = 0; + virtual void hangup() = 0; +}; + +////////////////////////////////// +/// @class LLVoiceP2PIncomingCallInterface +/// @brief Incoming call interface +/// +/// For providers that support P2P signaling (vivox) +///////////////////////////////// +class LLVoiceP2PIncomingCallInterface +{ + public: + virtual ~LLVoiceP2PIncomingCallInterface() {} + + virtual bool answerInvite() = 0; + virtual void declineInvite() = 0; +}; + +typedef std::shared_ptr<LLVoiceP2PIncomingCallInterface> LLVoiceP2PIncomingCallInterfacePtr; + +////////////////////////////////// /// @class LLVoiceModuleInterface /// @brief Voice module interface /// @@ -122,6 +159,8 @@ public: virtual const LLVoiceVersionInfo& getVersion()=0; + + ///////////////////// /// @name Tuning //@{ @@ -165,30 +204,32 @@ public: // Note that gestures should only fire if this returns true. 
virtual bool inProximalChannel()=0; - virtual void setNonSpatialChannel(const std::string &uri, - const std::string &credentials)=0; + virtual void setNonSpatialChannel(const LLSD& channelInfo, + bool notify_on_first_join, + bool hangup_on_last_leave)=0; - virtual bool setSpatialChannel(const std::string &uri, - const std::string &credentials)=0; + virtual bool setSpatialChannel(const LLSD& channelInfo)=0; - virtual void leaveNonSpatialChannel()=0; + virtual void leaveNonSpatialChannel() = 0; + virtual void processChannels(bool process) = 0; - virtual void leaveChannel(void)=0; + virtual bool isCurrentChannel(const LLSD &channelInfo) = 0; + virtual bool compareChannels(const LLSD &channelInfo1, const LLSD &channelInfo2) = 0; - // Returns the URI of the current channel, or an empty string if not currently in a channel. - // NOTE that it will return an empty string if it's in the process of joining a channel. - virtual std::string getCurrentChannel()=0; //@} ////////////////////////// - /// @name invitations + /// @name p2p //@{ - // start a voice channel with the specified user - virtual void callUser(const LLUUID &uuid)=0; - virtual bool isValidChannel(std::string& channelHandle)=0; - virtual bool answerInvite(std::string &channelHandle)=0; - virtual void declineInvite(std::string &channelHandle)=0; + + // initiate a call with a peer using the P2P interface, which only applies to some + // voice server types. Otherwise, a group call should be used for P2P + virtual LLVoiceP2POutgoingCallInterface* getOutgoingCallInterface() = 0; + + // an incoming call was received, and the incoming call dialogue is asking for an interface to + // answer or decline. + virtual LLVoiceP2PIncomingCallInterfacePtr getIncomingCallInterface(const LLSD &voice_call_info) = 0; //@} ///////////////////////// @@ -201,23 +242,18 @@ public: ///////////////////////// /// @name enable disable voice and features //@{ - virtual bool voiceEnabled()=0; virtual void setVoiceEnabled(bool enabled)=0; - virtual void setLipSyncEnabled(BOOL enabled)=0; - virtual BOOL lipSyncEnabled()=0; virtual void setMuteMic(bool muted)=0; // Set the mute state of the local mic. //@} ////////////////////////// /// @name nearby speaker accessors //@{ - virtual BOOL getVoiceEnabled(const LLUUID& id)=0; // true if we've received data for this avatar virtual std::string getDisplayName(const LLUUID& id)=0; virtual BOOL isParticipantAvatar(const LLUUID &id)=0; virtual BOOL getIsSpeaking(const LLUUID& id)=0; virtual BOOL getIsModeratorMuted(const LLUUID& id)=0; virtual F32 getCurrentPower(const LLUUID& id)=0; // "power" is related to "amplitude" in a defined way. I'm just not sure what the formula is... 
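// [Editor's note -- illustrative, not part of this patch] The LLSD "channelInfo"
// maps threaded through the reworked channel API above are provider-tagged.
// Judging from LLVivoxVoiceClient::sessionState::getVoiceChannelInfo() and
// getP2PChannelInfoTemplate() later in this diff, a Vivox channel description
// looks roughly like (placeholder variable names):
//
//     LLSD info;
//     info["voice_server_type"]   = "vivox";
//     info["channel_uri"]         = some_sip_uri;     // e.g. sipURIFromID(peer)
//     info["channel_credentials"] = session_hash;     // may be empty
//     info["session_handle"]      = session_handle;   // only once a session exists
//
// A module is expected to ignore channelInfo whose voice_server_type it does not
// own; see the isCurrentChannel()/compareChannels() implementations further down.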
- virtual BOOL getOnMuteList(const LLUUID& id)=0; virtual F32 getUserVolume(const LLUUID& id)=0; virtual void setUserVolume(const LLUUID& id, F32 volume)=0; // set's volume for specified agent, from 0-1 (where .5 is nominal) //@} @@ -228,7 +264,6 @@ public: virtual BOOL isSessionTextIMPossible(const LLUUID& id)=0; virtual BOOL isSessionCallBackPossible(const LLUUID& id)=0; //virtual BOOL sendTextMessage(const LLUUID& participant_id, const std::string& message)=0; - virtual void endUserIMSession(const LLUUID &uuid)=0; //@} // authorize the user @@ -246,7 +281,8 @@ public: virtual void removeObserver(LLVoiceClientParticipantObserver* observer)=0; //@} - virtual std::string sipURIFromID(const LLUUID &id)=0; + virtual std::string sipURIFromID(const LLUUID &id) const=0; + virtual LLSD getP2PChannelInfoTemplate(const LLUUID& id) const=0; //@} }; @@ -369,24 +405,26 @@ public: // returns true iff the user is currently in a proximal (local spatial) channel. // Note that gestures should only fire if this returns true. bool inProximalChannel(); - void setNonSpatialChannel( - const std::string &uri, - const std::string &credentials); - void setSpatialChannel( - const std::string &uri, - const std::string &credentials); + + void setNonSpatialChannel(const LLSD& channelInfo, + bool notify_on_first_join, + bool hangup_on_last_leave); + + void setSpatialChannel(const LLSD &channelInfo); + + void activateSpatialChannel(bool activate); + void leaveNonSpatialChannel(); - // Returns the URI of the current channel, or an empty string if not currently in a channel. - // NOTE that it will return an empty string if it's in the process of joining a channel. - std::string getCurrentChannel(); - // start a voice channel with the specified user - void callUser(const LLUUID &uuid); - bool isValidChannel(std::string& channelHandle); - bool answerInvite(std::string &channelHandle); - void declineInvite(std::string &channelHandle); - void leaveChannel(void); // call this on logout or teleport begin + bool isCurrentChannel(const LLSD& channelInfo); + + bool compareChannels(const LLSD& channelInfo1, const LLSD& channelInfo2); + + // initiate a call with a peer using the P2P interface, which only applies to some + // voice server types. Otherwise, a group call should be used for P2P + LLVoiceP2POutgoingCallInterface* getOutgoingCallInterface(const LLSD& voiceChannelInfo = LLSD()); + LLVoiceP2PIncomingCallInterfacePtr getIncomingCallInterface(const LLSD &voiceCallInfo); ///////////////////////////// // Sending updates of current state @@ -396,7 +434,6 @@ public: void setMicGain(F32 volume); void setUserVolume(const LLUUID& id, F32 volume); // set's volume for specified agent, from 0-1 (where .5 is nominal) bool voiceEnabled(); - void setLipSyncEnabled(BOOL enabled); void setMuteMic(bool muted); // Use this to mute the local mic (for when the client is minimized, etc), ignoring user PTT state. 
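// [Editor's sketch -- not part of this patch, placeholder names] With
// callUser()/answerInvite()/declineInvite() removed from this facade, an
// outgoing P2P call is presumably routed through the provider's P2P interface
// declared above, roughly:
//
//     LLVoiceP2POutgoingCallInterface* p2p =
//         LLVoiceClient::getInstance()->getOutgoingCallInterface(channel_info);
//     if (p2p)   // NULL when the active provider has no P2P signaling
//     {
//         p2p->callUser(peer_agent_id);
//     }
//
// and an incoming invite is answered or declined via the shared_ptr returned by
// getIncomingCallInterface(voice_call_info).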
void setUserPTTState(bool ptt); bool getUserPTTState(); @@ -410,15 +447,13 @@ public: void updateMicMuteLogic(); - BOOL lipSyncEnabled(); - boost::signals2::connection MicroChangedCallback(const micro_changed_signal_t::slot_type& cb ) { return mMicroChangedSignal.connect(cb); } ///////////////////////////// // Accessors for data related to nearby speakers - BOOL getVoiceEnabled(const LLUUID& id); // true if we've received data for this avatar - std::string getDisplayName(const LLUUID& id); + bool getVoiceEnabled(const LLUUID& id) const; // true if we've received data for this avatar + std::string getDisplayName(const LLUUID& id) const; BOOL isOnlineSIP(const LLUUID &id); BOOL isParticipantAvatar(const LLUUID &id); BOOL getIsSpeaking(const LLUUID& id); @@ -428,10 +463,8 @@ public: F32 getUserVolume(const LLUUID& id); ///////////////////////////// - BOOL getAreaVoiceDisabled(); // returns true if the area the avatar is in is speech-disabled. - // Use this to determine whether to show a "no speech" icon in the menu bar. - void getParticipantList(std::set<LLUUID> &participants); - bool isParticipant(const LLUUID& speaker_id); + void getParticipantList(std::set<LLUUID> &participants) const; + bool isParticipant(const LLUUID& speaker_id) const; ////////////////////////// /// @name text chat @@ -439,21 +472,25 @@ public: BOOL isSessionTextIMPossible(const LLUUID& id); BOOL isSessionCallBackPossible(const LLUUID& id); //BOOL sendTextMessage(const LLUUID& participant_id, const std::string& message) const {return true;} ; - void endUserIMSession(const LLUUID &uuid); //@} + void setSpatialVoiceModule(const std::string& voice_server_type); + void setNonSpatialVoiceModule(const std::string &voice_server_type); void userAuthorized(const std::string& user_id, - const LLUUID &agentID); + const LLUUID &agentID); + + void onRegionChanged(); - void addObserver(LLVoiceClientStatusObserver* observer); - void removeObserver(LLVoiceClientStatusObserver* observer); - void addObserver(LLFriendObserver* observer); - void removeObserver(LLFriendObserver* observer); - void addObserver(LLVoiceClientParticipantObserver* observer); - void removeObserver(LLVoiceClientParticipantObserver* observer); + static void addObserver(LLVoiceClientStatusObserver* observer); + static void removeObserver(LLVoiceClientStatusObserver* observer); + static void addObserver(LLFriendObserver* observer); + static void removeObserver(LLFriendObserver* observer); + static void addObserver(LLVoiceClientParticipantObserver* observer); + static void removeObserver(LLVoiceClientParticipantObserver* observer); - std::string sipURIFromID(const LLUUID &id); + std::string sipURIFromID(const LLUUID &id) const; + LLSD getP2PChannelInfoTemplate(const LLUUID& id) const; ////////////////////////// /// @name Voice effects @@ -464,16 +501,29 @@ public: // Returns NULL if voice effects are not supported, or not enabled. LLVoiceEffectInterface* getVoiceEffectInterface() const; //@} -private: + + void handleSimulatorFeaturesReceived(const LLSD &simulatorFeatures); + + private: + void init(LLPumpIO *pump); protected: - LLVoiceModuleInterface* mVoiceModule; + + static bool onVoiceEffectsNotSupported(const LLSD ¬ification, const LLSD &response); + + LLVoiceModuleInterface* mSpatialVoiceModule; + LLVoiceModuleInterface* mNonSpatialVoiceModule; + LLSD mSpatialCredentials; // used to store spatial credentials for vivox + // so they're available when the region voice + // server is retrieved. 
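// [Editor's note -- illustrative, not part of this patch] The former single
// mVoiceModule pointer is split in two: mSpatialVoiceModule drives the
// region/parcel (spatial) channel, while mNonSpatialVoiceModule drives ad-hoc,
// group, and P2P channels. Each is chosen from the "voice_server_type" string
// reported by the region ("vivox" or "webrtc") via setSpatialVoiceModule() /
// setNonSpatialVoiceModule(), in the same style as the dispatch shown earlier
// in this diff (sketch only):
//
//     if (voice_server_type == "webrtc")
//         module = (LLVoiceModuleInterface*) LLWebRTCVoiceClient::getInstance();
//     else
//         module = (LLVoiceModuleInterface*) LLVivoxVoiceClient::getInstance();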
LLPumpIO *m_servicePump; + boost::signals2::connection mSimulatorFeaturesReceivedSlot; LLCachedControl<bool> mVoiceEffectEnabled; LLCachedControl<std::string> mVoiceEffectDefault; + bool mVoiceEffectSupportNotified; bool mPTTDirty; bool mPTT; diff --git a/indra/newview/llvoicevivox.cpp b/indra/newview/llvoicevivox.cpp index cf5ee3af54..06f5a5c948 100644 --- a/indra/newview/llvoicevivox.cpp +++ b/indra/newview/llvoicevivox.cpp @@ -80,12 +80,14 @@ extern LLMenuBarGL* gMenuBarView; extern void handle_voice_morphing_subscribe(); +const std::string VIVOX_VOICE_SERVER_TYPE = "vivox"; + namespace { const F32 VOLUME_SCALE_VIVOX = 0.01f; const F32 SPEAKING_TIMEOUT = 1.f; - static const std::string VOICE_SERVER_TYPE = "Vivox"; + static const std::string VISIBLE_VOICE_SERVER_TYPE = "Vivox"; // Don't retry connecting to the daemon more frequently than this: const F32 DAEMON_CONNECT_THROTTLE_SECONDS = 1.0f; @@ -293,9 +295,7 @@ LLVivoxVoiceClient::LLVivoxVoiceClient() : mTuningSpeakerVolumeDirty(true), mDevicesListUpdated(false), - mAreaVoiceDisabled(false), mAudioSession(), // TBD - should be NULL - mAudioSessionChanged(false), mNextAudioSession(), mCurrentParcelLocalID(0), @@ -325,10 +325,9 @@ LLVivoxVoiceClient::LLVivoxVoiceClient() : mMicVolumeDirty(true), mVoiceEnabled(false), + mProcessChannels(false), mWriteInProgress(false), - mLipSyncEnabled(false), - mVoiceFontsReceived(false), mVoiceFontsNew(false), mVoiceFontListDirty(false), @@ -358,7 +357,12 @@ LLVivoxVoiceClient::LLVivoxVoiceClient() : mSpeakerVolume = scale_speaker_volume(0); mVoiceVersion.serverVersion = ""; - mVoiceVersion.serverType = VOICE_SERVER_TYPE; + mVoiceVersion.voiceServerType = VISIBLE_VOICE_SERVER_TYPE; + mVoiceVersion.internalVoiceServerType = VIVOX_VOICE_SERVER_TYPE; + mVoiceVersion.majorVersion = 1; + mVoiceVersion.minorVersion = 0; + mVoiceVersion.mBuildVersion = ""; + mVoiceVersion.serverVersion = ""; // gMuteListp isn't set up at this point, so we defer this until later. // gMuteListp->addObserver(&mutelist_listener); @@ -455,7 +459,7 @@ const LLVoiceVersionInfo& LLVivoxVoiceClient::getVersion() void LLVivoxVoiceClient::updateSettings() { - setVoiceEnabled(voiceEnabled()); + setVoiceEnabled(LLVoiceClient::getInstance()->voiceEnabled()); setEarLocation(gSavedSettings.getS32("VoiceEarLocation")); std::string inputDevice = gSavedSettings.getString("VoiceInputAudioDevice"); @@ -464,7 +468,6 @@ void LLVivoxVoiceClient::updateSettings() setRenderDevice(outputDevice); F32 mic_level = gSavedSettings.getF32("AudioLevelMic"); setMicGain(mic_level); - setLipSyncEnabled(gSavedSettings.getBOOL("LipSyncEnabled")); } ///////////////////////////// @@ -914,7 +917,7 @@ bool LLVivoxVoiceClient::callbackEndDaemon(const LLSD& data) bool LLVivoxVoiceClient::startAndLaunchDaemon() { //--------------------------------------------------------------------- - if (!voiceEnabled()) + if (!LLVoiceClient::getInstance()->voiceEnabled()) { // Voice is locked out, we must not launch the vivox daemon. 
LL_WARNS("Voice") << "voice disabled; not starting daemon" << LL_ENDL; @@ -1179,7 +1182,9 @@ bool LLVivoxVoiceClient::provisionVoiceAccount() do { LLVoiceVivoxStats::getInstance()->provisionAttemptStart(); - result = httpAdapter->postAndSuspend(httpRequest, url, LLSD(), httpOpts); + LLSD body; + body["voice_server_type"] = "vivox"; + result = httpAdapter->postAndSuspend(httpRequest, url, body, httpOpts); if (sShuttingDown) { @@ -1223,7 +1228,7 @@ bool LLVivoxVoiceClient::provisionVoiceAccount() LL_WARNS("Voice") << "Could not access voice provision cap after " << retryCount << " attempts." << LL_ENDL; return false; } - + LL_DEBUGS("Voice") << "Voice Provision Result." << result << LL_ENDL; std::string voiceSipUriHostname; std::string voiceAccountServerUri; std::string voiceUserName = result["username"].asString(); @@ -1619,48 +1624,7 @@ bool LLVivoxVoiceClient::requestParcelVoiceInfo() return false; } - std::string uri; - std::string credentials; - - if (result.has("voice_credentials")) - { - LLSD voice_credentials = result["voice_credentials"]; - if (voice_credentials.has("channel_uri")) - { - LL_DEBUGS("Voice") << "got voice channel uri" << LL_ENDL; - uri = voice_credentials["channel_uri"].asString(); - } - else - { - LL_WARNS("Voice") << "No voice channel uri" << LL_ENDL; - } - - if (voice_credentials.has("channel_credentials")) - { - LL_DEBUGS("Voice") << "got voice channel credentials" << LL_ENDL; - credentials = - voice_credentials["channel_credentials"].asString(); - } - else - { - LLVoiceChannel* channel = LLVoiceChannel::getCurrentVoiceChannel(); - if (channel != NULL) - { - if (channel->getSessionName().empty() && channel->getSessionID().isNull()) - { - if (LLViewerParcelMgr::getInstance()->allowAgentVoice()) - { - LL_WARNS("Voice") << "No channel credentials for default channel" << LL_ENDL; - } - } - else - { - LL_WARNS("Voice") << "No voice channel credentials" << LL_ENDL; - } - } - } - } - else + if (!result.has("voice_credentials")) { if (LLViewerParcelMgr::getInstance()->allowAgentVoice()) { @@ -1674,7 +1638,7 @@ bool LLVivoxVoiceClient::requestParcelVoiceInfo() // set the spatial channel. If no voice credentials or uri are // available, then we simply drop out of voice spatially. - return !setSpatialChannel(uri, credentials); + return setSpatialChannel(result["voice_credentials"]); } bool LLVivoxVoiceClient::addAndJoinSession(const sessionStatePtr_t &nextSession) @@ -1686,7 +1650,6 @@ bool LLVivoxVoiceClient::addAndJoinSession(const sessionStatePtr_t &nextSession) LL_INFOS("Voice") << "Adding or joining voice session " << nextSession->mHandle << LL_ENDL; mAudioSession = nextSession; - mAudioSessionChanged = true; if (!mAudioSession || !mAudioSession->mReconnect) { mNextAudioSession.reset(); @@ -1813,7 +1776,7 @@ bool LLVivoxVoiceClient::addAndJoinSession(const sessionStatePtr_t &nextSession) } else if ((message == "failed") || (message == "removed") || (message == "timeout")) { // we will get a removed message if a voice call is declined. - + LL_INFOS("Voice") << "Result:" << result << LL_ENDL; if (message == "failed") { int reason = result["reason"].asInteger(); @@ -1943,9 +1906,8 @@ bool LLVivoxVoiceClient::terminateAudioSession(bool wait) sessionStatePtr_t oldSession = mAudioSession; + notifyStatusObservers(LLVoiceClientStatusObserver::STATUS_LEFT_CHANNEL); // needs mAudioSession for uri mAudioSession.reset(); - // We just notified status observers about this change. Don't do it again. - mAudioSessionChanged = false; // The old session may now need to be deleted. 
reapSession(oldSession); @@ -1953,9 +1915,9 @@ bool LLVivoxVoiceClient::terminateAudioSession(bool wait) else { LL_WARNS("Voice") << "terminateAudioSession(" << wait << ") with NULL mAudioSession" << LL_ENDL; + notifyStatusObservers(LLVoiceClientStatusObserver::STATUS_LEFT_CHANNEL); } - notifyStatusObservers(LLVoiceClientStatusObserver::STATUS_LEFT_CHANNEL); // Always reset the terminate request flag when we get here. // Some slower PCs have a race condition where they can switch to an incoming P2P call faster than the state machine leaves @@ -2037,7 +1999,6 @@ bool LLVivoxVoiceClient::waitForChannel() break; case VOICE_CHANNEL_STATE_START_CHANNEL_PROCESSING: - mIsProcessingChannels = true; llcoro::suspend(); state = VOICE_CHANNEL_STATE_PROCESS_CHANNEL; break; @@ -2051,7 +2012,7 @@ bool LLVivoxVoiceClient::waitForChannel() { recordingAndPlaybackMode(); } - else if (checkParcelChanged() || (mNextAudioSession == NULL)) + else if (mProcessChannels && (mNextAudioSession == NULL) && checkParcelChanged()) { // the parcel is changed, or we have no pending audio sessions, // so try to request the parcel voice info @@ -2066,15 +2027,23 @@ bool LLVivoxVoiceClient::waitForChannel() } else if (mNextAudioSession) { + if (!mNextAudioSession->mIsP2P && !mProcessChannels) + { + llcoro::suspend(); + break; + } sessionStatePtr_t joinSession = mNextAudioSession; mNextAudioSession.reset(); + mIsProcessingChannels = true; if (!runSession(joinSession)) //suspends { + mIsProcessingChannels = false; LL_DEBUGS("Voice") << "runSession returned false; leaving inner loop" << LL_ENDL; break; } else { + mIsProcessingChannels = false; LL_DEBUGS("Voice") << "runSession returned true to inner loop" << " RelogRequested=" << mRelogRequested @@ -2160,13 +2129,13 @@ bool LLVivoxVoiceClient::runSession(const sessionStatePtr_t &session) { LL_INFOS("Voice") << "running new voice session " << session->mHandle << LL_ENDL; - bool joined_session = addAndJoinSession(session); - - if (sShuttingDown) + if (sShuttingDown || !mProcessChannels) { return false; } + bool joined_session = addAndJoinSession(session); + if (!joined_session) { notifyStatusObservers(LLVoiceClientStatusObserver::ERROR_UNKNOWN); @@ -2195,7 +2164,8 @@ bool LLVivoxVoiceClient::runSession(const sessionStatePtr_t &session) && mVoiceEnabled && isGatewayRunning() && !mSessionTerminateRequested - && !mTuningMode) + && !mTuningMode + && mProcessChannels) { sendCaptureAndRenderDevices(); // suspends @@ -2447,95 +2417,11 @@ int LLVivoxVoiceClient::voicePlaybackBuffer() bool LLVivoxVoiceClient::performMicTuning() { LL_INFOS("Voice") << "Entering voice tuning mode." << LL_ENDL; - mIsInTuningMode = true; - llcoro::suspend(); while (mTuningMode && !sShuttingDown) { - - if (mCaptureDeviceDirty || mRenderDeviceDirty) - { - // These can't be changed while in tuning mode. Set them before starting. - std::ostringstream stream; - - buildSetCaptureDevice(stream); - buildSetRenderDevice(stream); - - if (!stream.str().empty()) - { - writeString(stream.str()); - } - - llcoro::suspendUntilTimeout(UPDATE_THROTTLE_SECONDS); - } - - // loop mic back to render device. 
- //setMuteMic(0); // make sure the mic is not muted - std::ostringstream stream; - - stream << "<Request requestId=\"" << mCommandCookie++ << "\" action=\"Connector.MuteLocalMic.1\">" - << "<ConnectorHandle>" << LLVivoxSecurity::getInstance()->connectorHandle() << "</ConnectorHandle>" - << "<Value>false</Value>" - << "</Request>\n\n\n"; - - // Dirty the mute mic state so that it will get reset when we finishing previewing - mMuteMicDirty = true; - mTuningSpeakerVolumeDirty = true; - - writeString(stream.str()); - tuningCaptureStartSendMessage(1); // 1-loop, zero, don't loop - - //--------------------------------------------------------------------- - if (!sShuttingDown) - { - llcoro::suspend(); - } - - while (mTuningMode && !mCaptureDeviceDirty && !mRenderDeviceDirty && !sShuttingDown) - { - // process mic/speaker volume changes - if (mTuningMicVolumeDirty || mTuningSpeakerVolumeDirty) - { - std::ostringstream stream; - - if (mTuningMicVolumeDirty) - { - LL_INFOS("Voice") << "setting tuning mic level to " << mTuningMicVolume << LL_ENDL; - stream - << "<Request requestId=\"" << mCommandCookie++ << "\" action=\"Aux.SetMicLevel.1\">" - << "<Level>" << mTuningMicVolume << "</Level>" - << "</Request>\n\n\n"; - } - - if (mTuningSpeakerVolumeDirty) - { - LL_INFOS("Voice") << "setting tuning speaker level to " << mTuningSpeakerVolume << LL_ENDL; - stream - << "<Request requestId=\"" << mCommandCookie++ << "\" action=\"Aux.SetSpeakerLevel.1\">" - << "<Level>" << mTuningSpeakerVolume << "</Level>" - << "</Request>\n\n\n"; - } - - mTuningMicVolumeDirty = false; - mTuningSpeakerVolumeDirty = false; - - if (!stream.str().empty()) - { - writeString(stream.str()); - } - } - llcoro::suspend(); - } - - //--------------------------------------------------------------------- - - // transition out of mic tuning - tuningCaptureStopSendMessage(); - if ((mCaptureDeviceDirty || mRenderDeviceDirty) && !sShuttingDown) - { - llcoro::suspendUntilTimeout(UPDATE_THROTTLE_SECONDS); - } + llcoro::suspendUntilTimeout(UPDATE_THROTTLE_SECONDS); } mIsInTuningMode = false; @@ -2658,9 +2544,9 @@ void LLVivoxVoiceClient::sessionCreateSendMessage(const sessionStatePtr_t &sessi stream << "<ConnectAudio>" << (startAudio?"true":"false") << "</ConnectAudio>" << "<ConnectText>" << (startText?"true":"false") << "</ConnectText>" - << "<VoiceFontID>" << font_index << "</VoiceFontID>" << "<Name>" << mChannelName << "</Name>" << "</Request>\n\n\n"; + LL_WARNS("Voice") << "Session.Create: " << stream.str() << LL_ENDL; writeString(stream.str()); } @@ -2696,7 +2582,6 @@ void LLVivoxVoiceClient::sessionGroupAddSessionSendMessage(const sessionStatePtr << "<Name>" << mChannelName << "</Name>" << "<ConnectAudio>" << (startAudio?"true":"false") << "</ConnectAudio>" << "<ConnectText>" << (startText?"true":"false") << "</ConnectText>" - << "<VoiceFontID>" << font_index << "</VoiceFontID>" << "<Password>" << password << "</Password>" << "<PasswordHashAlgorithm>SHA1UserName</PasswordHashAlgorithm>" << "</Request>\n\n\n" @@ -2720,7 +2605,6 @@ void LLVivoxVoiceClient::sessionMediaConnectSendMessage(const sessionStatePtr_t << "<Request requestId=\"" << session->mHandle << "\" action=\"Session.MediaConnect.1\">" << "<SessionGroupHandle>" << session->mGroupHandle << "</SessionGroupHandle>" << "<SessionHandle>" << session->mHandle << "</SessionHandle>" - << "<VoiceFontID>" << font_index << "</VoiceFontID>" << "<Media>Audio</Media>" << "</Request>\n\n\n"; @@ -2960,6 +2844,9 @@ void LLVivoxVoiceClient::tuningStart() void LLVivoxVoiceClient::tuningStop() { 
mTuningMode = false; + // force a renegotiation. + mCurrentParcelLocalID = 0; + mCurrentRegionName = ""; } bool LLVivoxVoiceClient::inTuningMode() @@ -3951,7 +3838,6 @@ void LLVivoxVoiceClient::joinedAudioSession(const sessionStatePtr_t &session) sessionStatePtr_t oldSession = mAudioSession; mAudioSession = session; - mAudioSessionChanged = true; // The old session may now need to be deleted. reapSession(oldSession); @@ -4840,7 +4726,7 @@ void LLVivoxVoiceClient::sessionState::VerifySessions() void LLVivoxVoiceClient::getParticipantList(std::set<LLUUID> &participants) { - if(mAudioSession) + if(mProcessChannels && mAudioSession) { for(participantUUIDMap::iterator iter = mAudioSession->mParticipantsByUUID.begin(); iter != mAudioSession->mParticipantsByUUID.end(); @@ -4853,11 +4739,11 @@ void LLVivoxVoiceClient::getParticipantList(std::set<LLUUID> &participants) bool LLVivoxVoiceClient::isParticipant(const LLUUID &speaker_id) { - if(mAudioSession) + if(mProcessChannels && mAudioSession) { - return (mAudioSession->mParticipantsByUUID.find(speaker_id) != mAudioSession->mParticipantsByUUID.end()); + return (mAudioSession->mParticipantsByUUID.find(speaker_id) != mAudioSession->mParticipantsByUUID.end()); } - return false; + return false; } @@ -4961,8 +4847,10 @@ bool LLVivoxVoiceClient::switchChannel( // If a terminate has been requested, we need to compare against where the URI we're already headed to. if(mNextAudioSession) { - if(mNextAudioSession->mSIPURI != uri) + if (mNextAudioSession->mSIPURI != uri) + { needsSwitch = true; + } } else { @@ -5052,22 +4940,23 @@ void LLVivoxVoiceClient::joinSession(const sessionStatePtr_t &session) } } -void LLVivoxVoiceClient::setNonSpatialChannel( - const std::string &uri, - const std::string &credentials) +void LLVivoxVoiceClient::setNonSpatialChannel(const LLSD& channelInfo, bool notify_on_first_join, bool hangup_on_last_leave) { - switchChannel(uri, false, false, false, credentials); + switchChannel(channelInfo["channel_uri"].asString(), false, false, false, channelInfo["channel_credentials"].asString()); } -bool LLVivoxVoiceClient::setSpatialChannel( - const std::string &uri, - const std::string &credentials) +bool LLVivoxVoiceClient::setSpatialChannel(const LLSD& channelInfo) { - mSpatialSessionURI = uri; - mSpatialSessionCredentials = credentials; - mAreaVoiceDisabled = mSpatialSessionURI.empty(); + mSpatialSessionURI = channelInfo["channel_uri"].asString(); + mSpatialSessionCredentials = channelInfo["channel_credentials"].asString(); + if (!mProcessChannels) + { + // we're not even processing channels (another provider is) so + // save the credentials aside and exit + return false; + } - LL_DEBUGS("Voice") << "got spatial channel uri: \"" << uri << "\"" << LL_ENDL; + LL_DEBUGS("Voice") << "got spatial channel uri: \"" << mSpatialSessionURI << "\"" << LL_ENDL; if((mIsInChannel && mAudioSession && !(mAudioSession->mIsSpatial)) || (mNextAudioSession && !(mNextAudioSession->mIsSpatial))) { @@ -5084,84 +4973,30 @@ bool LLVivoxVoiceClient::setSpatialChannel( void LLVivoxVoiceClient::callUser(const LLUUID &uuid) { std::string userURI = sipURIFromID(uuid); + mProcessChannels = true; switchChannel(userURI, false, true, true); } -#if 0 -// Vivox text IMs are not in use. -LLVivoxVoiceClient::sessionStatePtr_t LLVivoxVoiceClient::startUserIMSession(const LLUUID &uuid) -{ - // Figure out if a session with the user already exists - sessionStatePtr_t session(findSession(uuid)); - if(!session) - { - // No session with user, need to start one. 
- std::string uri = sipURIFromID(uuid); - session = addSession(uri); - - llassert(session); - if (!session) - return session; - - session->mIsSpatial = false; - session->mReconnect = false; - session->mIsP2P = true; - session->mCallerID = uuid; - } - - if(session->mHandle.empty()) - { - // Session isn't active -- start it up. - sessionCreateSendMessage(session, false, false); - } - else - { - // Session is already active -- start up text. - sessionTextConnectSendMessage(session); - } +void LLVivoxVoiceClient::hangup() { leaveChannel(); } - return session; -} -#endif -void LLVivoxVoiceClient::endUserIMSession(const LLUUID &uuid) +LLVoiceP2PIncomingCallInterfacePtr LLVivoxVoiceClient::getIncomingCallInterface(const LLSD &voice_call_info) { -#if 0 - // Vivox text IMs are not in use. - - // Figure out if a session with the user exists - sessionStatePtr_t session(findSession(uuid)); - if(session) - { - // found the session - if(!session->mHandle.empty()) - { - // sessionTextDisconnectSendMessage(session); // a SLim leftover, not used any more. - } - } - else - { - LL_DEBUGS("Voice") << "Session not found for participant ID " << uuid << LL_ENDL; - } -#endif + return std::make_shared<LLVivoxVoiceP2PIncomingCall>(voice_call_info); } -bool LLVivoxVoiceClient::isValidChannel(std::string &sessionHandle) -{ - return(findSession(sessionHandle) != NULL); -} -bool LLVivoxVoiceClient::answerInvite(std::string &sessionHandle) +bool LLVivoxVoiceClient::answerInvite(const std::string &sessionHandle) { // this is only ever used to answer incoming p2p call invites. sessionStatePtr_t session(findSession(sessionHandle)); - if(session) + if (session) { session->mIsSpatial = false; session->mReconnect = false; - session->mIsP2P = true; - + session->mIsP2P = true; + mProcessChannels = true; joinSession(session); return true; } @@ -5169,6 +5004,15 @@ bool LLVivoxVoiceClient::answerInvite(std::string &sessionHandle) return false; } +void LLVivoxVoiceClient::declineInvite(const std::string &sessionHandle) +{ + sessionStatePtr_t session(findSession(sessionHandle)); + if (session) + { + sessionMediaDisconnectSendMessage(session); + } +} + bool LLVivoxVoiceClient::isVoiceWorking() const { @@ -5238,16 +5082,6 @@ BOOL LLVivoxVoiceClient::isSessionTextIMPossible(const LLUUID &session_id) return result; } - -void LLVivoxVoiceClient::declineInvite(std::string &sessionHandle) -{ - sessionStatePtr_t session(findSession(sessionHandle)); - if(session) - { - sessionMediaDisconnectSendMessage(session); - } -} - void LLVivoxVoiceClient::leaveNonSpatialChannel() { LL_DEBUGS("Voice") << "Request to leave spacial channel." << LL_ENDL; @@ -5264,16 +5098,43 @@ void LLVivoxVoiceClient::leaveNonSpatialChannel() sessionTerminate(); } -std::string LLVivoxVoiceClient::getCurrentChannel() +void LLVivoxVoiceClient::processChannels(bool process) { - std::string result; + mCurrentParcelLocalID = -1; + mCurrentRegionName.clear(); + mProcessChannels = process; +} - if (mIsInChannel && !mSessionTerminateRequested) +bool LLVivoxVoiceClient::isCurrentChannel(const LLSD &channelInfo) +{ + if (!mProcessChannels + || (channelInfo.has("voice_server_type") && channelInfo["voice_server_type"].asString() != VIVOX_VOICE_SERVER_TYPE) + || mSessionTerminateRequested) + { + return false; + } + // favor the next audio session, as that's the one we're bringing up. 
+ sessionStatePtr_t session = mNextAudioSession; + if (!session) { - result = getAudioSessionURI(); + session = mAudioSession; } + if (session) + { + if (!channelInfo["session_handle"].asString().empty()) + { + return session->mHandle == channelInfo["session_handle"].asString(); + } + return channelInfo["channel_uri"].asString() == session->mSIPURI; + } + return false; +} - return result; +bool LLVivoxVoiceClient::compareChannels(const LLSD& channelInfo1, const LLSD& channelInfo2) +{ + return (!channelInfo1.has("voice_server_type") || (channelInfo1["voice_server_type"] == VIVOX_VOICE_SERVER_TYPE)) && + (!channelInfo2.has("voice_server_type") || (channelInfo2["voice_server_type"] == VIVOX_VOICE_SERVER_TYPE)) && + (channelInfo1["channel_uri"] == channelInfo2["channel_uri"]); } bool LLVivoxVoiceClient::inProximalChannel() @@ -5288,7 +5149,7 @@ bool LLVivoxVoiceClient::inProximalChannel() return result; } -std::string LLVivoxVoiceClient::sipURIFromID(const LLUUID &id) +std::string LLVivoxVoiceClient::sipURIFromID(const LLUUID &id) const { std::string result; result = "sip:"; @@ -5299,6 +5160,14 @@ std::string LLVivoxVoiceClient::sipURIFromID(const LLUUID &id) return result; } +LLSD LLVivoxVoiceClient::getP2PChannelInfoTemplate(const LLUUID& id) const +{ + LLSD result; + result["channel_uri"] = sipURIFromID(id); + result["voice_server_type"] = VIVOX_VOICE_SERVER_TYPE; + return result; +} + std::string LLVivoxVoiceClient::sipURIFromAvatar(LLVOAvatar *avatar) { std::string result; @@ -5313,17 +5182,7 @@ std::string LLVivoxVoiceClient::sipURIFromAvatar(LLVOAvatar *avatar) return result; } -std::string LLVivoxVoiceClient::nameFromAvatar(LLVOAvatar *avatar) -{ - std::string result; - if(avatar) - { - result = nameFromID(avatar->getID()); - } - return result; -} - -std::string LLVivoxVoiceClient::nameFromID(const LLUUID &uuid) +std::string LLVivoxVoiceClient::nameFromID(const LLUUID &uuid) const { std::string result; @@ -5397,11 +5256,6 @@ bool LLVivoxVoiceClient::IDFromName(const std::string inName, LLUUID &uuid) return result; } -std::string LLVivoxVoiceClient::displayNameFromAvatar(LLVOAvatar *avatar) -{ - return avatar->getFullname(); -} - std::string LLVivoxVoiceClient::sipURIFromName(std::string &name) { std::string result; @@ -5442,12 +5296,15 @@ bool LLVivoxVoiceClient::inSpatialChannel(void) return result; } -std::string LLVivoxVoiceClient::getAudioSessionURI() + +LLSD LLVivoxVoiceClient::getAudioSessionChannelInfo() { - std::string result; + LLSD result; - if(mAudioSession) - result = mAudioSession->mSIPURI; + if (mAudioSession) + { + result = mAudioSession->getVoiceChannelInfo(); + } return result; } @@ -5589,8 +5446,8 @@ void LLVivoxVoiceClient::leaveChannel(void) { LL_DEBUGS("Voice") << "leaving channel for teleport/logout" << LL_ENDL; mChannelName.clear(); - sessionTerminate(); } + sessionTerminate(); } void LLVivoxVoiceClient::setMuteMic(bool muted) @@ -5639,6 +5496,8 @@ void LLVivoxVoiceClient::setVoiceEnabled(bool enabled) LLVoiceChannel::getCurrentVoiceChannel()->deactivate(); gAgent.setVoiceConnected(false); status = LLVoiceClientStatusObserver::STATUS_VOICE_DISABLED; + mCurrentParcelLocalID = -1; + mCurrentRegionName.clear(); } notifyStatusObservers(status); @@ -5649,31 +5508,6 @@ void LLVivoxVoiceClient::setVoiceEnabled(bool enabled) } } -bool LLVivoxVoiceClient::voiceEnabled() -{ - return gSavedSettings.getBOOL("EnableVoiceChat") && - !gSavedSettings.getBOOL("CmdLineDisableVoice") && - !gNonInteractive; -} - -void LLVivoxVoiceClient::setLipSyncEnabled(BOOL enabled) -{ - 
mLipSyncEnabled = enabled; -} - -BOOL LLVivoxVoiceClient::lipSyncEnabled() -{ - - if ( mVoiceEnabled ) - { - return mLipSyncEnabled; - } - else - { - return FALSE; - } -} - void LLVivoxVoiceClient::setEarLocation(S32 loc) { @@ -5716,27 +5550,17 @@ void LLVivoxVoiceClient::setMicGain(F32 volume) ///////////////////////////// // Accessors for data related to nearby speakers -BOOL LLVivoxVoiceClient::getVoiceEnabled(const LLUUID& id) -{ - BOOL result = FALSE; - participantStatePtr_t participant(findParticipantByID(id)); - if(participant) - { - // I'm not sure what the semantics of this should be. - // For now, if we have any data about the user that came through the chat channel, assume they're voice-enabled. - result = TRUE; - } - - return result; -} std::string LLVivoxVoiceClient::getDisplayName(const LLUUID& id) { std::string result; - participantStatePtr_t participant(findParticipantByID(id)); - if(participant) + if (mProcessChannels) { - result = participant->mDisplayName; + participantStatePtr_t participant(findParticipantByID(id)); + if (participant) + { + result = participant->mDisplayName; + } } return result; @@ -5747,15 +5571,17 @@ std::string LLVivoxVoiceClient::getDisplayName(const LLUUID& id) BOOL LLVivoxVoiceClient::getIsSpeaking(const LLUUID& id) { BOOL result = FALSE; - - participantStatePtr_t participant(findParticipantByID(id)); - if(participant) + if (mProcessChannels) { - if (participant->mSpeakingTimeout.getElapsedTimeF32() > SPEAKING_TIMEOUT) + participantStatePtr_t participant(findParticipantByID(id)); + if (participant) { - participant->mIsSpeaking = FALSE; + if (participant->mSpeakingTimeout.getElapsedTimeF32() > SPEAKING_TIMEOUT) + { + participant->mIsSpeaking = FALSE; + } + result = participant->mIsSpeaking; } - result = participant->mIsSpeaking; } return result; @@ -5764,7 +5590,10 @@ BOOL LLVivoxVoiceClient::getIsSpeaking(const LLUUID& id) BOOL LLVivoxVoiceClient::getIsModeratorMuted(const LLUUID& id) { BOOL result = FALSE; - + if (!mProcessChannels) + { + return FALSE; + } participantStatePtr_t participant(findParticipantByID(id)); if(participant) { @@ -5803,19 +5632,6 @@ BOOL LLVivoxVoiceClient::getUsingPTT(const LLUUID& id) return result; } -BOOL LLVivoxVoiceClient::getOnMuteList(const LLUUID& id) -{ - BOOL result = FALSE; - - participantStatePtr_t participant(findParticipantByID(id)); - if(participant) - { - result = participant->mOnMuteList; - } - - return result; -} - // External accessors. 
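// [Editor's note -- not part of this patch] The nearby-speaker accessors above
// (getParticipantList(), isParticipant(), getDisplayName(), getIsSpeaking(),
// getIsModeratorMuted()) are now gated on mProcessChannels, so when another
// voice module (e.g. WebRTC) owns the active channels this Vivox client simply
// reports nothing for them.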
F32 LLVivoxVoiceClient::getUserVolume(const LLUUID& id) { @@ -5873,11 +5689,6 @@ std::string LLVivoxVoiceClient::getGroupID(const LLUUID& id) return result; } -BOOL LLVivoxVoiceClient::getAreaVoiceDisabled() -{ - return mAreaVoiceDisabled; -} - void LLVivoxVoiceClient::recordingLoopStart(int seconds, int deltaFramesPerControlFrame) { // LL_DEBUGS("Voice") << "sending SessionGroup.ControlRecording (Start)" << LL_ENDL; @@ -6004,6 +5815,18 @@ LLVivoxVoiceClient::sessionState::sessionState() : { } +LLSD LLVivoxVoiceClient::sessionState::getVoiceChannelInfo() +{ + LLSD result; + + result["voice_server_type"] = VIVOX_VOICE_SERVER_TYPE; + result["channel_credentials"] = mHash; + result["channel_uri"] = mSIPURI; + result["session_handle"] = mHandle; + + return result; +} + /*static*/ LLVivoxVoiceClient::sessionState::ptr_t LLVivoxVoiceClient::sessionState::createSession() { @@ -6339,7 +6162,6 @@ void LLVivoxVoiceClient::deleteSession(const sessionStatePtr_t &session) if(mAudioSession == session) { mAudioSession.reset(); - mAudioSessionChanged = true; } // ditto for the next audio session @@ -6448,18 +6270,25 @@ void LLVivoxVoiceClient::notifyStatusObservers(LLVoiceClientStatusObserver::ESta } } + LLSD channel_info = getAudioSessionChannelInfo(); LL_DEBUGS("Voice") << " " << LLVoiceClientStatusObserver::status2string(status) - << ", session URI " << getAudioSessionURI() + << ", session channelInfo " << channel_info << ", proximal is " << inSpatialChannel() << LL_ENDL; + if (!mProcessChannels) + { + // we're not processing...another voice module is. + // so nobody wants to hear from us. + return; + } for (status_observer_set_t::iterator it = mStatusObservers.begin(); it != mStatusObservers.end(); ) { LLVoiceClientStatusObserver* observer = *it; - observer->onChange(status, getAudioSessionURI(), inSpatialChannel()); + observer->onChange(status, channel_info, inSpatialChannel()); // In case onError() deleted an entry. it = mStatusObservers.upper_bound(observer); } @@ -6471,6 +6300,7 @@ void LLVivoxVoiceClient::notifyStatusObservers(LLVoiceClientStatusObserver::ESta { bool voice_status = LLVoiceClient::getInstance()->voiceEnabled() && LLVoiceClient::getInstance()->isVoiceWorking(); + LL_WARNS("Voice") << "Setting voice connected " << (voice_status ? 
"True" : "False") << LL_ENDL; gAgent.setVoiceConnected(voice_status); if (voice_status) @@ -6545,7 +6375,6 @@ void LLVivoxVoiceClient::predAvatarNameResolution(const LLVivoxVoiceClient::sess if (session->mVoiceInvitePending) { session->mVoiceInvitePending = false; - LLIMMgr::getInstance()->inviteToSession( session->mIMSessionID, session->mName, @@ -6553,10 +6382,8 @@ void LLVivoxVoiceClient::predAvatarNameResolution(const LLVivoxVoiceClient::sess session->mName, IM_SESSION_P2P_INVITE, LLIMMgr::INVITATION_TYPE_VOICE, - session->mHandle, - session->mSIPURI); + session->getVoiceChannelInfo()); } - } } @@ -8110,3 +7937,7 @@ LLVivoxSecurity::LLVivoxSecurity() LLVivoxSecurity::~LLVivoxSecurity() { } + +bool LLVivoxVoiceP2PIncomingCall::answerInvite() { return LLVivoxVoiceClient::getInstance()->answerInvite(mCallInfo["session_handle"]); } + +void LLVivoxVoiceP2PIncomingCall::declineInvite() { LLVivoxVoiceClient::getInstance()->declineInvite(mCallInfo["session_handle"]); } diff --git a/indra/newview/llvoicevivox.h b/indra/newview/llvoicevivox.h index 36d3e9d0a1..eecf0d0776 100644 --- a/indra/newview/llvoicevivox.h +++ b/indra/newview/llvoicevivox.h @@ -39,6 +39,8 @@ class LLVivoxProtocolParser; #include "llcallingcard.h" // for LLFriendObserver #include "lleventcoro.h" #include "llcoros.h" +#include "llparcel.h" +#include "llmutelist.h" #include <queue> #ifdef LL_USESYSTEMLIBS @@ -51,12 +53,27 @@ class LLVivoxProtocolParser; class LLAvatarName; class LLVivoxVoiceClientMuteListObserver; +extern const std::string VIVOX_VOICE_SERVER_TYPE; + +class LLVivoxVoiceP2PIncomingCall : public LLVoiceP2PIncomingCallInterface +{ + public: + LLVivoxVoiceP2PIncomingCall(const LLSD& call_info) : mCallInfo(call_info) {} + ~LLVivoxVoiceP2PIncomingCall() override {} + + bool answerInvite() override; + void declineInvite() override; + + protected: + LLSD mCallInfo; +}; class LLVivoxVoiceClient : public LLSingleton<LLVivoxVoiceClient>, virtual public LLVoiceModuleInterface, - virtual public LLVoiceEffectInterface + virtual public LLVoiceEffectInterface, + virtual public LLVoiceP2POutgoingCallInterface { - LLSINGLETON(LLVivoxVoiceClient); + LLSINGLETON_C11(LLVivoxVoiceClient); LOG_CLASS(LLVivoxVoiceClient); virtual ~LLVivoxVoiceClient(); @@ -64,26 +81,29 @@ public: /// @name LLVoiceModuleInterface virtual implementations /// @see LLVoiceModuleInterface //@{ - virtual void init(LLPumpIO *pump) override; // Call this once at application startup (creates connector) - virtual void terminate() override; // Call this to clean up during shutdown + void init(LLPumpIO *pump) override; // Call this once at application startup (creates connector) + void terminate() override; // Call this to clean up during shutdown - virtual const LLVoiceVersionInfo& getVersion() override; + const LLVoiceVersionInfo& getVersion() override; - virtual void updateSettings() override; // call after loading settings and whenever they change + void updateSettings() override; // call after loading settings and whenever they change // Returns true if vivox has successfully logged in and is not in error state - virtual bool isVoiceWorking() const override; + bool isVoiceWorking() const override; + + void setHidden(bool hidden) override; // virtual ///////////////////// /// @name Tuning //@{ - virtual void tuningStart() override; - virtual void tuningStop() override; - virtual bool inTuningMode() override; + void tuningStart() override; + void tuningStop() override; + bool inTuningMode() override; + + void tuningSetMicVolume(float volume) override; + void 
tuningSetSpeakerVolume(float volume) override; + float tuningGetEnergy(void) override; - virtual void tuningSetMicVolume(float volume) override; - virtual void tuningSetSpeakerVolume(float volume) override; - virtual float tuningGetEnergy(void) override; //@} ///////////////////// @@ -91,122 +111,122 @@ public: //@{ // This returns true when it's safe to bring up the "device settings" dialog in the prefs. // i.e. when the daemon is running and connected, and the device lists are populated. - virtual bool deviceSettingsAvailable() override; - virtual bool deviceSettingsUpdated() override; //return if the list has been updated and never fetched, only to be called from the voicepanel. + bool deviceSettingsAvailable() override; + bool deviceSettingsUpdated() override; //return if the list has been updated and never fetched, only to be called from the voicepanel. // Requery the vivox daemon for the current list of input/output devices. // If you pass true for clearCurrentList, deviceSettingsAvailable() will be false until the query has completed // (use this if you want to know when it's done). // If you pass false, you'll have no way to know when the query finishes, but the device lists will not appear empty in the interim. - virtual void refreshDeviceLists(bool clearCurrentList = true) override; + void refreshDeviceLists(bool clearCurrentList = true) override; - virtual void setCaptureDevice(const std::string& name) override; - virtual void setRenderDevice(const std::string& name) override; + void setCaptureDevice(const std::string& name) override; + void setRenderDevice(const std::string& name) override; - virtual LLVoiceDeviceList& getCaptureDevices() override; - virtual LLVoiceDeviceList& getRenderDevices() override; + LLVoiceDeviceList& getCaptureDevices() override; + LLVoiceDeviceList& getRenderDevices() override; //@} - virtual void getParticipantList(std::set<LLUUID> &participants) override; - virtual bool isParticipant(const LLUUID& speaker_id) override; + void getParticipantList(std::set<LLUUID> &participants) override; + bool isParticipant(const LLUUID& speaker_id) override; // Send a text message to the specified user, initiating the session if necessary. // virtual BOOL sendTextMessage(const LLUUID& participant_id, const std::string& message) const {return false;}; - // close any existing text IM session with the specified user - virtual void endUserIMSession(const LLUUID &uuid) override; - // Returns true if calling back the session URI after the session has closed is possible. // Currently this will be false only for PSTN P2P calls. // NOTE: this will return true if the session can't be found. - virtual BOOL isSessionCallBackPossible(const LLUUID &session_id) override; + BOOL isSessionCallBackPossible(const LLUUID &session_id) override; // Returns true if the session can accepte text IM's. // Currently this will be false only for PSTN P2P calls. // NOTE: this will return true if the session can't be found. - virtual BOOL isSessionTextIMPossible(const LLUUID &session_id) override; - + BOOL isSessionTextIMPossible(const LLUUID &session_id) override; //////////////////////////// /// @name Channel stuff //@{ // returns true iff the user is currently in a proximal (local spatial) channel. // Note that gestures should only fire if this returns true. 
- virtual bool inProximalChannel() override; + bool inProximalChannel() override; + + void setNonSpatialChannel(const LLSD& channelInfo, + bool notify_on_first_join, + bool hangup_on_last_leave) override; - virtual void setNonSpatialChannel(const std::string &uri, - const std::string &credentials) override; + bool setSpatialChannel(const LLSD& channelInfo) override; - virtual bool setSpatialChannel(const std::string &uri, - const std::string &credentials) override; + void leaveNonSpatialChannel() override; - virtual void leaveNonSpatialChannel() override; + void processChannels(bool process) override; - virtual void leaveChannel(void) override; + void leaveChannel(void); + + bool isCurrentChannel(const LLSD &channelInfo) override; + bool compareChannels(const LLSD &channelInfo1, const LLSD &channelInfo2) override; - // Returns the URI of the current channel, or an empty string if not currently in a channel. - // NOTE that it will return an empty string if it's in the process of joining a channel. - virtual std::string getCurrentChannel() override; //@} ////////////////////////// - /// @name invitations + /// @name LLVoiceP2POutgoingCallInterface //@{ // start a voice channel with the specified user - virtual void callUser(const LLUUID &uuid) override; - virtual bool isValidChannel(std::string &channelHandle) override; - virtual bool answerInvite(std::string &channelHandle) override; - virtual void declineInvite(std::string &channelHandle) override; + void callUser(const LLUUID &uuid) override; + void hangup() override; + //@} + LLVoiceP2POutgoingCallInterface *getOutgoingCallInterface() override { return this; } + + LLVoiceP2PIncomingCallInterfacePtr getIncomingCallInterface(const LLSD &voice_call_info) override; + + bool answerInvite(const std::string &sessionHandle); + void declineInvite(const std::string &sessionHandle); + ///////////////////////// /// @name Volume/gain //@{ - virtual void setVoiceVolume(F32 volume) override; - virtual void setMicGain(F32 volume) override; + void setVoiceVolume(F32 volume) override; + void setMicGain(F32 volume) override; //@} ///////////////////////// /// @name enable disable voice and features //@{ - virtual bool voiceEnabled() override; - virtual void setVoiceEnabled(bool enabled) override; - virtual BOOL lipSyncEnabled() override; - virtual void setLipSyncEnabled(BOOL enabled) override; - virtual void setMuteMic(bool muted) override; // Set the mute state of the local mic. + void setVoiceEnabled(bool enabled) override; + void setMuteMic(bool muted) override; // Set the mute state of the local mic. //@} ////////////////////////// /// @name nearby speaker accessors //@{ - virtual BOOL getVoiceEnabled(const LLUUID& id) override; // true if we've received data for this avatar - virtual std::string getDisplayName(const LLUUID& id) override; - virtual BOOL isParticipantAvatar(const LLUUID &id) override; - virtual BOOL getIsSpeaking(const LLUUID& id) override; - virtual BOOL getIsModeratorMuted(const LLUUID& id) override; - virtual F32 getCurrentPower(const LLUUID& id) override; // "power" is related to "amplitude" in a defined way. I'm just not sure what the formula is... 
- virtual BOOL getOnMuteList(const LLUUID& id) override; - virtual F32 getUserVolume(const LLUUID& id) override; - virtual void setUserVolume(const LLUUID& id, F32 volume) override; // set's volume for specified agent, from 0-1 (where .5 is nominal) + std::string getDisplayName(const LLUUID& id) override; + BOOL isParticipantAvatar(const LLUUID &id) override; + BOOL getIsSpeaking(const LLUUID& id) override; + BOOL getIsModeratorMuted(const LLUUID& id) override; + F32 getCurrentPower(const LLUUID& id) override; // "power" is related to "amplitude" in a defined way. I'm just not sure what the formula is... + F32 getUserVolume(const LLUUID& id) override; + void setUserVolume(const LLUUID& id, F32 volume) override; // set's volume for specified agent, from 0-1 (where .5 is nominal) //@} // authorize the user - virtual void userAuthorized(const std::string& user_id, - const LLUUID &agentID) override; + void userAuthorized(const std::string& user_id, + const LLUUID &agentID) override; ////////////////////////////// /// @name Status notification //@{ - virtual void addObserver(LLVoiceClientStatusObserver* observer) override; - virtual void removeObserver(LLVoiceClientStatusObserver* observer) override; - virtual void addObserver(LLFriendObserver* observer) override; - virtual void removeObserver(LLFriendObserver* observer) override; - virtual void addObserver(LLVoiceClientParticipantObserver* observer) override; - virtual void removeObserver(LLVoiceClientParticipantObserver* observer) override; + void addObserver(LLVoiceClientStatusObserver* observer) override; + void removeObserver(LLVoiceClientStatusObserver* observer) override; + void addObserver(LLFriendObserver* observer) override; + void removeObserver(LLFriendObserver* observer) override; + void addObserver(LLVoiceClientParticipantObserver* observer) override; + void removeObserver(LLVoiceClientParticipantObserver* observer) override; //@} - virtual std::string sipURIFromID(const LLUUID &id) override; + std::string sipURIFromID(const LLUUID &id) const override; + LLSD getP2PChannelInfoTemplate(const LLUUID& id) const override; //@} /// @name LLVoiceEffectInterface virtual implementations @@ -216,32 +236,32 @@ public: ////////////////////////// /// @name Accessors //@{ - virtual bool setVoiceEffect(const LLUUID& id) override; - virtual const LLUUID getVoiceEffect() override; - virtual LLSD getVoiceEffectProperties(const LLUUID& id) override; + bool setVoiceEffect(const LLUUID& id) override; + const LLUUID getVoiceEffect() override; + LLSD getVoiceEffectProperties(const LLUUID& id) override; - virtual void refreshVoiceEffectLists(bool clear_lists) override; - virtual const voice_effect_list_t& getVoiceEffectList() const override; - virtual const voice_effect_list_t& getVoiceEffectTemplateList() const override; + void refreshVoiceEffectLists(bool clear_lists) override; + const voice_effect_list_t& getVoiceEffectList() const override; + const voice_effect_list_t& getVoiceEffectTemplateList() const override; //@} ////////////////////////////// /// @name Status notification //@{ - virtual void addObserver(LLVoiceEffectObserver* observer) override; - virtual void removeObserver(LLVoiceEffectObserver* observer) override; + void addObserver(LLVoiceEffectObserver* observer) override; + void removeObserver(LLVoiceEffectObserver* observer) override; //@} ////////////////////////////// /// @name Effect preview buffer //@{ - virtual void enablePreviewBuffer(bool enable) override; - virtual void recordPreviewBuffer() override; - virtual void 
playPreviewBuffer(const LLUUID& effect_id = LLUUID::null) override; - virtual void stopPreviewBuffer() override; + void enablePreviewBuffer(bool enable) override; + void recordPreviewBuffer() override; + void playPreviewBuffer(const LLUUID& effect_id = LLUUID::null) override; + void stopPreviewBuffer() override; - virtual bool isPreviewRecording() override; - virtual bool isPreviewPlaying() override; + bool isPreviewRecording() override; + bool isPreviewPlaying() override; //@} //@} @@ -311,6 +331,8 @@ protected: static ptr_t createSession(); ~sessionState(); + LLSD getVoiceChannelInfo(); + participantStatePtr_t addParticipant(const std::string &uri); void removeParticipant(const participantStatePtr_t &participant); void removeAllParticipants(); @@ -325,6 +347,7 @@ protected: bool isCallBackPossible(); bool isTextIMPossible(); + bool isSpatial() { return mIsSpatial; } static void for_each(sessionFunc_t func); @@ -494,11 +517,6 @@ protected: std::string getGroupID(const LLUUID& id); // group ID if the user is in group chat (empty string if not applicable) ///////////////////////////// - BOOL getAreaVoiceDisabled(); // returns true if the area the avatar is in is speech-disabled. - // Use this to determine whether to show a "no speech" icon in the menu bar. - - - ///////////////////////////// // Recording controls void recordingLoopStart(int seconds = 3600, int deltaFramesPerControlFrame = 200); void recordingLoopSave(const std::string& filename); @@ -692,9 +710,7 @@ private: std::string mMainSessionGroupHandle; // handle of the "main" session group. std::string mChannelName; // Name of the channel to be looked up - bool mAreaVoiceDisabled; sessionStatePtr_t mAudioSession; // Session state for the current audio session - bool mAudioSessionChanged; // set to true when the above pointer gets changed, so observers can be notified. sessionStatePtr_t mNextAudioSession; // Session state for the audio session we're trying to join @@ -736,10 +752,8 @@ private: bool switchChannel(std::string uri = std::string(), bool spatial = true, bool no_reconnect = false, bool is_p2p = false, std::string hash = ""); void joinSession(const sessionStatePtr_t &session); - std::string nameFromAvatar(LLVOAvatar *avatar); - std::string nameFromID(const LLUUID &id); + std::string nameFromID(const LLUUID &id) const; bool IDFromName(const std::string name, LLUUID &uuid); - std::string displayNameFromAvatar(LLVOAvatar *avatar); std::string sipURIFromAvatar(LLVOAvatar *avatar); std::string sipURIFromName(std::string &name); @@ -747,10 +761,9 @@ private: std::string nameFromsipURI(const std::string &uri); bool inSpatialChannel(void); - std::string getAudioSessionURI(); + LLSD getAudioSessionChannelInfo(); std::string getAudioSessionHandle(); - void setHidden(bool hidden) override; //virtual void sendPositionAndVolumeUpdate(void); void sendCaptureAndRenderDevices(); @@ -803,12 +816,11 @@ private: bool mMicVolumeDirty; bool mVoiceEnabled; + bool mProcessChannels; bool mWriteInProgress; std::string mWriteString; size_t mWriteOffset; - BOOL mLipSyncEnabled; - typedef std::set<LLVoiceClientParticipantObserver*> observer_set_t; observer_set_t mParticipantObservers; diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp new file mode 100644 index 0000000000..b4f5ba6cfe --- /dev/null +++ b/indra/newview/llvoicewebrtc.cpp @@ -0,0 +1,3180 @@ + /** + * @file llvoicewebrtc.cpp + * @brief Implementation of LLWebRTCVoiceClient class which is the interface to the voice client process. 
+ * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ + * Second Life Viewer Source Code + * Copyright (C) 2023, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation + * version 2.1 of the License only. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA + * $/LicenseInfo$ + */ +#include <algorithm> +#if !LL_LINUX +#include <format> +#endif +#include "llvoicewebrtc.h" + +#include "llsdutil.h" + +// Linden library includes +#include "llavatarnamecache.h" +#include "llvoavatarself.h" +#include "llbufferstream.h" +#include "llfile.h" +#include "llmenugl.h" +#ifdef LL_USESYSTEMLIBS +# include "expat.h" +#else +# include "expat/expat.h" +#endif +#include "llcallbacklist.h" +#include "llviewernetwork.h" // for gGridChoice +#include "llbase64.h" +#include "llviewercontrol.h" +#include "llappviewer.h" // for gDisconnected, gDisableVoice +#include "llprocess.h" + +// Viewer includes +#include "llmutelist.h" // to check for muted avatars +#include "llagent.h" +#include "llcachename.h" +#include "llimview.h" // for LLIMMgr +#include "llworld.h" +#include "llparcel.h" +#include "llviewerparcelmgr.h" +#include "llfirstuse.h" +#include "llspeakers.h" +#include "lltrans.h" +#include "llrand.h" +#include "llviewerwindow.h" +#include "llviewercamera.h" +#include "llversioninfo.h" + +#include "llviewernetwork.h" +#include "llnotificationsutil.h" + +#include "llcorehttputil.h" +#include "lleventfilter.h" + +#include "stringize.h" + +#include "llwebrtc.h" + +// for base64 decoding +#include "apr_base64.h" + +#include "json/reader.h" +#include "json/writer.h" + +const std::string WEBRTC_VOICE_SERVER_TYPE = "webrtc"; + +namespace { + + const F32 MAX_AUDIO_DIST = 50.0f; + const F32 VOLUME_SCALE_WEBRTC = 0.01f; + const F32 LEVEL_SCALE_WEBRTC = 0.008f; + + const F32 SPEAKING_AUDIO_LEVEL = 0.30; + + static const std::string REPORTED_VOICE_SERVER_TYPE = "Secondlife WebRTC Gateway"; + + // Don't send positional updates more frequently than this: + const F32 UPDATE_THROTTLE_SECONDS = 0.1f; + const F32 MAX_RETRY_WAIT_SECONDS = 10.0f; + + // Cosine of a "trivially" small angle + const F32 FOUR_DEGREES = 4.0f * (F_PI / 180.0f); + const F32 MINUSCULE_ANGLE_COS = (F32) cos(0.5f * FOUR_DEGREES); + +} // namespace + + +/////////////////////////////////////////////////////////////////////////////////////////////// + +void LLVoiceWebRTCStats::reset() +{ + mStartTime = -1.0f; + mConnectCycles = 0; + mConnectTime = -1.0f; + mConnectAttempts = 0; + mProvisionTime = -1.0f; + mProvisionAttempts = 0; + mEstablishTime = -1.0f; + mEstablishAttempts = 0; +} + +LLVoiceWebRTCStats::LLVoiceWebRTCStats() +{ + reset(); +} + +LLVoiceWebRTCStats::~LLVoiceWebRTCStats() +{ +} + +void LLVoiceWebRTCStats::connectionAttemptStart() +{ + if (!mConnectAttempts) + { + mStartTime = LLTimer::getTotalTime(); + mConnectCycles++; + } + mConnectAttempts++; +} + +void 
LLVoiceWebRTCStats::connectionAttemptEnd(bool success) +{ + if ( success ) + { + mConnectTime = (LLTimer::getTotalTime() - mStartTime) / USEC_PER_SEC; + } +} + +void LLVoiceWebRTCStats::provisionAttemptStart() +{ + if (!mProvisionAttempts) + { + mStartTime = LLTimer::getTotalTime(); + } + mProvisionAttempts++; +} + +void LLVoiceWebRTCStats::provisionAttemptEnd(bool success) +{ + if ( success ) + { + mProvisionTime = (LLTimer::getTotalTime() - mStartTime) / USEC_PER_SEC; + } +} + +void LLVoiceWebRTCStats::establishAttemptStart() +{ + if (!mEstablishAttempts) + { + mStartTime = LLTimer::getTotalTime(); + } + mEstablishAttempts++; +} + +void LLVoiceWebRTCStats::establishAttemptEnd(bool success) +{ + if ( success ) + { + mEstablishTime = (LLTimer::getTotalTime() - mStartTime) / USEC_PER_SEC; + } +} + +LLSD LLVoiceWebRTCStats::read() +{ + LLSD stats(LLSD::emptyMap()); + + stats["connect_cycles"] = LLSD::Integer(mConnectCycles); + stats["connect_attempts"] = LLSD::Integer(mConnectAttempts); + stats["connect_time"] = LLSD::Real(mConnectTime); + + stats["provision_attempts"] = LLSD::Integer(mProvisionAttempts); + stats["provision_time"] = LLSD::Real(mProvisionTime); + + stats["establish_attempts"] = LLSD::Integer(mEstablishAttempts); + stats["establish_time"] = LLSD::Real(mEstablishTime); + + return stats; +} + +/////////////////////////////////////////////////////////////////////////////////////////////// + +bool LLWebRTCVoiceClient::sShuttingDown = false; + +LLWebRTCVoiceClient::LLWebRTCVoiceClient() : + mHidden(false), + mTuningMode(false), + mTuningMicGain(0.0), + mTuningSpeakerVolume(50), // Set to 50 so the user can hear themselves when he sets his mic volume + mDevicesListUpdated(false), + + mSpatialCoordsDirty(false), + + mMuteMic(false), + + mEarLocation(0), + mMicGain(0.0), + + mVoiceEnabled(false), + mProcessChannels(false), + + mAvatarNameCacheConnection(), + mIsInTuningMode(false), + mIsProcessingChannels(false), + mIsCoroutineActive(false), + mWebRTCPump("WebRTCClientPump"), + mWebRTCDeviceInterface(nullptr) +{ + sShuttingDown = false; + + mSpeakerVolume = 0.0; + + mVoiceVersion.serverVersion = ""; + mVoiceVersion.voiceServerType = REPORTED_VOICE_SERVER_TYPE; + mVoiceVersion.internalVoiceServerType = WEBRTC_VOICE_SERVER_TYPE; + mVoiceVersion.minorVersion = 0; + mVoiceVersion.majorVersion = 2; + mVoiceVersion.mBuildVersion = ""; +} + +//--------------------------------------------------- + +LLWebRTCVoiceClient::~LLWebRTCVoiceClient() +{ +} + +void LLWebRTCVoiceClient::cleanupSingleton() +{ + if (mAvatarNameCacheConnection.connected()) + { + mAvatarNameCacheConnection.disconnect(); + } + + sShuttingDown = true; + if (mSession) + { + mSession->shutdownAllConnections(); + } + if (mNextSession) + { + mNextSession->shutdownAllConnections(); + } + cleanUp(); + sessionState::clearSessions(); +} + +//--------------------------------------------------- + +void LLWebRTCVoiceClient::init(LLPumpIO* pump) +{ + // constructor will set up LLVoiceClient::getInstance() + llwebrtc::init(this); + + mWebRTCDeviceInterface = llwebrtc::getDeviceInterface(); + mWebRTCDeviceInterface->setDevicesObserver(this); + mMainQueue = LL::WorkQueue::getInstance("mainloop"); +} + +void LLWebRTCVoiceClient::terminate() +{ + if (sShuttingDown) + { + return; + } + + mVoiceEnabled = false; + llwebrtc::terminate(); + + sShuttingDown = true; +} + +//--------------------------------------------------- + +void LLWebRTCVoiceClient::cleanUp() +{ + mNextSession.reset(); + mSession.reset(); + mNeighboringRegions.clear(); + 
sessionState::for_each(boost::bind(predShutdownSession, _1)); + LL_DEBUGS("Voice") << "Exiting" << LL_ENDL; +} + +void LLWebRTCVoiceClient::LogMessage(llwebrtc::LLWebRTCLogCallback::LogLevel level, const std::string& message) +{ + switch (level) + { + case llwebrtc::LLWebRTCLogCallback::LOG_LEVEL_VERBOSE: + LL_DEBUGS("Voice") << message << LL_ENDL; + break; + case llwebrtc::LLWebRTCLogCallback::LOG_LEVEL_INFO: + LL_INFOS("Voice") << message << LL_ENDL; + break; + case llwebrtc::LLWebRTCLogCallback::LOG_LEVEL_WARNING: + LL_WARNS("Voice") << message << LL_ENDL; + break; + case llwebrtc::LLWebRTCLogCallback::LOG_LEVEL_ERROR: + // use WARN so that we don't crash on a webrtc error. + // webrtc will force a crash on a fatal error. + LL_WARNS("Voice") << message << LL_ENDL; + break; + default: + break; + } +} + +// -------------------------------------------------- + +const LLVoiceVersionInfo& LLWebRTCVoiceClient::getVersion() +{ + return mVoiceVersion; +} + +//--------------------------------------------------- + +void LLWebRTCVoiceClient::updateSettings() +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + setVoiceEnabled(LLVoiceClient::getInstance()->voiceEnabled()); + static LLCachedControl<S32> sVoiceEarLocation(gSavedSettings, "VoiceEarLocation"); + setEarLocation(sVoiceEarLocation); + + static LLCachedControl<std::string> sInputDevice(gSavedSettings, "VoiceInputAudioDevice"); + setCaptureDevice(sInputDevice); + + static LLCachedControl<std::string> sOutputDevice(gSavedSettings, "VoiceOutputAudioDevice"); + setRenderDevice(sOutputDevice); + + static LLCachedControl<F32> sMicLevel(gSavedSettings, "AudioLevelMic"); + setMicGain(sMicLevel); + + llwebrtc::LLWebRTCDeviceInterface::AudioConfig config; + + static LLCachedControl<bool> sEchoCancellation(gSavedSettings, "VoiceEchoCancellation", true); + config.mEchoCancellation = sEchoCancellation; + + static LLCachedControl<bool> sAGC(gSavedSettings, "VoiceAutomaticGainControl", true); + config.mAGC = sAGC; + + static LLCachedControl<U32> sNoiseSuppressionLevel(gSavedSettings, + "VoiceNoiseSuppressionLevel", + llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel::NOISE_SUPPRESSION_LEVEL_VERY_HIGH); + config.mNoiseSuppressionLevel = (llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel) (U32)sNoiseSuppressionLevel; + + mWebRTCDeviceInterface->setAudioConfig(config); + +} + +// Observers +void LLWebRTCVoiceClient::addObserver(LLVoiceClientParticipantObserver *observer) +{ + mParticipantObservers.insert(observer); +} + +void LLWebRTCVoiceClient::removeObserver(LLVoiceClientParticipantObserver *observer) +{ + mParticipantObservers.erase(observer); +} + +void LLWebRTCVoiceClient::notifyParticipantObservers() +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + for (observer_set_t::iterator it = mParticipantObservers.begin(); it != mParticipantObservers.end();) + { + LLVoiceClientParticipantObserver *observer = *it; + observer->onParticipantsChanged(); + // In case onParticipantsChanged() deleted an entry. 
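+        // (upper_bound() looks the next observer up afresh from the set, so the
+        // iteration stays valid even if the callback erased this or other entries.)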
+ it = mParticipantObservers.upper_bound(observer); + } +} + +void LLWebRTCVoiceClient::addObserver(LLVoiceClientStatusObserver *observer) +{ + mStatusObservers.insert(observer); +} + +void LLWebRTCVoiceClient::removeObserver(LLVoiceClientStatusObserver *observer) +{ + mStatusObservers.erase(observer); +} + +void LLWebRTCVoiceClient::notifyStatusObservers(LLVoiceClientStatusObserver::EStatusType status) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + LL_DEBUGS("Voice") << "( " << LLVoiceClientStatusObserver::status2string(status) << " )" + << " mSession=" << mSession << LL_ENDL; + + LL_DEBUGS("Voice") << " " << LLVoiceClientStatusObserver::status2string(status) << ", session channelInfo " + << getAudioSessionChannelInfo() << ", proximal is " << inSpatialChannel() << LL_ENDL; + + mIsProcessingChannels = status == LLVoiceClientStatusObserver::STATUS_JOINED; + + LLSD channelInfo = getAudioSessionChannelInfo(); + for (status_observer_set_t::iterator it = mStatusObservers.begin(); it != mStatusObservers.end();) + { + LLVoiceClientStatusObserver *observer = *it; + observer->onChange(status, channelInfo, inSpatialChannel()); + // In case onError() deleted an entry. + it = mStatusObservers.upper_bound(observer); + } + + // skipped to avoid speak button blinking + if (status != LLVoiceClientStatusObserver::STATUS_JOINING && + status != LLVoiceClientStatusObserver::STATUS_LEFT_CHANNEL && + status != LLVoiceClientStatusObserver::STATUS_VOICE_DISABLED) + { + bool voice_status = LLVoiceClient::getInstance()->voiceEnabled() && LLVoiceClient::getInstance()->isVoiceWorking(); + + gAgent.setVoiceConnected(voice_status); + + if (voice_status) + { + LLFirstUse::speak(true); + } + } +} + +void LLWebRTCVoiceClient::addObserver(LLFriendObserver *observer) +{ +} + +void LLWebRTCVoiceClient::removeObserver(LLFriendObserver *observer) +{ +} + +//--------------------------------------------------- +// Primary voice loop. +// This voice loop is called every 100ms plus the time it +// takes to process the various functions called in the loop +// The loop does the following: +// * gates whether we do channel processing depending on +// whether we're running a WebRTC voice channel or +// one from another voice provider. +// * If in spatial voice, it determines whether we've changed +// parcels, whether region/parcel voice settings have changed, +// etc. and manages whether the voice channel needs to change. +// * calls the state machines for the sessions to negotiate +// connection to various voice channels. +// * Sends updates to the voice server when this agent's +// voice levels, or positions have changed. +void LLWebRTCVoiceClient::voiceConnectionCoro() +{ + LL_DEBUGS("Voice") << "starting" << LL_ENDL; + mIsCoroutineActive = true; + LLCoros::set_consuming(true); + try + { + LLMuteList::getInstance()->addObserver(this); + while (!sShuttingDown) + { + LL_PROFILE_ZONE_NAMED_CATEGORY_VOICE("voiceConnectionCoroLoop") + // TODO: Doing some measurement and calculation here, + // we could reduce the timeout to take into account the + // time spent on the previous loop to have the loop + // cycle at exactly 100ms, instead of 100ms + loop + // execution time. + // Could help with voice updates making for smoother + // voice when we're busy. 
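+            // A rough sketch of that idea (illustrative only, reusing the timer
+            // helpers this file already uses elsewhere):
+            //     F64 loop_start = LLTimer::getTotalTime();
+            //     ... loop body ...
+            //     F32 elapsed = (F32)((LLTimer::getTotalTime() - loop_start) / USEC_PER_SEC);
+            //     llcoro::suspendUntilTimeout(llmax(0.f, UPDATE_THROTTLE_SECONDS - elapsed));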
+ llcoro::suspendUntilTimeout(UPDATE_THROTTLE_SECONDS); + if (sShuttingDown) return; // 'this' migh already be invalid + bool voiceEnabled = mVoiceEnabled; + + if (!isAgentAvatarValid()) + { + continue; + } + + LLViewerRegion *regionp = gAgent.getRegion(); + if (!regionp) + { + continue; + } + + if (!mProcessChannels) + { + // we've switched away from webrtc voice, so shut all channels down. + // leave channel can be called again and again without adverse effects. + // it merely tells channels to shut down if they're not already doing so. + leaveChannel(false); + } + else if (inSpatialChannel()) + { + bool useEstateVoice = true; + // add session for region or parcel voice. + if (!regionp || regionp->getRegionID().isNull()) + { + // no region, no voice. + continue; + } + + voiceEnabled = voiceEnabled && regionp->isVoiceEnabled(); + + if (voiceEnabled) + { + LLParcel *parcel = LLViewerParcelMgr::getInstance()->getAgentParcel(); + // check to see if parcel changed. + if (parcel && parcel->getLocalID() != INVALID_PARCEL_ID) + { + // parcel voice + if (!parcel->getParcelFlagAllowVoice()) + { + voiceEnabled = false; + } + else if (!parcel->getParcelFlagUseEstateVoiceChannel()) + { + // use the parcel-specific voice channel. + S32 parcel_local_id = parcel->getLocalID(); + std::string channelID = regionp->getRegionID().asString() + "-" + std::to_string(parcel->getLocalID()); + + useEstateVoice = false; + if (!inOrJoiningChannel(channelID)) + { + startParcelSession(channelID, parcel_local_id); + } + } + } + if (voiceEnabled && useEstateVoice && !inEstateChannel()) + { + // estate voice + startEstateSession(); + } + } + if (!voiceEnabled) + { + // voice is disabled, so leave and disable PTT + leaveChannel(true); + } + else + { + // we're in spatial voice, and voice is enabled, so determine positions in order + // to send position updates. + updatePosition(); + } + } + + sessionState::processSessionStates(); + if (mProcessChannels && voiceEnabled && !mHidden) + { + sendPositionUpdate(false); + updateOwnVolume(); + } + } + } + catch (const LLCoros::Stop&) + { + LL_DEBUGS("LLWebRTCVoiceClient") << "Received a shutdown exception" << LL_ENDL; + } + catch (const LLContinueError&) + { + LOG_UNHANDLED_EXCEPTION("LLWebRTCVoiceClient"); + } + catch (...) + { + // Ideally for Windows need to log SEH exception instead or to set SEH + // handlers but bugsplat shows local variables for windows, which should + // be enough + LL_WARNS("Voice") << "voiceConnectionStateMachine crashed" << LL_ENDL; + throw; + } + + cleanUp(); +} + +// For spatial, determine which neighboring regions to connect to +// for cross-region voice. +void LLWebRTCVoiceClient::updateNeighboringRegions() +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + static const std::vector<LLVector3d> neighbors {LLVector3d(0.0f, 1.0f, 0.0f), LLVector3d(0.707f, 0.707f, 0.0f), + LLVector3d(1.0f, 0.0f, 0.0f), LLVector3d(0.707f, -0.707f, 0.0f), + LLVector3d(0.0f, -1.0f, 0.0f), LLVector3d(-0.707f, -0.707f, 0.0f), + LLVector3d(-1.0f, 0.0f, 0.0f), LLVector3d(-0.707f, 0.707f, 0.0f)}; + + // Estate voice requires connection to neighboring regions. + mNeighboringRegions.clear(); + + // add current region. + mNeighboringRegions.insert(gAgent.getRegion()->getRegionID()); + + // base off of speaker position as it'll move more slowly than camera position. 
+    // Once we have hysteresis, we may be able to track off of speaker and camera position at 50m
+    // TODO: Add hysteresis so we don't flip-flop connections to neighbors
+    LLVector3d speaker_pos = LLWebRTCVoiceClient::getInstance()->getSpeakerPosition();
+    for (auto &neighbor_pos : neighbors)
+    {
+        // include every region within 100m (2*MAX_AUDIO_DIST) to deal with the fact that the camera
+        // can stray 50m away from the avatar.
+        LLViewerRegion *neighbor = LLWorld::instance().getRegionFromPosGlobal(speaker_pos + 2 * MAX_AUDIO_DIST * neighbor_pos);
+        if (neighbor && !neighbor->getRegionID().isNull())
+        {
+            mNeighboringRegions.insert(neighbor->getRegionID());
+        }
+    }
+}
+
+//=========================================================================
+// shut down the current audio session to make room for the next one.
+void LLWebRTCVoiceClient::leaveAudioSession()
+{
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE
+
+    if(mSession)
+    {
+        LL_DEBUGS("Voice") << "leaving session: " << mSession->mChannelID << LL_ENDL;
+        mSession->shutdownAllConnections();
+    }
+    else
+    {
+        LL_WARNS("Voice") << "called with no active session" << LL_ENDL;
+    }
+}
+
+//=========================================================================
+// Device Management
+void LLWebRTCVoiceClient::clearCaptureDevices()
+{
+    LL_DEBUGS("Voice") << "called" << LL_ENDL;
+    mCaptureDevices.clear();
+}
+
+void LLWebRTCVoiceClient::addCaptureDevice(const LLVoiceDevice& device)
+{
+    LL_DEBUGS("Voice") << "display: '" << device.display_name << "' device: '" << device.full_name << "'" << LL_ENDL;
+    mCaptureDevices.push_back(device);
+}
+
+LLVoiceDeviceList& LLWebRTCVoiceClient::getCaptureDevices()
+{
+    return mCaptureDevices;
+}
+
+void LLWebRTCVoiceClient::setCaptureDevice(const std::string& name)
+{
+    mWebRTCDeviceInterface->setCaptureDevice(name);
+}
+void LLWebRTCVoiceClient::setDevicesListUpdated(bool state)
+{
+    mDevicesListUpdated = state;
+}
+
+// the singleton 'this' pointer will outlive the work queue.
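+// OnDevicesChanged() may be called from a WebRTC worker thread, so the real work is
+// posted to the main loop's WorkQueue below; postMaybe() quietly drops the task if
+// that queue is already gone (e.g. during shutdown).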
+void LLWebRTCVoiceClient::OnDevicesChanged(const llwebrtc::LLWebRTCVoiceDeviceList& render_devices, + const llwebrtc::LLWebRTCVoiceDeviceList& capture_devices) +{ + + LL::WorkQueue::postMaybe(mMainQueue, + [=] + { + OnDevicesChangedImpl(render_devices, capture_devices); + }); +} + +void LLWebRTCVoiceClient::OnDevicesChangedImpl(const llwebrtc::LLWebRTCVoiceDeviceList &render_devices, + const llwebrtc::LLWebRTCVoiceDeviceList &capture_devices) +{ + if (sShuttingDown) + { + return; + } + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + std::string inputDevice = gSavedSettings.getString("VoiceInputAudioDevice"); + std::string outputDevice = gSavedSettings.getString("VoiceOutputAudioDevice"); + + LL_DEBUGS("Voice") << "Setting devices to-input: '" << inputDevice << "' output: '" << outputDevice << "'" << LL_ENDL; + clearRenderDevices(); + for (auto &device : render_devices) + { + addRenderDevice(LLVoiceDevice(device.mDisplayName, device.mID)); + } + setRenderDevice(outputDevice); + + clearCaptureDevices(); + for (auto &device : capture_devices) + { + LL_DEBUGS("Voice") << "Checking capture device:'" << device.mID << "'" << LL_ENDL; + + addCaptureDevice(LLVoiceDevice(device.mDisplayName, device.mID)); + } + setCaptureDevice(inputDevice); + + setDevicesListUpdated(true); +} + +void LLWebRTCVoiceClient::clearRenderDevices() +{ + LL_DEBUGS("Voice") << "called" << LL_ENDL; + mRenderDevices.clear(); +} + +void LLWebRTCVoiceClient::addRenderDevice(const LLVoiceDevice& device) +{ + LL_DEBUGS("Voice") << "display: '" << device.display_name << "' device: '" << device.full_name << "'" << LL_ENDL; + mRenderDevices.push_back(device); + +} + +LLVoiceDeviceList& LLWebRTCVoiceClient::getRenderDevices() +{ + return mRenderDevices; +} + +void LLWebRTCVoiceClient::setRenderDevice(const std::string& name) +{ + mWebRTCDeviceInterface->setRenderDevice(name); +} + +void LLWebRTCVoiceClient::tuningStart() +{ + if (!mIsInTuningMode) + { + mWebRTCDeviceInterface->setTuningMode(true); + mIsInTuningMode = true; + } +} + +void LLWebRTCVoiceClient::tuningStop() +{ + if (mIsInTuningMode) + { + mWebRTCDeviceInterface->setTuningMode(false); + mIsInTuningMode = false; + } +} + +bool LLWebRTCVoiceClient::inTuningMode() +{ + return mIsInTuningMode; +} + +void LLWebRTCVoiceClient::tuningSetMicVolume(float volume) +{ + mTuningMicGain = volume; +} + +void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume) +{ + + if (volume != mTuningSpeakerVolume) + { + mTuningSpeakerVolume = volume; + } +} + +float LLWebRTCVoiceClient::getAudioLevel() +{ + if (mIsInTuningMode) + { + return (1.0 - mWebRTCDeviceInterface->getTuningAudioLevel() * LEVEL_SCALE_WEBRTC) * mTuningMicGain / 2.1; + } + else + { + return (1.0 - mWebRTCDeviceInterface->getPeerConnectionAudioLevel() * LEVEL_SCALE_WEBRTC) * mMicGain / 2.1; + } +} + +float LLWebRTCVoiceClient::tuningGetEnergy(void) +{ + return getAudioLevel(); +} + +bool LLWebRTCVoiceClient::deviceSettingsAvailable() +{ + bool result = true; + + if(mRenderDevices.empty() || mCaptureDevices.empty()) + result = false; + + return result; +} +bool LLWebRTCVoiceClient::deviceSettingsUpdated() +{ + bool updated = mDevicesListUpdated; + mDevicesListUpdated = false; + return updated; +} + +void LLWebRTCVoiceClient::refreshDeviceLists(bool clearCurrentList) +{ + if(clearCurrentList) + { + clearCaptureDevices(); + clearRenderDevices(); + } + mWebRTCDeviceInterface->refreshDevices(); +} + + +void LLWebRTCVoiceClient::setHidden(bool hidden) +{ + mHidden = hidden; + + if (inSpatialChannel()) + { + if (mHidden) + { + // get 
out of the channel entirely + // mute the microphone. + sessionState::for_each(boost::bind(predSetMuteMic, _1, true)); + } + else + { + // and put it back + sessionState::for_each(boost::bind(predSetMuteMic, _1, mMuteMic)); + updatePosition(); + sendPositionUpdate(true); + } + } +} + +///////////////////////////// +// session control messages. +// +// these are called by the sessions to report +// status for a given channel. By filtering +// on channel and region, these functions +// can send various notifications to +// other parts of the viewer, as well as +// managing housekeeping + +// A connection to a channel was successfully established, +// so shut down the current session and move on to the next +// if one is available. +// if the current session is the one that was established, +// notify the observers. +void LLWebRTCVoiceClient::OnConnectionEstablished(const std::string &channelID, const LLUUID ®ionID) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + if (gAgent.getRegion()->getRegionID() == regionID) + { + if (mNextSession && mNextSession->mChannelID == channelID) + { + if (mSession) + { + mSession->shutdownAllConnections(); + } + mSession = mNextSession; + mNextSession.reset(); + } + + if (mSession) + { + // Add ourselves as a participant. + mSession->addParticipant(gAgentID, gAgent.getRegion()->getRegionID()); + } + + // The current session was established. + if (mSession && mSession->mChannelID == channelID) + { + LLWebRTCVoiceClient::getInstance()->notifyStatusObservers(LLVoiceClientStatusObserver::STATUS_LOGGED_IN); + + // only set status to joined if asked to. This will happen in the case where we're not + // doing an ad-hoc based p2p session. Those sessions expect a STATUS_JOINED when the peer + // has, in fact, joined, which we detect elsewhere. + if (!mSession->mNotifyOnFirstJoin) + { + LLWebRTCVoiceClient::getInstance()->notifyStatusObservers(LLVoiceClientStatusObserver::STATUS_JOINED); + } + } + } +} + +void LLWebRTCVoiceClient::OnConnectionShutDown(const std::string &channelID, const LLUUID ®ionID) +{ + if (mSession && (mSession->mChannelID == channelID)) + { + if (gAgent.getRegion()->getRegionID() == regionID) + { + if (mSession && mSession->mChannelID == channelID) + { + LL_DEBUGS("Voice") << "Main WebRTC Connection Shut Down." << LL_ENDL; + } + } + mSession->removeAllParticipants(regionID); + } +} + +void LLWebRTCVoiceClient::OnConnectionFailure(const std::string &channelID, + const LLUUID ®ionID, + LLVoiceClientStatusObserver::EStatusType status_type) +{ + LL_DEBUGS("Voice") << "A connection failed. channel:" << channelID << LL_ENDL; + if (gAgent.getRegion()->getRegionID() == regionID) + { + if (mNextSession && mNextSession->mChannelID == channelID) + { + LLWebRTCVoiceClient::getInstance()->notifyStatusObservers(status_type); + } + else if (mSession && mSession->mChannelID == channelID) + { + LLWebRTCVoiceClient::getInstance()->notifyStatusObservers(status_type); + } + } +} + +// ----------------------------------------------------------- +// positional functionality. +void LLWebRTCVoiceClient::setEarLocation(S32 loc) +{ + if (mEarLocation != loc) + { + LL_DEBUGS("Voice") << "Setting mEarLocation to " << loc << LL_ENDL; + + mEarLocation = loc; + mSpatialCoordsDirty = true; + } +} + +void LLWebRTCVoiceClient::updatePosition(void) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + LLViewerRegion *region = gAgent.getRegion(); + if (region && isAgentAvatarValid()) + { + // get the avatar position. 
+ LLVector3d avatar_pos = gAgentAvatarp->getPositionGlobal(); + LLQuaternion avatar_qrot = gAgentAvatarp->getRootJoint()->getWorldRotation(); + + avatar_pos += LLVector3d(0.f, 0.f, 1.f); // bump it up to head height + + LLVector3d earPosition; + LLQuaternion earRot; + switch (mEarLocation) + { + case earLocCamera: + default: + earPosition = region->getPosGlobalFromRegion(LLViewerCamera::getInstance()->getOrigin()); + earRot = LLViewerCamera::getInstance()->getQuaternion(); + break; + + case earLocAvatar: + earPosition = mAvatarPosition; + earRot = mAvatarRot; + break; + + case earLocMixed: + earPosition = mAvatarPosition; + earRot = LLViewerCamera::getInstance()->getQuaternion(); + break; + } + setListenerPosition(earPosition, // position + LLVector3::zero, // velocity + earRot); // rotation matrix + + setAvatarPosition(avatar_pos, // position + LLVector3::zero, // velocity + avatar_qrot); // rotation matrix + + enforceTether(); + + updateNeighboringRegions(); + + // update own region id to be the region id avatar is currently in. + LLWebRTCVoiceClient::participantStatePtr_t participant = findParticipantByID("Estate", gAgentID); + if(participant) + { + participant->mRegion = gAgent.getRegion()->getRegionID(); + } + } +} + +void LLWebRTCVoiceClient::setListenerPosition(const LLVector3d &position, const LLVector3 &velocity, const LLQuaternion &rot) +{ + mListenerRequestedPosition = position; + + if (mListenerVelocity != velocity) + { + mListenerVelocity = velocity; + mSpatialCoordsDirty = true; + } + + if (mListenerRot != rot) + { + mListenerRot = rot; + mSpatialCoordsDirty = true; + } +} + +void LLWebRTCVoiceClient::setAvatarPosition(const LLVector3d &position, const LLVector3 &velocity, const LLQuaternion &rot) +{ + if (dist_vec_squared(mAvatarPosition, position) > 0.01) + { + mAvatarPosition = position; + mSpatialCoordsDirty = true; + } + + if (mAvatarVelocity != velocity) + { + mAvatarVelocity = velocity; + mSpatialCoordsDirty = true; + } + + // If the two rotations are not exactly equal test their dot product + // to get the cos of the angle between them. + // If it is too small, don't update. + F32 rot_cos_diff = llabs(dot(mAvatarRot, rot)); + if ((mAvatarRot != rot) && (rot_cos_diff < MINUSCULE_ANGLE_COS)) + { + mAvatarRot = rot; + mSpatialCoordsDirty = true; + } +} + +// The listener (camera) must be within 50m of the +// avatar. Enforce it on the client. +// This will also be enforced on the voice server +// based on position sent from the simulator to the +// voice server. +void LLWebRTCVoiceClient::enforceTether() +{ + LLVector3d tethered = mListenerRequestedPosition; + + // constrain 'tethered' to within 50m of mAvatarPosition. + { + LLVector3d camera_offset = mListenerRequestedPosition - mAvatarPosition; + F32 camera_distance = (F32) camera_offset.magVec(); + if (camera_distance > MAX_AUDIO_DIST) + { + tethered = mAvatarPosition + (MAX_AUDIO_DIST / camera_distance) * camera_offset; + } + } + + if (dist_vec_squared(mListenerPosition, tethered) > 0.01) + { + mListenerPosition = tethered; + mSpatialCoordsDirty = true; + } +} + +// We send our position via a WebRTC data channel to the WebRTC +// server for fine-grained, low latency updates. On the server, +// these updates will be 'tethered' to the actual position of the avatar. +// Those updates are higher latency, however. 
+// This mechanism gives low latency spatial updates and server-enforced +// prevention of 'evesdropping' by sending camera updates beyond the +// standard 50m +void LLWebRTCVoiceClient::sendPositionUpdate(bool force) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + Json::FastWriter writer; + std::string spatial_data; + + if (mSpatialCoordsDirty || force) + { + Json::Value spatial = Json::objectValue; + + spatial["sp"] = Json::objectValue; + spatial["sp"]["x"] = (int) (mAvatarPosition[0] * 100); + spatial["sp"]["y"] = (int) (mAvatarPosition[1] * 100); + spatial["sp"]["z"] = (int) (mAvatarPosition[2] * 100); + spatial["sh"] = Json::objectValue; + spatial["sh"]["x"] = (int) (mAvatarRot[0] * 100); + spatial["sh"]["y"] = (int) (mAvatarRot[1] * 100); + spatial["sh"]["z"] = (int) (mAvatarRot[2] * 100); + spatial["sh"]["w"] = (int) (mAvatarRot[3] * 100); + + spatial["lp"] = Json::objectValue; + spatial["lp"]["x"] = (int) (mListenerPosition[0] * 100); + spatial["lp"]["y"] = (int) (mListenerPosition[1] * 100); + spatial["lp"]["z"] = (int) (mListenerPosition[2] * 100); + spatial["lh"] = Json::objectValue; + spatial["lh"]["x"] = (int) (mListenerRot[0] * 100); + spatial["lh"]["y"] = (int) (mListenerRot[1] * 100); + spatial["lh"]["z"] = (int) (mListenerRot[2] * 100); + spatial["lh"]["w"] = (int) (mListenerRot[3] * 100); + + mSpatialCoordsDirty = false; + spatial_data = writer.write(spatial); + + sessionState::for_each(boost::bind(predSendData, _1, spatial_data)); + } +} + +// Update our own volume on our participant, so it'll show up +// in the UI. This is done on all sessions, so switching +// sessions retains consistent volume levels. +void LLWebRTCVoiceClient::updateOwnVolume() { + F32 audio_level = 0.0; + if (!mMuteMic && !mTuningMode) + { + audio_level = getAudioLevel(); + } + + sessionState::for_each(boost::bind(predUpdateOwnVolume, _1, audio_level)); +} + +//////////////////////////////////// +// Managing list of participants + +// Provider-level participant management + +BOOL LLWebRTCVoiceClient::isParticipantAvatar(const LLUUID &id) +{ + // WebRTC participants are always SL avatars. + return TRUE; +} + +void LLWebRTCVoiceClient::getParticipantList(std::set<LLUUID> &participants) +{ + if (mProcessChannels && mSession) + { + for (participantUUIDMap::iterator iter = mSession->mParticipantsByUUID.begin(); + iter != mSession->mParticipantsByUUID.end(); + iter++) + { + participants.insert(iter->first); + } + } +} + +bool LLWebRTCVoiceClient::isParticipant(const LLUUID &speaker_id) +{ + if (mProcessChannels && mSession) + { + return (mSession->mParticipantsByUUID.find(speaker_id) != mSession->mParticipantsByUUID.end()); + } + return false; +} + +// protected provider-level participant management. 
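+// (For reference: the spatial update assembled by sendPositionUpdate() above travels
+// over the WebRTC data channel as JSON, with positions and rotations scaled by 100
+// into integers, along these lines; the values here are illustrative only:
+//     {"sp":{"x":123456,"y":654321,"z":2250}, "sh":{"x":0,"y":0,"z":70,"w":70},
+//      "lp":{"x":123450,"y":654300,"z":2310}, "lh":{"x":0,"y":0,"z":70,"w":70}} )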
+LLWebRTCVoiceClient::participantStatePtr_t LLWebRTCVoiceClient::findParticipantByID(const std::string &channelID, const LLUUID &id) +{ + participantStatePtr_t result; + LLWebRTCVoiceClient::sessionState::ptr_t session = sessionState::matchSessionByChannelID(channelID); + + if (session) + { + result = session->findParticipantByID(id); + } + + return result; +} + +LLWebRTCVoiceClient::participantStatePtr_t LLWebRTCVoiceClient::addParticipantByID(const std::string &channelID, const LLUUID &id, const LLUUID& region) +{ + participantStatePtr_t result; + LLWebRTCVoiceClient::sessionState::ptr_t session = sessionState::matchSessionByChannelID(channelID); + if (session) + { + result = session->addParticipant(id, region); + if (session->mNotifyOnFirstJoin && (id != gAgentID)) + { + notifyStatusObservers(LLVoiceClientStatusObserver::STATUS_JOINED); + } + } + return result; +} + +void LLWebRTCVoiceClient::removeParticipantByID(const std::string &channelID, const LLUUID &id, const LLUUID& region) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + participantStatePtr_t result; + LLWebRTCVoiceClient::sessionState::ptr_t session = sessionState::matchSessionByChannelID(channelID); + if (session) + { + participantStatePtr_t participant = session->findParticipantByID(id); + if (participant && (participant->mRegion == region)) + { + session->removeParticipant(participant); + } + } +} + + +// participantState level participant management +LLWebRTCVoiceClient::participantState::participantState(const LLUUID& agent_id, const LLUUID& region) : + mURI(agent_id.asString()), + mAvatarID(agent_id), + mIsSpeaking(false), + mIsModeratorMuted(false), + mLevel(0.f), + mVolume(LLVoiceClient::VOLUME_DEFAULT), + mRegion(region) +{ +} + +LLWebRTCVoiceClient::participantStatePtr_t LLWebRTCVoiceClient::sessionState::addParticipant(const LLUUID& agent_id, const LLUUID& region) +{ + + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + participantStatePtr_t result; + + participantUUIDMap::iterator iter = mParticipantsByUUID.find(agent_id); + + if (iter != mParticipantsByUUID.end()) + { + result = iter->second; + result->mRegion = region; + } + + if (!result) + { + // participant isn't already in one list or the other. + result.reset(new participantState(agent_id, region)); + mParticipantsByUUID.insert(participantUUIDMap::value_type(agent_id, result)); + result->mAvatarID = agent_id; + } + + LLWebRTCVoiceClient::getInstance()->lookupName(agent_id); + + LLSpeakerVolumeStorage::getInstance()->getSpeakerVolume(result->mAvatarID, result->mVolume); + if (!LLWebRTCVoiceClient::sShuttingDown) + { + LLWebRTCVoiceClient::getInstance()->notifyParticipantObservers(); + } + + LL_DEBUGS("Voice") << "Participant \"" << result->mURI << "\" added." 
<< LL_ENDL; + + return result; +} + + +// session-level participant management + +LLWebRTCVoiceClient::participantStatePtr_t LLWebRTCVoiceClient::sessionState::findParticipantByID(const LLUUID& id) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + participantStatePtr_t result; + participantUUIDMap::iterator iter = mParticipantsByUUID.find(id); + + if(iter != mParticipantsByUUID.end()) + { + result = iter->second; + } + + return result; +} + +void LLWebRTCVoiceClient::sessionState::removeParticipant(const LLWebRTCVoiceClient::participantStatePtr_t &participant) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + if (participant) + { + LLUUID participantID = participant->mAvatarID; + participantUUIDMap::iterator iter = mParticipantsByUUID.find(participant->mAvatarID); + + LL_DEBUGS("Voice") << "participant \"" << participant->mURI << "\" (" << participantID << ") removed." << LL_ENDL; + + if (iter == mParticipantsByUUID.end()) + { + LL_WARNS("Voice") << "Internal error: participant ID " << participantID << " not in UUID map" << LL_ENDL; + } + else + { + mParticipantsByUUID.erase(iter); + if (!LLWebRTCVoiceClient::sShuttingDown) + { + LLWebRTCVoiceClient::getInstance()->notifyParticipantObservers(); + } + } + if (mHangupOnLastLeave && (participantID != gAgentID) && (mParticipantsByUUID.size() <= 1) && LLWebRTCVoiceClient::instanceExists()) + { + LLWebRTCVoiceClient::getInstance()->notifyStatusObservers(LLVoiceClientStatusObserver::STATUS_LEFT_CHANNEL); + } + } +} + +void LLWebRTCVoiceClient::sessionState::removeAllParticipants(const LLUUID ®ion) +{ + std::vector<participantStatePtr_t> participantsToRemove; + + for (auto& participantEntry : mParticipantsByUUID) + { + if (region.isNull() || (participantEntry.second->mRegion == region)) + { + participantsToRemove.push_back(participantEntry.second); + } + } + for (auto& participant : participantsToRemove) + { + removeParticipant(participant); + } +} + +// Initiated the various types of sessions. +bool LLWebRTCVoiceClient::startEstateSession() +{ + leaveChannel(false); + mNextSession = addSession("Estate", sessionState::ptr_t(new estateSessionState())); + return true; +} + +bool LLWebRTCVoiceClient::startParcelSession(const std::string &channelID, S32 parcelID) +{ + leaveChannel(false); + mNextSession = addSession(channelID, sessionState::ptr_t(new parcelSessionState(channelID, parcelID))); + return true; +} + +bool LLWebRTCVoiceClient::startAdHocSession(const LLSD& channelInfo, bool notify_on_first_join, bool hangup_on_last_leave) +{ + leaveChannel(false); + LL_WARNS("Voice") << "Start AdHoc Session " << channelInfo << LL_ENDL; + std::string channelID = channelInfo["channel_uri"]; + std::string credentials = channelInfo["channel_credentials"]; + mNextSession = addSession(channelID, + sessionState::ptr_t(new adhocSessionState(channelID, + credentials, + notify_on_first_join, + hangup_on_last_leave))); + return true; +} + +bool LLWebRTCVoiceClient::isVoiceWorking() const +{ + return mIsProcessingChannels; +} + +// Returns true if calling back the session URI after the session has closed is possible. +// Currently this will be false only for PSTN P2P calls. +BOOL LLWebRTCVoiceClient::isSessionCallBackPossible(const LLUUID &session_id) +{ + sessionStatePtr_t session(findP2PSession(session_id)); + return session && session->isCallbackPossible() ? 
TRUE : FALSE; +} + +// Channel Management + +bool LLWebRTCVoiceClient::setSpatialChannel(const LLSD &channelInfo) +{ + LL_INFOS("Voice") << "SetSpatialChannel " << channelInfo << LL_ENDL; + LLViewerRegion *regionp = gAgent.getRegion(); + if (!regionp) + { + return false; + } + LLParcel *parcel = LLViewerParcelMgr::getInstance()->getAgentParcel(); + + // we don't really have credentials for a spatial channel in webrtc, + // it's all handled by the sim. + if (channelInfo.isMap() && channelInfo.has("channel_uri")) + { + bool allow_voice = !channelInfo["channel_uri"].asString().empty(); + if (parcel) + { + parcel->setParcelFlag(PF_ALLOW_VOICE_CHAT, allow_voice); + parcel->setParcelFlag(PF_USE_ESTATE_VOICE_CHAN, channelInfo["channel_uri"].asUUID() == regionp->getRegionID()); + } + else + { + regionp->setRegionFlag(REGION_FLAGS_ALLOW_VOICE, allow_voice); + } + } + return true; +} + +void LLWebRTCVoiceClient::leaveNonSpatialChannel() +{ + LL_DEBUGS("Voice") << "Request to leave non-spatial channel." << LL_ENDL; + + // make sure we're not simply rejoining the current session + deleteSession(mNextSession); + + leaveChannel(true); +} + +// determine whether we're processing channels, or whether +// another voice provider is. +void LLWebRTCVoiceClient::processChannels(bool process) +{ + mProcessChannels = process; +} + +bool LLWebRTCVoiceClient::inProximalChannel() +{ + return inSpatialChannel(); +} + +bool LLWebRTCVoiceClient::inOrJoiningChannel(const std::string& channelID) +{ + return (mSession && mSession->mChannelID == channelID) || (mNextSession && mNextSession->mChannelID == channelID); +} + +bool LLWebRTCVoiceClient::inEstateChannel() +{ + return (mSession && mSession->isEstate()) || (mNextSession && mNextSession->isEstate()); +} + +bool LLWebRTCVoiceClient::inSpatialChannel() +{ + bool result = true; + + if (mNextSession) + { + result = mNextSession->isSpatial(); + } + else if(mSession) + { + result = mSession->isSpatial(); + } + + return result; +} + +// retrieves information used to negotiate p2p, adhoc, and group +// channels +LLSD LLWebRTCVoiceClient::getAudioSessionChannelInfo() +{ + LLSD result; + + if (mSession) + { + result["voice_server_type"] = WEBRTC_VOICE_SERVER_TYPE; + result["channel_uri"] = mSession->mChannelID; + } + + return result; +} + +void LLWebRTCVoiceClient::leaveChannel(bool stopTalking) +{ + if (mSession) + { + deleteSession(mSession); + } + + if (mNextSession) + { + deleteSession(mNextSession); + } + + // If voice was on, turn it off + if (stopTalking && LLVoiceClient::getInstance()->getUserPTTState()) + { + LLVoiceClient::getInstance()->setUserPTTState(false); + } +} + +bool LLWebRTCVoiceClient::isCurrentChannel(const LLSD &channelInfo) +{ + if (!mProcessChannels || (channelInfo["voice_server_type"].asString() != WEBRTC_VOICE_SERVER_TYPE)) + { + return false; + } + + sessionStatePtr_t session = mSession; + if (!session) + { + session = mNextSession; + } + + if (session) + { + if (!channelInfo["session_handle"].asString().empty()) + { + return session->mHandle == channelInfo["session_handle"].asString(); + } + return channelInfo["channel_uri"].asString() == session->mChannelID; + } + return false; +} + +bool LLWebRTCVoiceClient::compareChannels(const LLSD &channelInfo1, const LLSD &channelInfo2) +{ + return (channelInfo1["voice_server_type"] == WEBRTC_VOICE_SERVER_TYPE) && + (channelInfo1["voice_server_type"] == channelInfo2["voice_server_type"]) && + (channelInfo1["sip_uri"] == channelInfo2["sip_uri"]); +} + + +//---------------------------------------------- +// 
Audio muting, volume, gain, etc. + +// we're muting the mic, so tell each session such +void LLWebRTCVoiceClient::setMuteMic(bool muted) +{ + mMuteMic = muted; + // when you're hidden, your mic is always muted. + if (!mHidden) + { + sessionState::for_each(boost::bind(predSetMuteMic, _1, muted)); + } +} + +void LLWebRTCVoiceClient::predSetMuteMic(const LLWebRTCVoiceClient::sessionStatePtr_t &session, bool muted) +{ + participantStatePtr_t participant = session->findParticipantByID(gAgentID); + if (participant) + { + participant->mLevel = 0.0; + } + session->setMuteMic(muted); +} + +void LLWebRTCVoiceClient::setVoiceVolume(F32 volume) +{ + if (volume != mSpeakerVolume) + { + { + mSpeakerVolume = volume; + } + sessionState::for_each(boost::bind(predSetSpeakerVolume, _1, volume)); + } +} + +void LLWebRTCVoiceClient::predSetSpeakerVolume(const LLWebRTCVoiceClient::sessionStatePtr_t &session, F32 volume) +{ + session->setSpeakerVolume(volume); +} + +void LLWebRTCVoiceClient::setMicGain(F32 gain) +{ + if (gain != mMicGain) + { + mMicGain = gain; + mWebRTCDeviceInterface->setPeerConnectionGain(gain); + } +} + + +void LLWebRTCVoiceClient::setVoiceEnabled(bool enabled) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + LL_DEBUGS("Voice") + << "( " << (enabled ? "enabled" : "disabled") << " )" + << " was "<< (mVoiceEnabled ? "enabled" : "disabled") + << " coro "<< (mIsCoroutineActive ? "active" : "inactive") + << LL_ENDL; + + if (enabled != mVoiceEnabled) + { + // TODO: Refactor this so we don't call into LLVoiceChannel, but simply + // use the status observer + mVoiceEnabled = enabled; + LLVoiceClientStatusObserver::EStatusType status; + + if (enabled) + { + LL_DEBUGS("Voice") << "enabling" << LL_ENDL; + LLVoiceChannel::getCurrentVoiceChannel()->activate(); + status = LLVoiceClientStatusObserver::STATUS_VOICE_ENABLED; + mSpatialCoordsDirty = true; + updatePosition(); + if (!mIsCoroutineActive) + { + LLCoros::instance().launch("LLWebRTCVoiceClient::voiceConnectionCoro", + boost::bind(&LLWebRTCVoiceClient::voiceConnectionCoro, LLWebRTCVoiceClient::getInstance())); + } + else + { + LL_DEBUGS("Voice") << "coro should be active.. not launching" << LL_ENDL; + } + } + else + { + // Turning voice off looses your current channel -- this makes sure the UI isn't out of sync when you re-enable it. 
+ LLVoiceChannel::getCurrentVoiceChannel()->deactivate(); + gAgent.setVoiceConnected(false); + status = LLVoiceClientStatusObserver::STATUS_VOICE_DISABLED; + cleanUp(); + } + + notifyStatusObservers(status); + } + else + { + LL_DEBUGS("Voice") << " no-op" << LL_ENDL; + } +} + + +///////////////////////////// +// Accessors for data related to nearby speakers + +std::string LLWebRTCVoiceClient::getDisplayName(const LLUUID& id) +{ + std::string result; + if (mProcessChannels && mSession) + { + participantStatePtr_t participant(mSession->findParticipantByID(id)); + if (participant) + { + result = participant->mDisplayName; + } + } + return result; +} + +BOOL LLWebRTCVoiceClient::getIsSpeaking(const LLUUID& id) +{ + BOOL result = FALSE; + if (mProcessChannels && mSession) + { + participantStatePtr_t participant(mSession->findParticipantByID(id)); + if (participant) + { + result = participant->mIsSpeaking; + } + } + return result; +} + +// TODO: Need to pull muted status from the webrtc server +BOOL LLWebRTCVoiceClient::getIsModeratorMuted(const LLUUID& id) +{ + BOOL result = FALSE; + if (mProcessChannels && mSession) + { + participantStatePtr_t participant(mSession->findParticipantByID(id)); + if (participant) + { + result = participant->mIsModeratorMuted; + } + } + return result; +} + +F32 LLWebRTCVoiceClient::getCurrentPower(const LLUUID &id) +{ + F32 result = 0.0; + if (!mProcessChannels || !mSession) + { + return result; + } + participantStatePtr_t participant(mSession->findParticipantByID(id)); + if (participant) + { + if (participant->mIsSpeaking) + { + result = participant->mLevel; + } + } + return result; +} + +// External accessors. +F32 LLWebRTCVoiceClient::getUserVolume(const LLUUID& id) +{ + // Minimum volume will be returned for users with voice disabled + F32 result = LLVoiceClient::VOLUME_MIN; + + if (mSession) + { + participantStatePtr_t participant(mSession->findParticipantByID(id)); + if (participant) + { + result = participant->mVolume; + } + } + + return result; +} + +void LLWebRTCVoiceClient::setUserVolume(const LLUUID& id, F32 volume) +{ + F32 clamped_volume = llclamp(volume, LLVoiceClient::VOLUME_MIN, LLVoiceClient::VOLUME_MAX); + if(mSession) + { + participantStatePtr_t participant(mSession->findParticipantByID(id)); + if (participant && (participant->mAvatarID != gAgentID)) + { + if (!is_approx_equal(volume, LLVoiceClient::VOLUME_DEFAULT)) + { + // Store this volume setting for future sessions if it has been + // changed from the default + LLSpeakerVolumeStorage::getInstance()->storeSpeakerVolume(id, volume); + } + else + { + // Remove stored volume setting if it is returned to the default + LLSpeakerVolumeStorage::getInstance()->removeSpeakerVolume(id); + } + + participant->mVolume = clamped_volume; + } + } + sessionState::for_each(boost::bind(predSetUserVolume, _1, id, clamped_volume)); +} + +// set volume level (gain level) for another user. 
+void LLWebRTCVoiceClient::predSetUserVolume(const LLWebRTCVoiceClient::sessionStatePtr_t &session, const LLUUID &id, F32 volume) +{ + session->setUserVolume(id, volume); +} + +//////////////////////// +///LLMuteListObserver +/// + +void LLWebRTCVoiceClient::onChange() +{ +} + +void LLWebRTCVoiceClient::onChangeDetailed(const LLMute& mute) +{ + if (mute.mType == LLMute::AGENT) + { + bool muted = ((mute.mFlags & LLMute::flagVoiceChat) == 0); + sessionState::for_each(boost::bind(predSetUserMute, _1, mute.mID, muted)); + } +} + +void LLWebRTCVoiceClient::predSetUserMute(const LLWebRTCVoiceClient::sessionStatePtr_t &session, const LLUUID &id, bool mute) +{ + session->setUserMute(id, mute); +} + +//------------------------------------------------------------------------ +// Sessions + +std::map<std::string, LLWebRTCVoiceClient::sessionState::ptr_t> LLWebRTCVoiceClient::sessionState::sSessions; + + +LLWebRTCVoiceClient::sessionState::sessionState() : + mHangupOnLastLeave(false), + mNotifyOnFirstJoin(false), + mMuted(false), + mSpeakerVolume(1.0), + mShuttingDown(false) +{ +} +// ------------------------------------------------------------------ +// Predicates, for calls to all sessions + +void LLWebRTCVoiceClient::predUpdateOwnVolume(const LLWebRTCVoiceClient::sessionStatePtr_t &session, F32 audio_level) +{ + participantStatePtr_t participant = session->findParticipantByID(gAgentID); + if (participant) + { + participant->mLevel = audio_level; + // TODO: Add VAD for our own voice. + participant->mIsSpeaking = audio_level > SPEAKING_AUDIO_LEVEL; + } +} + +void LLWebRTCVoiceClient::predSendData(const LLWebRTCVoiceClient::sessionStatePtr_t &session, const std::string &spatial_data) +{ + if (session->isSpatial() && !spatial_data.empty()) + { + session->sendData(spatial_data); + } +} + +void LLWebRTCVoiceClient::sessionState::sendData(const std::string &data) +{ + for (auto &connection : mWebRTCConnections) + { + connection->sendData(data); + } +} + +void LLWebRTCVoiceClient::sessionState::setMuteMic(bool muted) +{ + mMuted = muted; + for (auto &connection : mWebRTCConnections) + { + connection->setMuteMic(muted); + } +} + +void LLWebRTCVoiceClient::sessionState::setSpeakerVolume(F32 volume) +{ + mSpeakerVolume = volume; + for (auto &connection : mWebRTCConnections) + { + connection->setSpeakerVolume(volume); + } +} + +void LLWebRTCVoiceClient::sessionState::setUserVolume(const LLUUID &id, F32 volume) +{ + if (mParticipantsByUUID.find(id) == mParticipantsByUUID.end()) + { + return; + } + for (auto &connection : mWebRTCConnections) + { + connection->setUserVolume(id, volume); + } +} + +void LLWebRTCVoiceClient::sessionState::setUserMute(const LLUUID &id, bool mute) +{ + if (mParticipantsByUUID.find(id) == mParticipantsByUUID.end()) + { + return; + } + for (auto &connection : mWebRTCConnections) + { + connection->setUserMute(id, mute); + } +} +/*static*/ +void LLWebRTCVoiceClient::sessionState::addSession( + const std::string & channelID, + LLWebRTCVoiceClient::sessionState::ptr_t& session) +{ + sSessions[channelID] = session; +} + +LLWebRTCVoiceClient::sessionState::~sessionState() +{ + LL_DEBUGS("Voice") << "Destroying session CHANNEL=" << mChannelID << LL_ENDL; + + if (!mShuttingDown) + { + shutdownAllConnections(); + } + mWebRTCConnections.clear(); + + removeAllParticipants(); +} + +/*static*/ +LLWebRTCVoiceClient::sessionState::ptr_t LLWebRTCVoiceClient::sessionState::matchSessionByChannelID(const std::string& channel_id) +{ + sessionStatePtr_t result; + + // *TODO: My kingdom for a lambda! 
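+    // (With lambdas now available, the boost::bind plumbing used by for_each() below
+    // could be written as a plain range-for, e.g.:
+    //     for (auto &entry : sSessions) { func(entry.second); }
+    // )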
+ std::map<std::string, ptr_t>::iterator it = sSessions.find(channel_id); + if (it != sSessions.end()) + { + result = (*it).second; + } + return result; +} + +void LLWebRTCVoiceClient::sessionState::for_each(sessionFunc_t func) +{ + std::for_each(sSessions.begin(), sSessions.end(), boost::bind(for_eachPredicate, _1, func)); +} + +void LLWebRTCVoiceClient::sessionState::reapEmptySessions() +{ + std::map<std::string, ptr_t>::iterator iter; + for (iter = sSessions.begin(); iter != sSessions.end();) + { + if (iter->second->isEmpty()) + { + iter = sSessions.erase(iter); + } + else + { + ++iter; + } + } +} + +/*static*/ +void LLWebRTCVoiceClient::sessionState::for_eachPredicate(const std::pair<std::string, LLWebRTCVoiceClient::sessionState::wptr_t> &a, sessionFunc_t func) +{ + ptr_t aLock(a.second.lock()); + + if (aLock) + func(aLock); + else + { + LL_WARNS("Voice") << "Stale handle in session map!" << LL_ENDL; + } +} + +LLWebRTCVoiceClient::sessionStatePtr_t LLWebRTCVoiceClient::addSession(const std::string &channel_id, sessionState::ptr_t session) +{ + sessionStatePtr_t existingSession = sessionState::matchSessionByChannelID(channel_id); + if (!existingSession) + { + // No existing session found. + + LL_DEBUGS("Voice") << "adding new session with channel: " << channel_id << LL_ENDL; + session->setMuteMic(mMuteMic); + session->setSpeakerVolume(mSpeakerVolume); + + sessionState::addSession(channel_id, session); + return session; + } + else + { + // Found an existing session + LL_DEBUGS("Voice") << "Attempting to add already-existing session " << channel_id << LL_ENDL; + existingSession->revive(); + + return existingSession; + } +} + +void LLWebRTCVoiceClient::sessionState::clearSessions() +{ + sSessions.clear(); +} + +LLWebRTCVoiceClient::sessionStatePtr_t LLWebRTCVoiceClient::findP2PSession(const LLUUID &agent_id) +{ + sessionStatePtr_t result = sessionState::matchSessionByChannelID(agent_id.asString()); + if (result && !result->isSpatial()) + { + return result; + } + + result.reset(); + return result; +} + +void LLWebRTCVoiceClient::sessionState::shutdownAllConnections() +{ + mShuttingDown = true; + for (auto &&connection : mWebRTCConnections) + { + connection->shutDown(); + } +} + +// in case we drop into a session (spatial, etc.) right after +// telling the session to shut down, revive it so it reconnects. +void LLWebRTCVoiceClient::sessionState::revive() +{ + mShuttingDown = false; +} + +//========================================================================= +// the following are methods to support the coroutine implementation of the +// voice connection and processing. They should only be called in the context +// of a coroutine. +// +// + +void LLWebRTCVoiceClient::sessionState::processSessionStates() +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + auto iter = sSessions.begin(); + while (iter != sSessions.end()) + { + if (!iter->second->processConnectionStates() && iter->second->mShuttingDown) + { + // if the connections associated with a session are gone, + // and this session is shutting down, remove it. + iter = sSessions.erase(iter); + } + else + { + iter++; + } + } +} + +// process the states on each connection associated with a session. 
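+// Returns false once every connection has shut down and been removed, which lets
+// processSessionStates() above reap the session once it is shutting down.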
+bool LLWebRTCVoiceClient::sessionState::processConnectionStates() +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + std::list<connectionPtr_t>::iterator iter = mWebRTCConnections.begin(); + while (iter != mWebRTCConnections.end()) + { + if (!iter->get()->connectionStateMachine()) + { + // if the state machine returns false, the connection is shut down + // so delete it. + iter = mWebRTCConnections.erase(iter); + } + else + { + ++iter; + } + } + return !mWebRTCConnections.empty(); +} + +// processing of spatial voice connection states requires special handling. +// as neighboring regions need to be started up or shut down depending +// on our location. +bool LLWebRTCVoiceClient::estateSessionState::processConnectionStates() +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + if (!mShuttingDown) + { + // Estate voice requires connection to neighboring regions. + std::set<LLUUID> neighbor_ids = LLWebRTCVoiceClient::getInstance()->getNeighboringRegions(); + + for (auto &connection : mWebRTCConnections) + { + std::shared_ptr<LLVoiceWebRTCSpatialConnection> spatialConnection = + std::static_pointer_cast<LLVoiceWebRTCSpatialConnection>(connection); + + LLUUID regionID = spatialConnection.get()->getRegionID(); + + if (neighbor_ids.find(regionID) == neighbor_ids.end()) + { + // shut down connections to neighbors that are too far away. + spatialConnection.get()->shutDown(); + } + neighbor_ids.erase(regionID); + } + + // add new connections for new neighbors + for (auto &neighbor : neighbor_ids) + { + connectionPtr_t connection(new LLVoiceWebRTCSpatialConnection(neighbor, INVALID_PARCEL_ID, mChannelID)); + + mWebRTCConnections.push_back(connection); + connection->setMuteMic(mMuted); + connection->setSpeakerVolume(mSpeakerVolume); + } + } + return LLWebRTCVoiceClient::sessionState::processConnectionStates(); +} + +// Various session state constructors. + +LLWebRTCVoiceClient::estateSessionState::estateSessionState() +{ + mHangupOnLastLeave = false; + mNotifyOnFirstJoin = false; + mChannelID = "Estate"; + LLUUID region_id = gAgent.getRegion()->getRegionID(); + + mWebRTCConnections.emplace_back(new LLVoiceWebRTCSpatialConnection(region_id, INVALID_PARCEL_ID, "Estate")); +} + +LLWebRTCVoiceClient::parcelSessionState::parcelSessionState(const std::string &channelID, S32 parcel_local_id) +{ + mHangupOnLastLeave = false; + mNotifyOnFirstJoin = false; + LLUUID region_id = gAgent.getRegion()->getRegionID(); + mChannelID = channelID; + mWebRTCConnections.emplace_back(new LLVoiceWebRTCSpatialConnection(region_id, parcel_local_id, channelID)); +} + +LLWebRTCVoiceClient::adhocSessionState::adhocSessionState(const std::string &channelID, + const std::string &credentials, + bool notify_on_first_join, + bool hangup_on_last_leave) : + mCredentials(credentials) +{ + mHangupOnLastLeave = hangup_on_last_leave; + mNotifyOnFirstJoin = notify_on_first_join; + LLUUID region_id = gAgent.getRegion()->getRegionID(); + mChannelID = channelID; + mWebRTCConnections.emplace_back(new LLVoiceWebRTCAdHocConnection(region_id, channelID, credentials)); +} + +void LLWebRTCVoiceClient::predShutdownSession(const LLWebRTCVoiceClient::sessionStatePtr_t& session) +{ + session->shutdownAllConnections(); +} + +void LLWebRTCVoiceClient::deleteSession(const sessionStatePtr_t &session) +{ + if (!session) + { + return; + } + + // At this point, the session should be unhooked from all lists and all state should be consistent. + session->shutdownAllConnections(); + // If this is the current audio session, clean up the pointer which will soon be dangling. 
+ bool deleteAudioSession = mSession == session; + bool deleteNextAudioSession = mNextSession == session; + if (deleteAudioSession) + { + mSession.reset(); + } + + // ditto for the next audio session + if (deleteNextAudioSession) + { + mNextSession.reset(); + } +} + + +// Name resolution +void LLWebRTCVoiceClient::lookupName(const LLUUID &id) +{ + if (mAvatarNameCacheConnection.connected()) + { + mAvatarNameCacheConnection.disconnect(); + } + mAvatarNameCacheConnection = LLAvatarNameCache::get(id, boost::bind(&LLWebRTCVoiceClient::onAvatarNameCache, this, _1, _2)); +} + +void LLWebRTCVoiceClient::onAvatarNameCache(const LLUUID& agent_id, + const LLAvatarName& av_name) +{ + mAvatarNameCacheConnection.disconnect(); + std::string display_name = av_name.getDisplayName(); + avatarNameResolved(agent_id, display_name); +} + +void LLWebRTCVoiceClient::predAvatarNameResolution(const LLWebRTCVoiceClient::sessionStatePtr_t &session, LLUUID id, std::string name) +{ + participantStatePtr_t participant(session->findParticipantByID(id)); + if (participant) + { + // Found -- fill in the name + participant->mDisplayName = name; + // and post a "participants updated" message to listeners later. + LLWebRTCVoiceClient::getInstance()->notifyParticipantObservers(); + } +} + +void LLWebRTCVoiceClient::avatarNameResolved(const LLUUID &id, const std::string &name) +{ + sessionState::for_each(boost::bind(predAvatarNameResolution, _1, id, name)); +} + +// Leftover from vivox PTSN +std::string LLWebRTCVoiceClient::sipURIFromID(const LLUUID& id) const +{ + return id.asString(); +} + +LLSD LLWebRTCVoiceClient::getP2PChannelInfoTemplate(const LLUUID& id) const +{ + return LLSD(); +} + + +///////////////////////////// +// LLVoiceWebRTCConnection +// These connections manage state transitions, negotiating webrtc connections, +// and other such things for a single connection to a Secondlife WebRTC server. +// Multiple of these connections may be active at once, in the case of +// cross-region voice, or when a new connection is being created before the old +// has a chance to shut down. +LLVoiceWebRTCConnection::LLVoiceWebRTCConnection(const LLUUID ®ionID, const std::string &channelID) : + mWebRTCAudioInterface(nullptr), + mWebRTCDataInterface(nullptr), + mVoiceConnectionState(VOICE_STATE_START_SESSION), + mCurrentStatus(LLVoiceClientStatusObserver::STATUS_VOICE_ENABLED), + mMuted(true), + mShutDown(false), + mIceCompleted(false), + mSpeakerVolume(0.0), + mOutstandingRequests(0), + mChannelID(channelID), + mRegionID(regionID), + mPrimary(true), + mRetryWaitPeriod(0) +{ + + // retries wait a short period...randomize it so + // all clients don't try to reconnect at once. + mRetryWaitSecs = ((F32) rand() / (RAND_MAX)) + 0.5; + + mWebRTCPeerConnectionInterface = llwebrtc::newPeerConnection(); + mWebRTCPeerConnectionInterface->setSignalingObserver(this); + mMainQueue = LL::WorkQueue::getInstance("mainloop"); +} + +LLVoiceWebRTCConnection::~LLVoiceWebRTCConnection() +{ + if (LLWebRTCVoiceClient::isShuttingDown()) + { + // peer connection and observers will be cleaned up + // by llwebrtc::terminate() on shutdown. + return; + } + mWebRTCPeerConnectionInterface->unsetSignalingObserver(this); + llwebrtc::freePeerConnection(mWebRTCPeerConnectionInterface); +} + + +// ICE (Interactive Connectivity Establishment) +// When WebRTC tries to negotiate a connection to the Secondlife WebRTC Server, +// the negotiation will result in a few updates about the best path +// to which to connect. 
+// The Secondlife servers are configured for ICE trickling, where, after a session is partially +// negotiated, updates about the best connectivity paths may trickle in. These need to be +// sent to the Secondlife WebRTC server via the simulator so that both sides have a clear +// view of the network environment. + +// callback from llwebrtc +void LLVoiceWebRTCConnection::OnIceGatheringState(llwebrtc::LLWebRTCSignalingObserver::EIceGatheringState state) +{ + LL::WorkQueue::postMaybe(mMainQueue, + [=] { + LL_DEBUGS("Voice") << "Ice Gathering voice account. " << state << LL_ENDL; + + switch (state) + { + case llwebrtc::LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_COMPLETE: + { + mIceCompleted = true; + break; + } + case llwebrtc::LLWebRTCSignalingObserver::EIceGatheringState::ICE_GATHERING_NEW: + { + mIceCompleted = false; + } + default: + break; + } + }); +} + +// callback from llwebrtc +void LLVoiceWebRTCConnection::OnIceCandidate(const llwebrtc::LLWebRTCIceCandidate& candidate) +{ + LL::WorkQueue::postMaybe(mMainQueue, [=] { mIceCandidates.push_back(candidate); }); +} + +void LLVoiceWebRTCConnection::processIceUpdates() +{ + mOutstandingRequests++; + LLCoros::getInstance()->launch("LLVoiceWebRTCConnection::processIceUpdatesCoro", + boost::bind(&LLVoiceWebRTCConnection::processIceUpdatesCoro, this->shared_from_this())); +} + +// Ice candidates may be streamed in before or after the SDP offer is available (see below) +// This function determines whether candidates are available to send to the Secondlife WebRTC +// server via the simulator. If so, and there are no more candidates, this code +// will make the cap call to the server sending up the ICE candidates. +void LLVoiceWebRTCConnection::processIceUpdatesCoro(connectionPtr_t connection) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + if (connection->mShutDown || LLWebRTCVoiceClient::isShuttingDown()) + { + connection->mOutstandingRequests--; + return; + } + + bool iceCompleted = false; + LLSD body; + if (!connection->mIceCandidates.empty() || connection->mIceCompleted) + { + LLViewerRegion *regionp = LLWorld::instance().getRegionFromID(connection->mRegionID); + if (!regionp || !regionp->capabilitiesReceived()) + { + LL_DEBUGS("Voice") << "no capabilities for ice gathering; waiting " << LL_ENDL; + connection->mOutstandingRequests--; + return; + } + + std::string url = regionp->getCapability("VoiceSignalingRequest"); + if (url.empty()) + { + connection->mOutstandingRequests--; + return; + } + + LL_DEBUGS("Voice") << "region ready to complete voice signaling; url=" << url << LL_ENDL; + if (!connection->mIceCandidates.empty()) + { + LLSD candidates = LLSD::emptyArray(); + for (auto &ice_candidate : connection->mIceCandidates) + { + LLSD body_candidate; + body_candidate["sdpMid"] = ice_candidate.mSdpMid; + body_candidate["sdpMLineIndex"] = ice_candidate.mMLineIndex; + body_candidate["candidate"] = ice_candidate.mCandidate; + candidates.append(body_candidate); + } + body["candidates"] = candidates; + connection->mIceCandidates.clear(); + } + else if (connection->mIceCompleted) + { + LLSD body_candidate; + body_candidate["completed"] = true; + body["candidate"] = body_candidate; + iceCompleted = connection->mIceCompleted; + connection->mIceCompleted = false; + } + + body["viewer_session"] = connection->mViewerSession; + body["voice_server_type"] = WEBRTC_VOICE_SERVER_TYPE; + + LLCoreHttpUtil::HttpCoroutineAdapter::ptr_t httpAdapter( + new LLCoreHttpUtil::HttpCoroutineAdapter("LLVoiceWebRTCAdHocConnection::processIceUpdatesCoro", 
+ LLCore::HttpRequest::DEFAULT_POLICY_ID)); + LLCore::HttpRequest::ptr_t httpRequest(new LLCore::HttpRequest); + LLCore::HttpOptions::ptr_t httpOpts(new LLCore::HttpOptions); + + httpOpts->setWantHeaders(true); + + LLSD result = httpAdapter->postAndSuspend(httpRequest, url, body, httpOpts); + + if (LLWebRTCVoiceClient::isShuttingDown()) + { + connection->mOutstandingRequests--; + return; + } + + LLSD httpResults = result[LLCoreHttpUtil::HttpCoroutineAdapter::HTTP_RESULTS]; + LLCore::HttpStatus status = LLCoreHttpUtil::HttpCoroutineAdapter::getStatusFromLLSD(httpResults); + + if (!status) + { + // couldn't trickle the candidates, so restart the session. + connection->setVoiceConnectionState(VOICE_STATE_SESSION_RETRY); + } + } + connection->mOutstandingRequests--; +} + + +// An 'Offer' comes in the form of a SDP (Session Description Protocol) +// which contains all sorts of info about the session, from network paths +// to the type of session (audio, video) to characteristics (the encoder type.) +// This SDP also serves as the 'ticket' to the server, security-wise. +// The Offer is retrieved from the WebRTC library on the client, +// and is passed to the simulator via a CAP, which then passes +// it on to the Secondlife WebRTC server. + +// +// The LLWebRTCVoiceConnection object will not be deleted +// before the webrtc connection itself is shut down, so +// we shouldn't be getting this callback on a nonexistant +// this pointer. + +// callback from llwebrtc +void LLVoiceWebRTCConnection::OnOfferAvailable(const std::string &sdp) +{ + LL::WorkQueue::postMaybe(mMainQueue, + [=] { + if (mShutDown) + { + return; + } + LL_DEBUGS("Voice") << "On Offer Available." << LL_ENDL; + mChannelSDP = sdp; + if (mVoiceConnectionState == VOICE_STATE_WAIT_FOR_SESSION_START) + { + mVoiceConnectionState = VOICE_STATE_REQUEST_CONNECTION; + } + }); +} + + +// +// The LLWebRTCVoiceConnection object will not be deleted +// before the webrtc connection itself is shut down, so +// we shouldn't be getting this callback on a nonexistant +// this pointer. +// nor should audio_interface be invalid if the LLWebRTCVoiceConnection +// is shut down. + +// callback from llwebrtc +void LLVoiceWebRTCConnection::OnAudioEstablished(llwebrtc::LLWebRTCAudioInterface* audio_interface) +{ + LL::WorkQueue::postMaybe(mMainQueue, + [=] { + if (mShutDown) + { + return; + } + LL_DEBUGS("Voice") << "On AudioEstablished." << LL_ENDL; + mWebRTCAudioInterface = audio_interface; + setVoiceConnectionState(VOICE_STATE_SESSION_ESTABLISHED); + }); +} + + +// +// The LLWebRTCVoiceConnection object will not be deleted +// before the webrtc connection itself is shut down, so +// we shouldn't be getting this callback on a nonexistant +// this pointer. + +// callback from llwebrtc +void LLVoiceWebRTCConnection::OnRenegotiationNeeded() +{ + LL::WorkQueue::postMaybe(mMainQueue, + [=] { + LL_DEBUGS("Voice") << "Voice channel requires renegotiation." << LL_ENDL; + if (!mShutDown) + { + setVoiceConnectionState(VOICE_STATE_SESSION_RETRY); + } + mCurrentStatus = LLVoiceClientStatusObserver::ERROR_UNKNOWN; + }); +} + +// callback from llwebrtc +void LLVoiceWebRTCConnection::OnPeerConnectionClosed() +{ + LL::WorkQueue::postMaybe(mMainQueue, + [=] { + LL_DEBUGS("Voice") << "Peer connection has closed." 
<< LL_ENDL; + if (mVoiceConnectionState == VOICE_STATE_WAIT_FOR_CLOSE) + { + setVoiceConnectionState(VOICE_STATE_CLOSED); + mOutstandingRequests--; + } + else if (LLWebRTCVoiceClient::isShuttingDown()) + { + // disconnect was initialized by llwebrtc::terminate() instead of connectionStateMachine + LL_INFOS("Voice") << "Peer connection has closed, but state is " << mVoiceConnectionState << LL_ENDL; + setVoiceConnectionState(VOICE_STATE_CLOSED); + } + }); +} + +void LLVoiceWebRTCConnection::setMuteMic(bool muted) +{ + mMuted = muted; + if (mWebRTCAudioInterface) + { + mWebRTCAudioInterface->setMute(muted); + } +} + +void LLVoiceWebRTCConnection::setSpeakerVolume(F32 volume) +{ + mSpeakerVolume = volume; + if (mWebRTCAudioInterface) + { + mWebRTCAudioInterface->setReceiveVolume(volume); + } +} + +void LLVoiceWebRTCConnection::setUserVolume(const LLUUID& id, F32 volume) +{ + Json::Value root = Json::objectValue; + Json::Value user_gain = Json::objectValue; + user_gain[id.asString()] = (uint32_t)(volume*200); // give it two decimal places with a range from 0-200, where 100 is normal + root["ug"] = user_gain; + Json::FastWriter writer; + std::string json_data = writer.write(root); + if (mWebRTCDataInterface) + { + mWebRTCDataInterface->sendData(json_data, false); + } +} + +void LLVoiceWebRTCConnection::setUserMute(const LLUUID& id, bool mute) +{ + Json::Value root = Json::objectValue; + Json::Value muted = Json::objectValue; + muted[id.asString()] = mute; + root["m"] = muted; + Json::FastWriter writer; + std::string json_data = writer.write(root); + if (mWebRTCDataInterface) + { + mWebRTCDataInterface->sendData(json_data, false); + } +} + + +// Send data to the Secondlife WebRTC server via the webrtc +// data channel. +void LLVoiceWebRTCConnection::sendData(const std::string &data) +{ + if (getVoiceConnectionState() == VOICE_STATE_SESSION_UP && mWebRTCDataInterface) + { + mWebRTCDataInterface->sendData(data, false); + } +} + +// Tell the simulator that we're shutting down a voice connection. +// The simulator will pass this on to the Secondlife WebRTC server. +void LLVoiceWebRTCConnection::breakVoiceConnectionCoro(connectionPtr_t connection) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + LL_DEBUGS("Voice") << "Disconnecting voice." 
<< LL_ENDL; + if (connection->mWebRTCDataInterface) + { + connection->mWebRTCDataInterface->unsetDataObserver(connection.get()); + connection->mWebRTCDataInterface = nullptr; + } + connection->mWebRTCAudioInterface = nullptr; + LLViewerRegion *regionp = LLWorld::instance().getRegionFromID(connection->mRegionID); + if (!regionp || !regionp->capabilitiesReceived()) + { + LL_DEBUGS("Voice") << "no capabilities for voice provisioning; waiting " << LL_ENDL; + connection->setVoiceConnectionState(VOICE_STATE_SESSION_RETRY); + connection->mOutstandingRequests--; + return; + } + + std::string url = regionp->getCapability("ProvisionVoiceAccountRequest"); + if (url.empty()) + { + connection->setVoiceConnectionState(VOICE_STATE_SESSION_RETRY); + connection->mOutstandingRequests--; + return; + } + + LL_DEBUGS("Voice") << "region ready for voice break; url=" << url << LL_ENDL; + + LLVoiceWebRTCStats::getInstance()->provisionAttemptStart(); + LLSD body; + body["logout"] = TRUE; + body["viewer_session"] = connection->mViewerSession; + body["voice_server_type"] = WEBRTC_VOICE_SERVER_TYPE; + + LLCoreHttpUtil::HttpCoroutineAdapter::ptr_t httpAdapter( + new LLCoreHttpUtil::HttpCoroutineAdapter("LLVoiceWebRTCAdHocConnection::breakVoiceConnection", + LLCore::HttpRequest::DEFAULT_POLICY_ID)); + LLCore::HttpRequest::ptr_t httpRequest(new LLCore::HttpRequest); + LLCore::HttpOptions::ptr_t httpOpts(new LLCore::HttpOptions); + + httpOpts->setWantHeaders(true); + + connection->mOutstandingRequests++; + + // tell the server to shut down the connection as a courtesy. + // shutdownConnection will drop the WebRTC connection which will + // also shut things down. + LLSD result = httpAdapter->postAndSuspend(httpRequest, url, body, httpOpts); + + connection->mOutstandingRequests--; + + if (connection->getVoiceConnectionState() == VOICE_STATE_WAIT_FOR_EXIT) + { + connection->setVoiceConnectionState(VOICE_STATE_SESSION_EXIT); + } +} + +// Tell the simulator to tell the Secondlife WebRTC server that we want a voice +// connection. The SDP is sent up as part of this, and the simulator will respond +// with an 'answer' which is in the form of another SDP. The webrtc library +// will use the offer and answer to negotiate the session. +void LLVoiceWebRTCSpatialConnection::requestVoiceConnection() +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + LLViewerRegion *regionp = LLWorld::instance().getRegionFromID(mRegionID); + + LL_DEBUGS("Voice") << "Requesting voice connection." << LL_ENDL; + if (!regionp || !regionp->capabilitiesReceived()) + { + LL_DEBUGS("Voice") << "no capabilities for voice provisioning; waiting " << LL_ENDL; + + // try again. 
+ setVoiceConnectionState(VOICE_STATE_REQUEST_CONNECTION); + return; + } + + std::string url = regionp->getCapability("ProvisionVoiceAccountRequest"); + if (url.empty()) + { + setVoiceConnectionState(VOICE_STATE_SESSION_RETRY); + return; + } + + LL_DEBUGS("Voice") << "region ready for voice provisioning; url=" << url << LL_ENDL; + + LLVoiceWebRTCStats::getInstance()->provisionAttemptStart(); + LLSD body; + LLSD jsep; + jsep["type"] = "offer"; + jsep["sdp"] = mChannelSDP; + body["jsep"] = jsep; + if (mParcelLocalID != INVALID_PARCEL_ID) + { + body["parcel_local_id"] = mParcelLocalID; + } + body["channel_type"] = "local"; + body["voice_server_type"] = WEBRTC_VOICE_SERVER_TYPE; + LLCoreHttpUtil::HttpCoroutineAdapter::ptr_t httpAdapter( + new LLCoreHttpUtil::HttpCoroutineAdapter("LLVoiceWebRTCAdHocConnection::requestVoiceConnection", + LLCore::HttpRequest::DEFAULT_POLICY_ID)); + LLCore::HttpRequest::ptr_t httpRequest(new LLCore::HttpRequest); + LLCore::HttpOptions::ptr_t httpOpts(new LLCore::HttpOptions); + + httpOpts->setWantHeaders(true); + mOutstandingRequests++; + LLSD result = httpAdapter->postAndSuspend(httpRequest, url, body, httpOpts); + + LLSD httpResults = result[LLCoreHttpUtil::HttpCoroutineAdapter::HTTP_RESULTS]; + LLCore::HttpStatus status = LLCoreHttpUtil::HttpCoroutineAdapter::getStatusFromLLSD(httpResults); + + if (status) + { + OnVoiceConnectionRequestSuccess(result); + } + else + { + switch (status.getType()) + { + case HTTP_CONFLICT: + mCurrentStatus = LLVoiceClientStatusObserver::ERROR_CHANNEL_FULL; + break; + case HTTP_UNAUTHORIZED: + mCurrentStatus = LLVoiceClientStatusObserver::ERROR_CHANNEL_LOCKED; + break; + default: + mCurrentStatus = LLVoiceClientStatusObserver::ERROR_UNKNOWN; + break; + } + setVoiceConnectionState(VOICE_STATE_SESSION_EXIT); + } + mOutstandingRequests--; +} + +void LLVoiceWebRTCConnection::OnVoiceConnectionRequestSuccess(const LLSD &result) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + if (LLWebRTCVoiceClient::isShuttingDown()) + { + return; + } + LLVoiceWebRTCStats::getInstance()->provisionAttemptEnd(true); + + if (result.has("viewer_session") && + result.has("jsep") && + result["jsep"].has("type") && + result["jsep"]["type"] == "answer" && + result["jsep"].has("sdp")) + { + mRemoteChannelSDP = result["jsep"]["sdp"].asString(); + mViewerSession = result["viewer_session"]; + } + else + { + LL_WARNS("Voice") << "Invalid voice provision request result:" << result << LL_ENDL; + setVoiceConnectionState(VOICE_STATE_SESSION_EXIT); + return; + } + + LL_DEBUGS("Voice") << "ProvisionVoiceAccountRequest response" + << " channel sdp " << mRemoteChannelSDP << LL_ENDL; + mWebRTCPeerConnectionInterface->AnswerAvailable(mRemoteChannelSDP); +} + +static llwebrtc::LLWebRTCPeerConnectionInterface::InitOptions getConnectionOptions() +{ + llwebrtc::LLWebRTCPeerConnectionInterface::InitOptions options; + llwebrtc::LLWebRTCPeerConnectionInterface::InitOptions::IceServers servers; + + // TODO: Pull these from login + std::string grid = LLGridManager::getInstance()->getGridLoginID(); + std::transform(grid.begin(), grid.end(), grid.begin(), [](unsigned char c){ return std::tolower(c); }); + int num_servers = 2; + if (grid == "agni") + { + num_servers = 3; + } + for (int i=1; i <= num_servers; i++) + { + servers.mUrls.push_back(llformat("stun:stun%d.%s.secondlife.io:3478", i, grid.c_str())); + } + options.mServers.push_back(servers); + return options; +} + + +// Primary state machine for negotiating a single voice connection to the +// Secondlife WebRTC server. 
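+//
+// As a rough reference only (derived from the cases below), the nominal,
+// error-free progression for a single connection is:
+//   VOICE_STATE_START_SESSION          -> initializeConnection() kicks off the local offer
+//   VOICE_STATE_WAIT_FOR_SESSION_START -> OnOfferAvailable() delivers the local SDP
+//   VOICE_STATE_REQUEST_CONNECTION     -> requestVoiceConnectionCoro() posts the offer to the simulator
+//   VOICE_STATE_CONNECTION_WAIT        -> waiting for OnAudioEstablished()
+//   VOICE_STATE_SESSION_ESTABLISHED    -> audio is up; mute/volume settings are applied
+//   VOICE_STATE_WAIT_FOR_DATA_CHANNEL  -> waiting for OnDataChannelReady()
+//   VOICE_STATE_SESSION_UP             -> steady state; sendJoin() has been sent
+// Errors route through VOICE_STATE_SESSION_RETRY, and shutdown runs
+// VOICE_STATE_DISCONNECT -> WAIT_FOR_EXIT -> SESSION_EXIT -> WAIT_FOR_CLOSE -> CLOSED.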
+bool LLVoiceWebRTCConnection::connectionStateMachine() +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE + + processIceUpdates(); + + switch (getVoiceConnectionState()) + { + case VOICE_STATE_START_SESSION: + { + LL_PROFILE_ZONE_NAMED_CATEGORY_VOICE("VOICE_STATE_START_SESSION") + if (mShutDown) + { + setVoiceConnectionState(VOICE_STATE_SESSION_EXIT); + break; + } + mIceCompleted = false; + setVoiceConnectionState(VOICE_STATE_WAIT_FOR_SESSION_START); + + // tell the webrtc library that we want a connection. The library will + // respond with an offer on a separate thread, which will cause + // the session state to change. + if (!mWebRTCPeerConnectionInterface->initializeConnection(getConnectionOptions())) + { + setVoiceConnectionState(VOICE_STATE_SESSION_RETRY); + } + break; + } + + case VOICE_STATE_WAIT_FOR_SESSION_START: + { + if (mShutDown) + { + setVoiceConnectionState(VOICE_STATE_SESSION_EXIT); + } + break; + } + + case VOICE_STATE_REQUEST_CONNECTION: + if (mShutDown) + { + setVoiceConnectionState(VOICE_STATE_SESSION_EXIT); + break; + } + // Ask the sim to ask the Secondlife WebRTC server for a connection to + // a given voice channel. On completion, we'll move on to the + // VOICE_STATE_SESSION_ESTABLISHED via a callback on a webrtc thread. + setVoiceConnectionState(VOICE_STATE_CONNECTION_WAIT); + LLCoros::getInstance()->launch("LLVoiceWebRTCConnection::requestVoiceConnectionCoro", + boost::bind(&LLVoiceWebRTCConnection::requestVoiceConnectionCoro, this->shared_from_this())); + break; + + case VOICE_STATE_CONNECTION_WAIT: + if (mShutDown) + { + setVoiceConnectionState(VOICE_STATE_DISCONNECT); + } + break; + + case VOICE_STATE_SESSION_ESTABLISHED: + { + if (mShutDown) + { + setVoiceConnectionState(VOICE_STATE_DISCONNECT); + break; + } + // update the peer connection with the various characteristics of + // this connection. + mWebRTCAudioInterface->setMute(mMuted); + mWebRTCAudioInterface->setReceiveVolume(mSpeakerVolume); + LLWebRTCVoiceClient::getInstance()->OnConnectionEstablished(mChannelID, mRegionID); + setVoiceConnectionState(VOICE_STATE_WAIT_FOR_DATA_CHANNEL); + break; + } + + case VOICE_STATE_WAIT_FOR_DATA_CHANNEL: + { + if (mShutDown) + { + setVoiceConnectionState(VOICE_STATE_DISCONNECT); + break; + } + if (mWebRTCDataInterface) // the interface will be set when the session is negotiated. + { + sendJoin(); // tell the Secondlife WebRTC server that we're here via the data channel. + setVoiceConnectionState(VOICE_STATE_SESSION_UP); + if (isSpatial()) + { + LLWebRTCVoiceClient::getInstance()->updatePosition(); + LLWebRTCVoiceClient::getInstance()->sendPositionUpdate(true); + } + } + break; + } + + case VOICE_STATE_SESSION_UP: + { + mRetryWaitPeriod = 0; + mRetryWaitSecs = ((F32) rand() / (RAND_MAX)) + 0.5; + LLUUID agentRegionID; + if (isSpatial() && gAgent.getRegion()) + { + + bool primary = (mRegionID == gAgent.getRegion()->getRegionID()); + if (primary != mPrimary) + { + mPrimary = primary; + sendJoin(); + } + } + + // we'll stay here as long as the session remains up. + if (mShutDown) + { + setVoiceConnectionState(VOICE_STATE_DISCONNECT); + } + break; + } + + case VOICE_STATE_SESSION_RETRY: + // only retry ever 'n' seconds + if (mRetryWaitPeriod++ * UPDATE_THROTTLE_SECONDS > mRetryWaitSecs) + { + // something went wrong, so notify that the connection has failed. 
+                LLWebRTCVoiceClient::getInstance()->OnConnectionFailure(mChannelID, mRegionID, mCurrentStatus);
+                setVoiceConnectionState(VOICE_STATE_DISCONNECT);
+                mRetryWaitPeriod = 0;
+                if (mRetryWaitSecs < MAX_RETRY_WAIT_SECONDS)
+                {
+                    // back off the retry period, and do it by a small random
+                    // bit so all clients don't reconnect at once.
+                    mRetryWaitSecs += ((F32) rand() / (RAND_MAX)) + 0.5;
+                    mRetryWaitPeriod = 0;
+                }
+            }
+            break;
+
+        case VOICE_STATE_DISCONNECT:
+            if (!LLWebRTCVoiceClient::isShuttingDown())
+            {
+                setVoiceConnectionState(VOICE_STATE_WAIT_FOR_EXIT);
+                LLCoros::instance().launch("LLVoiceWebRTCConnection::breakVoiceConnectionCoro",
+                                           boost::bind(&LLVoiceWebRTCConnection::breakVoiceConnectionCoro, this->shared_from_this()));
+            }
+            else
+            {
+                // llwebrtc::terminate() is already shutting down the connection.
+                setVoiceConnectionState(VOICE_STATE_WAIT_FOR_CLOSE);
+                mOutstandingRequests++;
+            }
+            break;
+
+        case VOICE_STATE_WAIT_FOR_EXIT:
+            break;
+
+        case VOICE_STATE_SESSION_EXIT:
+        {
+            setVoiceConnectionState(VOICE_STATE_WAIT_FOR_CLOSE);
+            mOutstandingRequests++;
+            if (!LLWebRTCVoiceClient::isShuttingDown())
+            {
+                mWebRTCPeerConnectionInterface->shutdownConnection();
+            }
+            // else was already posted by llwebrtc::terminate().
+            break;
+        case VOICE_STATE_WAIT_FOR_CLOSE:
+            break;
+        case VOICE_STATE_CLOSED:
+            if (!mShutDown)
+            {
+                mVoiceConnectionState = VOICE_STATE_START_SESSION;
+            }
+            else
+            {
+                // if we still have outstanding http or webrtc calls, wait for them to
+                // complete so we don't delete objects while they still may be used.
+                if (mOutstandingRequests <= 0)
+                {
+                    LLWebRTCVoiceClient::getInstance()->OnConnectionShutDown(mChannelID, mRegionID);
+                    return false;
+                }
+            }
+            break;
+        }
+
+        default:
+        {
+            LL_WARNS("Voice") << "Unknown voice control state " << getVoiceConnectionState() << LL_ENDL;
+            return false;
+        }
+    }
+    return true;
+}
+
+// Data has been received on the webrtc data channel.
+// Incoming data will be a JSON structure (if it's not binary; we may pack
+// binary for size reasons).  Most of the keys in the JSON objects are
+// single or double characters to keep the payload small.
+// The primary element is an object where each key is an agent id.  (In the
+// future, we may allow integer indices into an agent id list, populated on
+// join commands, to save further space.)
+// Each key points to a JSON object with keys identifying what's updated:
+// 'p'  - audio source power (level/volume) (int8 as int)
+// 'j'  - object of join data (currently only a boolean 'p' marking a primary participant)
+// 'l'  - boolean, always true if present (the participant is leaving.)
+// 'v'  - boolean - voice activity has been detected.
+
+// llwebrtc callback
+void LLVoiceWebRTCConnection::OnDataReceived(const std::string& data, bool binary)
+{
+    LL::WorkQueue::postMaybe(mMainQueue, [=] { LLVoiceWebRTCConnection::OnDataReceivedImpl(data, binary); });
+}
+
+//
+// The LLWebRTCVoiceConnection object will not be deleted
+// before the webrtc connection itself is shut down, so
+// we shouldn't be getting this callback on a nonexistent
+// this pointer.
+void LLVoiceWebRTCConnection::OnDataReceivedImpl(const std::string &data, bool binary)
+{
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE
+
+    if (mShutDown)
+    {
+        return;
+    }
+
+    if (binary)
+    {
+        LL_WARNS("Voice") << "Binary data received from data channel." << LL_ENDL;
+        return;
+    }
+
+    Json::Reader reader;
+    Json::Value  voice_data;
+    if (reader.parse(data, voice_data, false))  // don't collect comments
+    {
+        if (!voice_data.isObject())
+        {
+            LL_WARNS("Voice") << "Expected object from data channel:" << data << LL_ENDL;
+            return;
+        }
+        bool        new_participant = false;
+        Json::Value mute      = Json::objectValue;
+        Json::Value user_gain = Json::objectValue;
+        for (auto &participant_id : voice_data.getMemberNames())
+        {
+            LLUUID agent_id(participant_id);
+            if (agent_id.isNull())
+            {
+                // probably a test client.
+                continue;
+            }
+
+            LLWebRTCVoiceClient::participantStatePtr_t participant =
+                LLWebRTCVoiceClient::getInstance()->findParticipantByID(mChannelID, agent_id);
+            bool joined  = false;
+            bool primary = false;  // we ignore any 'joins' reported about participants
+                                   // that come from voice servers that aren't their primary
+                                   // voice server.  This will happen with cross-region voice
+                                   // where a participant on a neighboring region may be
+                                   // connected to multiple servers.  We don't want to
+                                   // add new identical participants from all of those servers.
+            if (voice_data[participant_id].isMember("j"))
+            {
+                // a new participant has announced that they're joining.
+                joined  = true;
+                primary = voice_data[participant_id]["j"].get("p", Json::Value(false)).asBool();
+
+                // track incoming participants that are muted so we can mute their connections (or set their volume)
+                bool isMuted = LLMuteList::getInstance()->isMuted(agent_id, LLMute::flagVoiceChat);
+                if (isMuted)
+                {
+                    mute[participant_id] = true;
+                }
+                F32 volume;
+                if (LLSpeakerVolumeStorage::getInstance()->getSpeakerVolume(agent_id, volume))
+                {
+                    user_gain[participant_id] = (uint32_t)(volume * 200);
+                }
+            }
+
+            new_participant |= joined;
+            if (!participant && joined && (primary || !isSpatial()))
+            {
+                participant = LLWebRTCVoiceClient::getInstance()->addParticipantByID(mChannelID, agent_id, mRegionID);
+            }
+
+            if (participant)
+            {
+                if (voice_data[participant_id].get("l", Json::Value(false)).asBool())
+                {
+                    // an existing participant is leaving.
+                    if (agent_id != gAgentID)
+                    {
+                        LLWebRTCVoiceClient::getInstance()->removeParticipantByID(mChannelID, agent_id, mRegionID);
+                    }
+                }
+                else
+                {
+                    // we got a 'power' update.
+                    F32 level = (F32) (voice_data[participant_id].get("p", Json::Value(participant->mLevel)).asInt()) / 128;
+                    // convert to decibels
+                    participant->mLevel = level;
+
+                    if (voice_data[participant_id].isMember("v"))
+                    {
+                        participant->mIsSpeaking = voice_data[participant_id].get("v", Json::Value(false)).asBool();
+                    }
+
+                    if (voice_data[participant_id].isMember("m"))
+                    {
+                        participant->mIsModeratorMuted = voice_data[participant_id].get("m", Json::Value(false)).asBool();
+                    }
+                }
+            }
+        }
+
+        // tell the WebRTC voice server to set the mute and volume data for these
+        // participants, if there are any updates.
+        Json::FastWriter writer;
+        Json::Value      root = Json::objectValue;
+        if (mute.size() > 0)
+        {
+            root["m"] = mute;
+        }
+        if (user_gain.size() > 0)
+        {
+            root["ug"] = user_gain;
+        }
+        if (root.size() > 0)
+        {
+            std::string json_data = writer.write(root);
+            mWebRTCDataInterface->sendData(json_data, false);
+        }
+    }
+}
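+
+// For illustration only (not an authoritative schema), a decoded update handled
+// by OnDataReceivedImpl() above might look roughly like:
+//   {
+//     "11111111-2222-3333-4444-555555555555": { "p": 87, "v": true },   // power level + voice activity
+//     "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee": { "j": { "p": true } },   // joining via their primary server
+//     "99999999-8888-7777-6666-555555555555": { "l": true }             // leaving
+//   }
+// (the agent ids above are made up; 'm', handled in the parsing code above,
+// carries the moderator-mute flag.)  The viewer's reply over the same data
+// channel uses the same shorthand, e.g.
+//   { "m": { "<agent id>": true }, "ug": { "<agent id>": 150 } }
+// to mute one participant and set another's gain (0-200, where 100 is nominal).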
+
+//
+// The LLWebRTCVoiceConnection object will not be deleted
+// before the webrtc connection itself is shut down, so
+// we shouldn't be getting this callback on a nonexistent
+// this pointer.
+// nor should data_interface be invalid if the LLWebRTCVoiceConnection
+// is shut down.
+
+// llwebrtc callback
+void LLVoiceWebRTCConnection::OnDataChannelReady(llwebrtc::LLWebRTCDataInterface *data_interface)
+{
+    LL::WorkQueue::postMaybe(mMainQueue,
+        [=] {
+            if (mShutDown)
+            {
+                return;
+            }
+
+            if (data_interface)
+            {
+                mWebRTCDataInterface = data_interface;
+                mWebRTCDataInterface->setDataObserver(this);
+            }
+        });
+}
+
+// tell the Secondlife WebRTC server that
+// we're joining, and whether we're
+// joining a server associated with
+// the region we currently occupy or not (primary).
+// The WebRTC voice server will pass this info
+// to peers.
+void LLVoiceWebRTCConnection::sendJoin()
+{
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE
+
+    Json::FastWriter writer;
+    Json::Value      root     = Json::objectValue;
+    Json::Value      join_obj = Json::objectValue;
+    LLUUID           regionID = gAgent.getRegion()->getRegionID();
+    if (mPrimary)
+    {
+        join_obj["p"] = true;
+    }
+    root["j"] = join_obj;
+    std::string json_data = writer.write(root);
+    mWebRTCDataInterface->sendData(json_data, false);
+}
+
+/////////////////////////////
+// WebRTC Spatial Connection
+
+LLVoiceWebRTCSpatialConnection::LLVoiceWebRTCSpatialConnection(const LLUUID &regionID,
+                                                               S32 parcelLocalID,
+                                                               const std::string &channelID) :
+    LLVoiceWebRTCConnection(regionID, channelID),
+    mParcelLocalID(parcelLocalID)
+{
+    if (gAgent.getRegion())
+    {
+        mPrimary = (regionID == gAgent.getRegion()->getRegionID());
+    }
+}
+
+LLVoiceWebRTCSpatialConnection::~LLVoiceWebRTCSpatialConnection()
+{
+}
+
+void LLVoiceWebRTCSpatialConnection::setMuteMic(bool muted)
+{
+    if (mMuted != muted)
+    {
+        mMuted = muted;
+        if (mWebRTCAudioInterface)
+        {
+            LLViewerRegion *regionp = gAgent.getRegion();
+            if (regionp && mRegionID == regionp->getRegionID())
+            {
+                mWebRTCAudioInterface->setMute(muted);
+            }
+            else
+            {
+                // Always mute this agent with respect to neighboring regions.
+                // Peers don't want to hear this agent from multiple regions
+                // as that'll echo.
+                mWebRTCAudioInterface->setMute(true);
+            }
+        }
+    }
+}
+
+/////////////////////////////
+// WebRTC AdHoc Connection
+
+LLVoiceWebRTCAdHocConnection::LLVoiceWebRTCAdHocConnection(const LLUUID &regionID,
+                                                           const std::string& channelID,
+                                                           const std::string& credentials) :
+    LLVoiceWebRTCConnection(regionID, channelID),
+    mCredentials(credentials)
+{
+}
+
+LLVoiceWebRTCAdHocConnection::~LLVoiceWebRTCAdHocConnection()
+{
+}
+
+// Ad-hoc connections require a different channel type,
+// as they go to a different set of Secondlife WebRTC servers.
+// They also require credentials for the given channels.
+// So, we have a separate requestVoiceConnection call.
+void LLVoiceWebRTCAdHocConnection::requestVoiceConnection()
+{
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_VOICE
+
+    LLViewerRegion *regionp = LLWorld::instance().getRegionFromID(mRegionID);
+
+    LL_DEBUGS("Voice") << "Requesting voice connection." << LL_ENDL;
+    if (!regionp || !regionp->capabilitiesReceived())
+    {
+        LL_DEBUGS("Voice") << "no capabilities for voice provisioning; retrying " << LL_ENDL;
+        // try again.
+ setVoiceConnectionState(VOICE_STATE_REQUEST_CONNECTION); + return; + } + + std::string url = regionp->getCapability("ProvisionVoiceAccountRequest"); + if (url.empty()) + { + setVoiceConnectionState(VOICE_STATE_SESSION_RETRY); + return; + } + + LLVoiceWebRTCStats::getInstance()->provisionAttemptStart(); + LLSD body; + LLSD jsep; + jsep["type"] = "offer"; + { + jsep["sdp"] = mChannelSDP; + } + body["jsep"] = jsep; + body["credentials"] = mCredentials; + body["channel"] = mChannelID; + body["channel_type"] = "multiagent"; + body["voice_server_type"] = WEBRTC_VOICE_SERVER_TYPE; + + LLCoreHttpUtil::HttpCoroutineAdapter::ptr_t httpAdapter( + new LLCoreHttpUtil::HttpCoroutineAdapter("LLVoiceWebRTCAdHocConnection::requestVoiceConnection", + LLCore::HttpRequest::DEFAULT_POLICY_ID)); + LLCore::HttpRequest::ptr_t httpRequest(new LLCore::HttpRequest); + LLCore::HttpOptions::ptr_t httpOpts(new LLCore::HttpOptions); + + httpOpts->setWantHeaders(true); + mOutstandingRequests++; + LLSD result = httpAdapter->postAndSuspend(httpRequest, url, body, httpOpts); + + LLSD httpResults = result[LLCoreHttpUtil::HttpCoroutineAdapter::HTTP_RESULTS]; + LLCore::HttpStatus status = LLCoreHttpUtil::HttpCoroutineAdapter::getStatusFromLLSD(httpResults); + + if (!status) + { + switch (status.getType()) + { + case HTTP_CONFLICT: + mCurrentStatus = LLVoiceClientStatusObserver::ERROR_CHANNEL_FULL; + break; + case HTTP_UNAUTHORIZED: + mCurrentStatus = LLVoiceClientStatusObserver::ERROR_CHANNEL_LOCKED; + break; + default: + mCurrentStatus = LLVoiceClientStatusObserver::ERROR_UNKNOWN; + break; + } + setVoiceConnectionState(VOICE_STATE_SESSION_EXIT); + } + else + { + OnVoiceConnectionRequestSuccess(result); + } + mOutstandingRequests--; +} diff --git a/indra/newview/llvoicewebrtc.h b/indra/newview/llvoicewebrtc.h new file mode 100644 index 0000000000..324b4c5b9c --- /dev/null +++ b/indra/newview/llvoicewebrtc.h @@ -0,0 +1,755 @@ +/** + * @file llvoicewebrtc.h + * @brief Declaration of LLWebRTCVoiceClient class which is the interface to the voice client process. + * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ + * Second Life Viewer Source Code + * Copyright (C) 2023, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA + * $/LicenseInfo$ + */ +#ifndef LL_VOICE_WEBRTC_H +#define LL_VOICE_WEBRTC_H + +class LLWebRTCProtocolParser; + +#include "lliopipe.h" +#include "llpumpio.h" +#include "llchainio.h" +#include "lliosocket.h" +#include "v3math.h" +#include "llframetimer.h" +#include "llviewerregion.h" +#include "llcallingcard.h" // for LLFriendObserver +#include "lleventcoro.h" +#include "llcoros.h" +#include "llparcel.h" +#include "llmutelist.h" +#include <queue> +#include "json/reader.h" + +#ifdef LL_USESYSTEMLIBS +# include "expat.h" +#else +# include "expat/expat.h" +#endif +#include "llvoiceclient.h" + +// WebRTC Includes +#include <llwebrtc.h> + +class LLAvatarName; +class LLVoiceWebRTCConnection; +typedef std::shared_ptr<LLVoiceWebRTCConnection> connectionPtr_t; + +extern const std::string WEBRTC_VOICE_SERVER_TYPE; + +class LLWebRTCVoiceClient : public LLSingleton<LLWebRTCVoiceClient>, + virtual public LLVoiceModuleInterface, + public llwebrtc::LLWebRTCDevicesObserver, + public LLMuteListObserver, + public llwebrtc::LLWebRTCLogCallback +{ + LLSINGLETON_C11(LLWebRTCVoiceClient); + LOG_CLASS(LLWebRTCVoiceClient); + virtual ~LLWebRTCVoiceClient(); + +public: + void cleanupSingleton() override; + /// @name LLVoiceModuleInterface virtual implementations + /// @see LLVoiceModuleInterface + //@{ + void init(LLPumpIO *pump) override; // Call this once at application startup (creates connector) + void terminate() override; // Call this to clean up during shutdown + + static bool isShuttingDown() { return sShuttingDown; } + + const LLVoiceVersionInfo& getVersion() override; + + void updateSettings() override; // call after loading settings and whenever they change + + // Returns true if WebRTC has successfully logged in and is not in error state + bool isVoiceWorking() const override; + + std::string sipURIFromID(const LLUUID &id) const override; + LLSD getP2PChannelInfoTemplate(const LLUUID& id) const override; + + void setHidden(bool hidden) override; // virtual + + /////////////////// + /// @name Logging + /// @{ + void LogMessage(llwebrtc::LLWebRTCLogCallback::LogLevel level, const std::string& message) override; + //@} + + ///////////////////// + /// @name Tuning + //@{ + void tuningStart() override; + void tuningStop() override; + bool inTuningMode() override; + + void tuningSetMicVolume(float volume) override; + void tuningSetSpeakerVolume(float volume) override; + float tuningGetEnergy(void) override; + //@} + + ///////////////////// + /// @name Devices + //@{ + // This returns true when it's safe to bring up the "device settings" dialog in the prefs. + bool deviceSettingsAvailable() override; + bool deviceSettingsUpdated() override; //return if the list has been updated and never fetched, only to be called from the voicepanel. + + // Requery the WebRTC daemon for the current list of input/output devices. + // If you pass true for clearCurrentList, deviceSettingsAvailable() will be false until the query has completed + // (use this if you want to know when it's done). + // If you pass false, you'll have no way to know when the query finishes, but the device lists will not appear empty in the interim. 
+ void refreshDeviceLists(bool clearCurrentList = true) override; + + void setCaptureDevice(const std::string& name) override; + void setRenderDevice(const std::string& name) override; + + LLVoiceDeviceList& getCaptureDevices() override; + LLVoiceDeviceList& getRenderDevices() override; + //@} + + void getParticipantList(std::set<LLUUID> &participants) override; + bool isParticipant(const LLUUID& speaker_id) override; + + // Send a text message to the specified user, initiating the session if necessary. + // virtual BOOL sendTextMessage(const LLUUID& participant_id, const std::string& message) const {return false;}; + + // Returns true if calling back the session URI after the session has closed is possible. + // Currently this will be false only for PSTN P2P calls. + // NOTE: this will return true if the session can't be found. + BOOL isSessionCallBackPossible(const LLUUID &session_id) override; + + // WebRTC doesn't preclude text im + BOOL isSessionTextIMPossible(const LLUUID &session_id) override { return TRUE; } + + //////////////////////////// + /// @name Channel stuff + //@{ + // returns true iff the user is currently in a proximal (local spatial) channel. + // Note that gestures should only fire if this returns true. + bool inProximalChannel() override; + + void setNonSpatialChannel(const LLSD& channelInfo, bool notify_on_first_join, bool hangup_on_last_leave) override + { + startAdHocSession(channelInfo, notify_on_first_join, hangup_on_last_leave); + } + + bool setSpatialChannel(const LLSD &channelInfo) override; + + void leaveNonSpatialChannel() override; + + void processChannels(bool process) override; + + void leaveChannel(bool stopTalking); + + bool isCurrentChannel(const LLSD &channelInfo) override; + bool compareChannels(const LLSD &channelInfo1, const LLSD &channelInfo2) override; + //@} + + LLVoiceP2POutgoingCallInterface *getOutgoingCallInterface() override { return nullptr; } + + LLVoiceP2PIncomingCallInterfacePtr getIncomingCallInterface(const LLSD &voice_call_info) override { return nullptr; } + + ///////////////////////// + /// @name Volume/gain + //@{ + void setVoiceVolume(F32 volume) override; + void setMicGain(F32 volume) override; + //@} + + ///////////////////////// + /// @name enable disable voice and features + //@{ + void setVoiceEnabled(bool enabled) override; + void setMuteMic(bool muted) override; // Set the mute state of the local mic. + //@} + + ////////////////////////// + /// @name nearby speaker accessors + std::string getDisplayName(const LLUUID& id) override; + BOOL isParticipantAvatar(const LLUUID &id) override; + BOOL getIsSpeaking(const LLUUID& id) override; + BOOL getIsModeratorMuted(const LLUUID& id) override; + F32 getCurrentPower(const LLUUID& id) override; // "power" is related to "amplitude" in a defined way. I'm just not sure what the formula is... 
+    F32 getUserVolume(const LLUUID& id) override;
+    void setUserVolume(const LLUUID& id, F32 volume) override; // sets the volume for the specified agent, from 0-1 (where .5 is nominal)
+    //@}
+
+    //////////////////
+    /// @name LLMuteListObserver
+    //@{
+    void onChange() override;
+    void onChangeDetailed(const LLMute& ) override;
+    //@}
+
+    // authorize the user
+    void userAuthorized(const std::string &user_id, const LLUUID &agentID) override {};
+
+
+    void OnConnectionEstablished(const std::string& channelID, const LLUUID& regionID);
+    void OnConnectionShutDown(const std::string &channelID, const LLUUID &regionID);
+    void OnConnectionFailure(const std::string &channelID,
+                             const LLUUID &regionID,
+                             LLVoiceClientStatusObserver::EStatusType status_type = LLVoiceClientStatusObserver::ERROR_UNKNOWN);
+    void updatePosition(void); // update the internal position state
+    void sendPositionUpdate(bool force); // send the position to the voice server.
+    void updateOwnVolume();
+
+    //////////////////////////////
+    /// @name Status notification
+    //@{
+    void addObserver(LLVoiceClientStatusObserver* observer) override;
+    void removeObserver(LLVoiceClientStatusObserver* observer) override;
+    void addObserver(LLFriendObserver* observer) override;
+    void removeObserver(LLFriendObserver* observer) override;
+    void addObserver(LLVoiceClientParticipantObserver* observer) override;
+    void removeObserver(LLVoiceClientParticipantObserver* observer) override;
+    //@}
+
+    //////////////////////////////
+    /// @name Devices change notification
+    //  LLWebRTCDevicesObserver
+    //@{
+    void OnDevicesChanged(const llwebrtc::LLWebRTCVoiceDeviceList &render_devices,
+                          const llwebrtc::LLWebRTCVoiceDeviceList &capture_devices) override;
+    //@}
+    void OnDevicesChangedImpl(const llwebrtc::LLWebRTCVoiceDeviceList &render_devices,
+                              const llwebrtc::LLWebRTCVoiceDeviceList &capture_devices);
+
+    struct participantState
+    {
+    public:
+        participantState(const LLUUID& agent_id, const LLUUID& region);
+
+        bool isAvatar();
+
+        std::string mURI;
+        LLUUID mAvatarID;
+        std::string mDisplayName;
+        LLFrameTimer mSpeakingTimeout;
+        F32 mLevel; // the current audio level of the participant
+        F32 mVolume; // the gain applied to the participant
+        bool mIsSpeaking;
+        bool mIsModeratorMuted;
+        LLUUID mRegion;
+    };
+    typedef std::shared_ptr<participantState> participantStatePtr_t;
+
+    participantStatePtr_t findParticipantByID(const std::string &channelID, const LLUUID &id);
+    participantStatePtr_t addParticipantByID(const std::string& channelID, const LLUUID &id, const LLUUID& region);
+    void removeParticipantByID(const std::string& channelID, const LLUUID &id, const LLUUID& region);
+
+  protected:
+
+    typedef std::map<const LLUUID, participantStatePtr_t> participantUUIDMap;
+
+    class sessionState
+    {
+    public:
+        typedef std::shared_ptr<sessionState> ptr_t;
+        typedef std::weak_ptr<sessionState> wptr_t;
+
+        typedef std::function<void(const ptr_t &)> sessionFunc_t;
+
+        static void addSession(const std::string &channelID, ptr_t& session);
+        virtual ~sessionState();
+
+        participantStatePtr_t addParticipant(const LLUUID& agent_id, const LLUUID& region);
+        void removeParticipant(const participantStatePtr_t &participant);
+        void removeAllParticipants(const LLUUID& region = LLUUID());
+
+        participantStatePtr_t findParticipantByID(const LLUUID& id);
+
+        static ptr_t matchSessionByChannelID(const std::string& channel_id);
+
+        void shutdownAllConnections();
+        void revive();
+
+        static void processSessionStates();
+
+        virtual bool processConnectionStates();
+
+        virtual
void sendData(const std::string &data); + + void setMuteMic(bool muted); + void setSpeakerVolume(F32 volume); + void setUserVolume(const LLUUID& id, F32 volume); + + void setUserMute(const LLUUID& id, bool mute); + + static void for_each(sessionFunc_t func); + + static void reapEmptySessions(); + static void clearSessions(); + + bool isEmpty() { return mWebRTCConnections.empty(); } + + virtual bool isSpatial() = 0; + virtual bool isEstate() = 0; + virtual bool isCallbackPossible() = 0; + + std::string mHandle; + std::string mChannelID; + std::string mName; + + bool mMuted; // this session is muted. + F32 mSpeakerVolume; // volume for this session. + + bool mShuttingDown; + + participantUUIDMap mParticipantsByUUID; + + static bool hasSession(const std::string &sessionID) + { return sSessions.find(sessionID) != sSessions.end(); } + + bool mHangupOnLastLeave; // notify observers after the session becomes empty. + bool mNotifyOnFirstJoin; // notify observers when the first peer joins. + + protected: + sessionState(); + std::list<connectionPtr_t> mWebRTCConnections; + + private: + + static std::map<std::string, ptr_t> sSessions; // canonical list of outstanding sessions. + + static void for_eachPredicate(const std::pair<std::string, + LLWebRTCVoiceClient::sessionState::wptr_t> &a, + sessionFunc_t func); + }; + + typedef std::shared_ptr<sessionState> sessionStatePtr_t; + typedef std::map<std::string, sessionStatePtr_t> sessionMap; + + class estateSessionState : public sessionState + { + public: + estateSessionState(); + bool processConnectionStates() override; + + bool isSpatial() override { return true; } + bool isEstate() override { return true; } + bool isCallbackPossible() override { return false; } + }; + + class parcelSessionState : public sessionState + { + public: + parcelSessionState(const std::string& channelID, S32 parcel_local_id); + + bool isSpatial() override { return true; } + bool isEstate() override { return false; } + bool isCallbackPossible() override { return false; } + }; + + class adhocSessionState : public sessionState + { + public: + adhocSessionState(const std::string &channelID, + const std::string& credentials, + bool notify_on_first_join, + bool hangup_on_last_leave); + + bool isSpatial() override { return false; } + bool isEstate() override { return false; } + + // only p2p-type adhoc sessions allow callback + bool isCallbackPossible() override { return mNotifyOnFirstJoin && mHangupOnLastLeave; } + + // don't send spatial data to adhoc sessions. 
+ void sendData(const std::string &data) override { } + + protected: + std::string mCredentials; + }; + + + /////////////////////////////////////////////////////// + // Private Member Functions + ////////////////////////////////////////////////////// + + static void predSendData(const LLWebRTCVoiceClient::sessionStatePtr_t &session, const std::string& spatial_data); + static void predUpdateOwnVolume(const LLWebRTCVoiceClient::sessionStatePtr_t &session, F32 audio_level); + static void predSetMuteMic(const LLWebRTCVoiceClient::sessionStatePtr_t &session, bool mute); + static void predSetSpeakerVolume(const LLWebRTCVoiceClient::sessionStatePtr_t &session, F32 volume); + static void predShutdownSession(const LLWebRTCVoiceClient::sessionStatePtr_t &session); + static void predSetUserMute(const LLWebRTCVoiceClient::sessionStatePtr_t &session, const LLUUID& id, bool mute); + static void predSetUserVolume(const LLWebRTCVoiceClient::sessionStatePtr_t &session, const LLUUID& id, F32 volume); + + //---------------------------------- + // devices + void clearCaptureDevices(); + void addCaptureDevice(const LLVoiceDevice& device); + + void clearRenderDevices(); + void addRenderDevice(const LLVoiceDevice& device); + void setDevicesListUpdated(bool state); + + ///////////////////////////// + // Sending updates of current state + void setListenerPosition(const LLVector3d &position, const LLVector3 &velocity, const LLQuaternion &rot); + void setAvatarPosition(const LLVector3d &position, const LLVector3 &velocity, const LLQuaternion &rot); + + LLVector3d getListenerPosition() { return mListenerPosition; } + LLVector3d getSpeakerPosition() { return mAvatarPosition; } + + void setEarLocation(S32 loc); + + + ///////////////////////////// + // Accessors for data related to nearby speakers + + ///////////////////////////// + sessionStatePtr_t findP2PSession(const LLUUID &agent_id); + + sessionStatePtr_t addSession(const std::string &channel_id, sessionState::ptr_t session); + void deleteSession(const sessionStatePtr_t &session); + + // Does the actual work to get out of the audio session + void leaveAudioSession(); + + friend class LLWebRTCVoiceClientCapResponder; + + + void lookupName(const LLUUID &id); + void onAvatarNameCache(const LLUUID& id, const LLAvatarName& av_name); + void avatarNameResolved(const LLUUID &id, const std::string &name); + static void predAvatarNameResolution(const LLWebRTCVoiceClient::sessionStatePtr_t &session, LLUUID id, std::string name); + + boost::signals2::connection mAvatarNameCacheConnection; + +private: + + // helper function to retrieve the audio level + // Used in multiple places. + float getAudioLevel(); + + // Coroutine support methods + //--- + void voiceConnectionCoro(); + + //--- + /// Clean up objects created during a voice session. + void cleanUp(); + + LL::WorkQueue::weak_t mMainQueue; + + bool mTuningMode; + F32 mTuningMicGain; + int mTuningSpeakerVolume; + bool mDevicesListUpdated; // set to true when the device list has been updated + // and false when the panelvoicedevicesettings has queried for an update status. + std::string mSpatialSessionCredentials; + + std::string mMainSessionGroupHandle; // handle of the "main" session group. 
+ + sessionStatePtr_t mSession; // Session state for the current session + + sessionStatePtr_t mNextSession; // Session state for the session we're trying to join + + llwebrtc::LLWebRTCDeviceInterface *mWebRTCDeviceInterface; + + LLVoiceDeviceList mCaptureDevices; + LLVoiceDeviceList mRenderDevices; + + bool startEstateSession(); + bool startParcelSession(const std::string& channelID, S32 parcelID); + bool startAdHocSession(const LLSD &channelInfo, bool notify_on_first_join, bool hangup_on_last_leave); + + bool inSpatialChannel(); + bool inOrJoiningChannel(const std::string &channelID); + bool inEstateChannel(); + + LLSD getAudioSessionChannelInfo(); + + void enforceTether(); + + void updateNeighboringRegions(); + std::set<LLUUID> getNeighboringRegions() { return mNeighboringRegions; } + + LLVoiceVersionInfo mVoiceVersion; + + bool mSpatialCoordsDirty; + + LLVector3d mListenerPosition; + LLVector3d mListenerRequestedPosition; + LLVector3 mListenerVelocity; + LLQuaternion mListenerRot; + + LLVector3d mAvatarPosition; + LLVector3 mAvatarVelocity; + LLQuaternion mAvatarRot; + + std::set<LLUUID> mNeighboringRegions; // includes current region + + bool mMuteMic; + bool mHidden; //Set to true during teleport to hide the agent's position. + + enum + { + earLocCamera = 0, // ear at camera + earLocAvatar, // ear at avatar + earLocMixed // ear at avatar location/camera direction + }; + + S32 mEarLocation; + + float mSpeakerVolume; + + F32 mMicGain; + + bool mVoiceEnabled; + bool mProcessChannels; + + typedef std::set<LLVoiceClientParticipantObserver*> observer_set_t; + observer_set_t mParticipantObservers; + + void notifyParticipantObservers(); + + typedef std::set<LLVoiceClientStatusObserver*> status_observer_set_t; + status_observer_set_t mStatusObservers; + + void notifyStatusObservers(LLVoiceClientStatusObserver::EStatusType status); + + bool mIsInTuningMode; + bool mIsProcessingChannels; + bool mIsCoroutineActive; + + // These variables can last longer than WebRTC in coroutines so we need them as static + static bool sShuttingDown; + + LLEventMailDrop mWebRTCPump; +}; + + +class LLVoiceWebRTCStats : public LLSingleton<LLVoiceWebRTCStats> +{ + LLSINGLETON(LLVoiceWebRTCStats); + LOG_CLASS(LLVoiceWebRTCStats); + virtual ~LLVoiceWebRTCStats(); + + private: + F64SecondsImplicit mStartTime; + + U32 mConnectCycles; + + F64 mConnectTime; + U32 mConnectAttempts; + + F64 mProvisionTime; + U32 mProvisionAttempts; + + F64 mEstablishTime; + U32 mEstablishAttempts; + + public: + + void reset(); + void connectionAttemptStart(); + void connectionAttemptEnd(bool success); + void provisionAttemptStart(); + void provisionAttemptEnd(bool success); + void establishAttemptStart(); + void establishAttemptEnd(bool success); + LLSD read(); +}; + +class LLVoiceWebRTCConnection : + public llwebrtc::LLWebRTCSignalingObserver, + public llwebrtc::LLWebRTCDataObserver, + public std::enable_shared_from_this<LLVoiceWebRTCConnection> +{ + public: + LLVoiceWebRTCConnection(const LLUUID ®ionID, const std::string &channelID); + + virtual ~LLVoiceWebRTCConnection() = 0; + + ////////////////////////////// + /// @name Signaling notification + // LLWebRTCSignalingObserver + //@{ + void OnIceGatheringState(EIceGatheringState state) override; + void OnIceCandidate(const llwebrtc::LLWebRTCIceCandidate &candidate) override; + void OnOfferAvailable(const std::string &sdp) override; + void OnRenegotiationNeeded() override; + void OnPeerConnectionClosed() override; + void OnAudioEstablished(llwebrtc::LLWebRTCAudioInterface *audio_interface) 
override; + //@} + + ///////////////////////// + /// @name Data Notification + /// LLWebRTCDataObserver + //@{ + void OnDataReceived(const std::string &data, bool binary) override; + void OnDataChannelReady(llwebrtc::LLWebRTCDataInterface *data_interface) override; + //@} + + void OnDataReceivedImpl(const std::string &data, bool binary); + + void sendJoin(); + void sendData(const std::string &data); + + void processIceUpdates(); + + static void processIceUpdatesCoro(connectionPtr_t connection); + + virtual void setMuteMic(bool muted); + virtual void setSpeakerVolume(F32 volume); + + void setUserVolume(const LLUUID& id, F32 volume); + void setUserMute(const LLUUID& id, bool mute); + + bool connectionStateMachine(); + + virtual bool isSpatial() { return false; } + + LLUUID getRegionID() { return mRegionID; } + + void shutDown() + { + mShutDown = true; + } + + void OnVoiceConnectionRequestSuccess(const LLSD &body); + + protected: + typedef enum e_voice_connection_state + { + VOICE_STATE_ERROR = 0x0, + VOICE_STATE_START_SESSION = 0x1, + VOICE_STATE_WAIT_FOR_SESSION_START = 0x2, + VOICE_STATE_REQUEST_CONNECTION = 0x4, + VOICE_STATE_CONNECTION_WAIT = 0x8, + VOICE_STATE_SESSION_ESTABLISHED = 0x10, + VOICE_STATE_WAIT_FOR_DATA_CHANNEL = 0x20, + VOICE_STATE_SESSION_UP = 0x40, + VOICE_STATE_SESSION_RETRY = 0x80, + VOICE_STATE_DISCONNECT = 0x100, + VOICE_STATE_WAIT_FOR_EXIT = 0x200, + VOICE_STATE_SESSION_EXIT = 0x400, + VOICE_STATE_WAIT_FOR_CLOSE = 0x800, + VOICE_STATE_CLOSED = 0x1000, + VOICE_STATE_SESSION_STOPPING = 0x1F80 + } EVoiceConnectionState; + + EVoiceConnectionState mVoiceConnectionState; + LL::WorkQueue::weak_t mMainQueue; + + void setVoiceConnectionState(EVoiceConnectionState new_voice_connection_state) + { + if (new_voice_connection_state & VOICE_STATE_SESSION_STOPPING) + { + // the new state is shutdown or restart. + mVoiceConnectionState = new_voice_connection_state; + return; + } + if (mVoiceConnectionState & VOICE_STATE_SESSION_STOPPING) + { + // we're currently shutting down or restarting, so ignore any + // state changes. + return; + } + + mVoiceConnectionState = new_voice_connection_state; + } + EVoiceConnectionState getVoiceConnectionState() + { + return mVoiceConnectionState; + } + + virtual void requestVoiceConnection() = 0; + static void requestVoiceConnectionCoro(connectionPtr_t connection) { connection->requestVoiceConnection(); } + + static void breakVoiceConnectionCoro(connectionPtr_t connection); + + LLVoiceClientStatusObserver::EStatusType mCurrentStatus; + + LLUUID mRegionID; + bool mPrimary; + LLUUID mViewerSession; + std::string mChannelID; + + std::string mChannelSDP; + std::string mRemoteChannelSDP; + + bool mMuted; + F32 mSpeakerVolume; + + bool mShutDown; + S32 mOutstandingRequests; + + S32 mRetryWaitPeriod; // number of UPDATE_THROTTLE_SECONDS we've + // waited since our last attempt to connect. 
+    F32 mRetryWaitSecs;      // number of seconds to wait before next retry
+
+
+
+    std::vector<llwebrtc::LLWebRTCIceCandidate> mIceCandidates;
+    bool                                        mIceCompleted;
+
+    llwebrtc::LLWebRTCPeerConnectionInterface *mWebRTCPeerConnectionInterface;
+    llwebrtc::LLWebRTCAudioInterface          *mWebRTCAudioInterface;
+    llwebrtc::LLWebRTCDataInterface           *mWebRTCDataInterface;
+};
+
+
+class LLVoiceWebRTCSpatialConnection :
+    public LLVoiceWebRTCConnection
+{
+  public:
+    LLVoiceWebRTCSpatialConnection(const LLUUID &regionID, S32 parcelLocalID, const std::string &channelID);
+
+    virtual ~LLVoiceWebRTCSpatialConnection();
+
+    void setMuteMic(bool muted) override;
+
+    bool isSpatial() override { return true; }
+
+
+protected:
+
+    void requestVoiceConnection() override;
+
+    S32    mParcelLocalID;
+};
+
+class LLVoiceWebRTCAdHocConnection : public LLVoiceWebRTCConnection
+{
+  public:
+    LLVoiceWebRTCAdHocConnection(const LLUUID &regionID, const std::string &channelID, const std::string& credentials);
+
+    virtual ~LLVoiceWebRTCAdHocConnection();
+
+    bool isSpatial() override { return false; }
+
+  protected:
+    void requestVoiceConnection() override;
+
+    std::string mCredentials;
+};
+
+#define VOICE_ELAPSED LLVoiceTimer(__FUNCTION__);
+
+#endif //LL_VOICE_WEBRTC_H
+
diff --git a/indra/newview/skins/default/xui/de/menu_viewer.xml b/indra/newview/skins/default/xui/de/menu_viewer.xml
index f6724d4993..8c446c1975 100644
--- a/indra/newview/skins/default/xui/de/menu_viewer.xml
+++ b/indra/newview/skins/default/xui/de/menu_viewer.xml
@@ -42,12 +42,6 @@
         <menu_item_call label="Facebook..." name="Facebook"/>
         <menu_item_call label="Twitter..." name="Twitter"/>
         <menu_item_call label="Flickr..." name="Flickr"/>
-        <menu label="Voice-Morphing" name="VoiceMorphing">
-            <menu_item_check label="Kein Voice-Morphing" name="NoVoiceMorphing"/>
-            <menu_item_check label="Vorschau..." name="Preview"/>
-            <menu_item_call label="Abonnieren..." name="Subscribe"/>
-            <menu_item_call label="Premium-Vorteil..." name="PremiumPerk"/>
-        </menu>
         <menu_item_check label="Gesten..." name="Gestures"/>
         <menu_item_check label="Freunde" name="My Friends"/>
         <menu_item_check label="Gruppen" name="My Groups"/>
diff --git a/indra/newview/skins/default/xui/en/floater_incoming_call.xml b/indra/newview/skins/default/xui/en/floater_incoming_call.xml
index 169d4c9d24..2d8089ee35 100644
--- a/indra/newview/skins/default/xui/en/floater_incoming_call.xml
+++ b/indra/newview/skins/default/xui/en/floater_incoming_call.xml
@@ -12,7 +12,7 @@
  width="550">
     <floater.string
      name="lifetime">
-        5
+        30
     </floater.string>
     <floater.string
      name="localchat">
diff --git a/indra/newview/skins/default/xui/en/menu_viewer.xml b/indra/newview/skins/default/xui/en/menu_viewer.xml
index 39a2331e04..145c82a614 100644
--- a/indra/newview/skins/default/xui/en/menu_viewer.xml
+++ b/indra/newview/skins/default/xui/en/menu_viewer.xml
@@ -561,42 +561,6 @@
                 parameter="conversation" />
         </menu_item_check>
         <menu_item_separator/>
-        <menu
-         label="Voice morphing"
-         name="VoiceMorphing"
-         visibility_control="VoiceMorphingEnabled">
-            <menu_item_check
-             label="No voice morphing"
-             name="NoVoiceMorphing">
-                <menu_item_check.on_check
-                 function="Communicate.VoiceMorphing.NoVoiceMorphing.Check" />
-                <menu_item_check.on_click
-                 function="Communicate.VoiceMorphing.NoVoiceMorphing.Click" />
-            </menu_item_check>
-            <menu_item_separator/>
-            <menu_item_check
-             label="Preview..."
- name="Preview"> - <menu_item_check.on_check - function="Floater.Visible" - parameter="voice_effect" /> - <menu_item_check.on_click - function="Floater.Toggle" - parameter="voice_effect" /> - </menu_item_check> - <menu_item_call - label="Subscribe..." - name="Subscribe"> - <menu_item_call.on_click - function="Communicate.VoiceMorphing.Subscribe" /> - </menu_item_call> - <menu_item_call - label="Premium perk..." - name="PremiumPerk"> - <menu_item_call.on_click - function="Communicate.VoiceMorphing.PremiumPerk" /> - </menu_item_call> - </menu> <menu_item_check label="Gestures..." name="Gestures" diff --git a/indra/newview/skins/default/xui/en/notifications.xml b/indra/newview/skins/default/xui/en/notifications.xml index b9d0ef0cc7..e9a67fb707 100644 --- a/indra/newview/skins/default/xui/en/notifications.xml +++ b/indra/newview/skins/default/xui/en/notifications.xml @@ -8831,6 +8831,21 @@ New Voice Morphs are available! </notification> <notification + icon="alertmodal.tga" + name="VoiceEffectsNotSupported" + sound="UISndAlert" + persist="true" + type="alertmodal"> +Voice Morphs are not supported by this viewer. +For more information about other voice morph tools, see [[FAQ_URL] this article]. + <usetemplate + ignoretext="Warn me about voice morph not being supported" + name="okignore" + yestext="OK"/> + <tag>voice</tag> + </notification> + + <notification icon="notifytip.tga" name="Cannot enter parcel: not a group member" type="notifytip"> diff --git a/indra/newview/skins/default/xui/en/panel_preferences_sound.xml b/indra/newview/skins/default/xui/en/panel_preferences_sound.xml index 7eaaaee536..eb38f8bff3 100644 --- a/indra/newview/skins/default/xui/en/panel_preferences_sound.xml +++ b/indra/newview/skins/default/xui/en/panel_preferences_sound.xml @@ -340,7 +340,7 @@ follows="left|top" top_delta="-6" layout="topleft" - left_pad="20" + left_pad="10" width="360" height="40" name="media_ear_location"> @@ -422,7 +422,7 @@ control_name="VoiceEarLocation" follows="left|top" layout="topleft" - left_pad="20" + left_pad="10" top_delta="-6" width="360" height="40" @@ -455,11 +455,21 @@ top_pad="10" width="237"/> <check_box + control_name="VoiceEchoCancellation" + height="15" + tool_tip="Check to enable voice echo cancellation" + label="Echo Cancellation" + layout="topleft" + left="260" + name="enable_echo_cancellation" + top_pad="-15" + width="200"/> + <check_box follows="top|left" enabled_control="EnableVoiceChat" control_name="PushToTalkToggle" height="15" - label="Toggle speak on/off when I press button in toolbar" + label="Toggle speak on/off with toolbar button" layout="topleft" left="20" name="push_to_talk_toggle_check" @@ -467,6 +477,16 @@ tool_tip="When in toggle mode, press and release the trigger key ONCE to switch your microphone on or off. When not in toggle mode, the microphone broadcasts your voice only while the trigger is being held down." 
top_pad="5"/> <check_box + control_name="VoiceAutomaticGainControl" + height="15" + tool_tip="Check to enable automatic gain control" + label="Automatic Gain Control" + layout="topleft" + name="voice_automatic_gain_control" + left="260" + top_pad="-15" + width="200"/> + <check_box name="gesture_audio_play_btn" control_name="EnableGestureSounds" disabled_control="MuteAudio" @@ -477,6 +497,44 @@ label="Play sounds from gestures" top_pad="5" left="20"/> + <text + layout="topleft" + height="15" + left="260" + top_pad="-12" + width="100" + name="noise_suppression_label"> + Noise Suppression + </text> + <combo_box + control_name="VoiceNoiseSuppressionLevel" + layout="topleft" + height="23" + left_pad="10" + top_pad="-18" + name="noise_suppression_combo" + width="80"> + <item + label="Off" + name="noise_suppression_none" + value="0"/> + <item + label="Low" + name="noise_suppression_low" + value="1"/> + <item + label="Moderate" + name="noise_suppression_moderate" + value="2"/> + <item + label="High" + name="noise_suppression_high" + value="3"/> + <item + label="Max" + name="noise_suppression_max" + value="4"/> + </combo_box> <button control_name="ShowDeviceSettings" follows="left|top" @@ -485,9 +543,9 @@ label="Voice Input/Output devices" layout="topleft" left="20" - top_pad="9" + top_pad="0" name="device_settings_btn" - width="230"> + width="200"> </button> <panel layout="topleft" diff --git a/indra/newview/skins/default/xui/en/strings.xml b/indra/newview/skins/default/xui/en/strings.xml index daa49eaa2c..232586636b 100644 --- a/indra/newview/skins/default/xui/en/strings.xml +++ b/indra/newview/skins/default/xui/en/strings.xml @@ -3820,6 +3820,7 @@ Please reinstall viewer from https://secondlife.com/support/downloads/ and cont <string name="voice_morphing_url">https://secondlife.com/destination/voice-island</string> <string name="premium_voice_morphing_url">https://secondlife.com/destination/voice-morphing-premium</string> + <string name="no_voice_morphing_faq_url">https://lindenlab.freshdesk.com/support/solutions/articles/31000173560-webrtc-update-for-voice-chat-faq/</string> <string name="lindenhomes_get_home_url">https://secondlife.com/land/lindenhomes/member.php</string> <string name="lindenhomes_my_home_url">https://land.secondlife.com/en-Us/lindenhomes/my-home.php</string> diff --git a/indra/newview/skins/default/xui/es/menu_viewer.xml b/indra/newview/skins/default/xui/es/menu_viewer.xml index c22311f882..04e43f01da 100644 --- a/indra/newview/skins/default/xui/es/menu_viewer.xml +++ b/indra/newview/skins/default/xui/es/menu_viewer.xml @@ -42,12 +42,6 @@ <menu_item_call label="Facebook..." name="Facebook"/> <menu_item_call label="Twitter..." name="Twitter"/> <menu_item_call label="Flickr..." name="Flickr"/> - <menu label="Transformación de voz" name="VoiceMorphing"> - <menu_item_check label="Sin transformación de voz" name="NoVoiceMorphing"/> - <menu_item_check label="Probar..." name="Preview"/> - <menu_item_call label="Suscribir..." name="Subscribe"/> - <menu_item_call label="Ventaja Premium..." name="PremiumPerk"/> - </menu> <menu_item_check label="Gestos..." 
name="Gestures"/> <menu_item_check label="Amigos" name="My Friends"/> <menu_item_check label="Grupos" name="My Groups"/> diff --git a/indra/newview/skins/default/xui/it/menu_viewer.xml b/indra/newview/skins/default/xui/it/menu_viewer.xml index 043fd28ddb..85999ccbe0 100644 --- a/indra/newview/skins/default/xui/it/menu_viewer.xml +++ b/indra/newview/skins/default/xui/it/menu_viewer.xml @@ -42,12 +42,6 @@ <menu_item_call label="Facebook..." name="Facebook"/> <menu_item_call label="Twitter..." name="Twitter"/> <menu_item_call label="Flickr..." name="Flickr"/> - <menu label="Manipolazione voce" name="VoiceMorphing"> - <menu_item_check label="Nessuna manipolazione voce" name="NoVoiceMorphing"/> - <menu_item_check label="Anteprima..." name="Preview"/> - <menu_item_call label="Abbonati..." name="Subscribe"/> - <menu_item_call label="Vantaggio Premium..." name="PremiumPerk"/> - </menu> <menu_item_check label="Gesture..." name="Gestures"/> <menu_item_check label="Amici" name="My Friends"/> <menu_item_check label="Gruppi" name="My Groups"/> diff --git a/indra/newview/skins/default/xui/ja/menu_viewer.xml b/indra/newview/skins/default/xui/ja/menu_viewer.xml index a1f3980df4..f6b10bb121 100644 --- a/indra/newview/skins/default/xui/ja/menu_viewer.xml +++ b/indra/newview/skins/default/xui/ja/menu_viewer.xml @@ -74,7 +74,6 @@ <menu_item_check label="ボイスチャット" name="Speak"/> <menu_item_check name="Conversation Log..." label="会話ログ…"/> <menu_item_separator/> - <menu label="ボイスモーフィング" name="VoiceMorphing"/> <menu_item_check label="ジェスチャー…" name="Gestures"/> <menu_item_separator/> <menu_item_check label="フレンド" name="My Friends"/> diff --git a/indra/newview/skins/default/xui/pl/menu_viewer.xml b/indra/newview/skins/default/xui/pl/menu_viewer.xml index 4d03e7c780..ee162addd2 100644 --- a/indra/newview/skins/default/xui/pl/menu_viewer.xml +++ b/indra/newview/skins/default/xui/pl/menu_viewer.xml @@ -35,11 +35,6 @@ <menu_item_check label="Czat lokalny..." name="Nearby Chat" /> <menu_item_check label="Mowa" name="Speak" /> <menu_item_check name="Conversation Log..." label="Dziennik rozmów..." /> - <menu label="Przekształcanie głosu" name="VoiceMorphing"> - <menu_item_check label="Bez przekształcania" name="NoVoiceMorphing" /> - <menu_item_check label="Podgląd..." name="Preview" /> - <menu_item_call label="Subskrybuj..." name="Subscribe" /> - </menu> <menu_item_check label="Gesty..." name="Gestures" /> <menu_item_check label="Znajomi" name="My Friends" /> <menu_item_check label="Grupy" name="My Groups" /> diff --git a/indra/newview/skins/default/xui/pt/menu_viewer.xml b/indra/newview/skins/default/xui/pt/menu_viewer.xml index b70adc3ad2..0f4873d11c 100644 --- a/indra/newview/skins/default/xui/pt/menu_viewer.xml +++ b/indra/newview/skins/default/xui/pt/menu_viewer.xml @@ -42,12 +42,6 @@ <menu_item_call label="Facebook..." name="Facebook"/> <menu_item_call label="Twitter..." name="Twitter"/> <menu_item_call label="Flickr..." name="Flickr"/> - <menu label="Distorção de voz" name="VoiceMorphing"> - <menu_item_check label="Não distorcer voz" name="NoVoiceMorphing"/> - <menu_item_check label="Visualizar..." name="Preview"/> - <menu_item_call label="Assinar..." name="Subscribe"/> - <menu_item_call label="Benefício Premium..." name="PremiumPerk"/> - </menu> <menu_item_check label="Gestos..." 
name="Gestures"/> <menu_item_check label="Amigos" name="My Friends"/> <menu_item_check label="Grupos" name="My Groups"/> diff --git a/indra/newview/skins/default/xui/ru/menu_viewer.xml b/indra/newview/skins/default/xui/ru/menu_viewer.xml index 8361464f4c..4a6390329d 100644 --- a/indra/newview/skins/default/xui/ru/menu_viewer.xml +++ b/indra/newview/skins/default/xui/ru/menu_viewer.xml @@ -40,12 +40,6 @@ <menu_item_call label="Facebook..." name="Facebook"/> <menu_item_call label="Twitter..." name="Twitter"/> <menu_item_call label="Flickr..." name="Flickr"/> - <menu label="Изменение голоса" name="VoiceMorphing"> - <menu_item_check label="Без изменения голоса" name="NoVoiceMorphing"/> - <menu_item_check label="Просмотр..." name="Preview"/> - <menu_item_call label="Подписаться..." name="Subscribe"/> - <menu_item_call label="Премиум-бонус..." name="PremiumPerk"/> - </menu> <menu_item_check label="Жесты..." name="Gestures"/> <menu_item_check label="Друзья" name="My Friends"/> <menu_item_check label="Группы" name="My Groups"/> diff --git a/indra/newview/skins/default/xui/tr/menu_viewer.xml b/indra/newview/skins/default/xui/tr/menu_viewer.xml index 1c977ba5ce..fb6111248c 100644 --- a/indra/newview/skins/default/xui/tr/menu_viewer.xml +++ b/indra/newview/skins/default/xui/tr/menu_viewer.xml @@ -40,12 +40,6 @@ <menu_item_call label="Facebook..." name="Facebook"/> <menu_item_call label="Twitter..." name="Twitter"/> <menu_item_call label="Flickr..." name="Flickr"/> - <menu label="Ses şekillendirme" name="VoiceMorphing"> - <menu_item_check label="Ses şekillendirme yok" name="NoVoiceMorphing"/> - <menu_item_check label="Önizleme..." name="Preview"/> - <menu_item_call label="Abone ol..." name="Subscribe"/> - <menu_item_call label="Özel üye avantajı..." name="PremiumPerk"/> - </menu> <menu_item_check label="Mimikler..." 
name="Gestures"/> <menu_item_check label="Arkadaşlar" name="My Friends"/> <menu_item_check label="Gruplar" name="My Groups"/> diff --git a/indra/newview/skins/default/xui/zh/menu_viewer.xml b/indra/newview/skins/default/xui/zh/menu_viewer.xml index 972434dfc5..a048af7b68 100644 --- a/indra/newview/skins/default/xui/zh/menu_viewer.xml +++ b/indra/newview/skins/default/xui/zh/menu_viewer.xml @@ -40,12 +40,6 @@ <menu_item_call label="臉書…" name="Facebook"/> <menu_item_call label="推特…" name="Twitter"/> <menu_item_call label="Flickr…" name="Flickr"/> - <menu label="語音變聲" name="VoiceMorphing"> - <menu_item_check label="沒有變聲效果" name="NoVoiceMorphing"/> - <menu_item_check label="預覽……" name="Preview"/> - <menu_item_call label="訂閱……" name="Subscribe"/> - <menu_item_call label="付費會員獨享…" name="PremiumPerk"/> - </menu> <menu_item_check label="姿勢…" name="Gestures"/> <menu_item_check label="朋友" name="My Friends"/> <menu_item_check label="群組" name="My Groups"/> diff --git a/indra/newview/tests/llsechandler_basic_test.cpp b/indra/newview/tests/llsechandler_basic_test.cpp index bfe32406cb..cf37a4713c 100644 --- a/indra/newview/tests/llsechandler_basic_test.cpp +++ b/indra/newview/tests/llsechandler_basic_test.cpp @@ -232,381 +232,402 @@ namespace tut "Certificate:\n" " Data:\n" " Version: 3 (0x2)\n" - " Serial Number:\n" - " 82:2f:8f:eb:8d:06:24:b0\n" + " Serial Number: ef:54:d8:f7:da:18:e8:19\n" " Signature Algorithm: sha256WithRSAEncryption\n" " Issuer: C=US, ST=California, L=San Francisco, O=Linden Lab, OU=Second Life Engineering, CN=Integration Test Root CA/emailAddress=noreply@lindenlab.com\n" " Validity\n" - " Not Before: May 22 22:19:45 2018 GMT\n" - " Not After : May 17 22:19:45 2038 GMT\n" + " Not Before: Jul 23 11:46:26 2024 GMT\n" + " Not After : Jul 21 11:46:26 2034 GMT\n" " Subject: C=US, ST=California, L=San Francisco, O=Linden Lab, OU=Second Life Engineering, CN=Integration Test Root CA/emailAddress=noreply@lindenlab.com\n" " Subject Public Key Info:\n" " Public Key Algorithm: rsaEncryption\n" " Public-Key: (4096 bit)\n" " Modulus:\n" - " 00:bd:e0:79:dd:3b:a6:ac:87:d0:39:f0:58:c7:a4:\n" - " 42:42:f6:5f:93:b0:36:04:b5:e2:d5:f7:2a:c0:6c:\n" - " a0:13:d2:1e:02:81:57:02:50:4c:57:b7:ef:27:9e:\n" - " f6:f1:f1:30:30:72:1e:57:34:e5:3f:82:3c:21:c4:\n" - " 66:d2:73:63:6c:91:e6:dd:49:9e:9c:b1:34:6a:81:\n" - " 45:a1:6e:c4:50:28:f2:d8:e3:fe:80:2f:83:aa:28:\n" - " 91:b4:8c:57:c9:f1:16:d9:0c:87:3c:25:80:a0:81:\n" - " 8d:71:f2:96:e2:16:f1:97:c4:b0:d8:53:bb:13:6c:\n" - " 73:54:2f:29:94:85:cf:86:6e:75:71:ad:39:e3:fc:\n" - " 39:12:53:93:1c:ce:39:e0:33:da:49:b7:3d:af:b0:\n" - " 37:ce:77:09:03:27:32:70:c0:9c:7f:9c:89:ce:90:\n" - " 45:b0:7d:94:8b:ff:13:27:ba:88:7f:ae:c4:aa:73:\n" - " d5:47:b8:87:69:89:80:0c:c1:22:18:78:c2:0d:47:\n" - " d9:10:ff:80:79:0d:46:71:ec:d9:ba:c9:f3:77:fd:\n" - " 92:6d:1f:0f:d9:54:18:6d:f6:72:24:5c:5c:3d:43:\n" - " 49:35:3e:1c:28:de:7e:44:dc:29:c3:9f:62:04:46:\n" - " aa:c4:e6:69:6a:15:f8:e3:74:1c:14:e9:f4:97:7c:\n" - " 30:6c:d4:28:fc:2a:0e:1d:6d:39:2e:1d:f9:17:43:\n" - " 35:5d:23:e7:ba:e3:a8:e9:97:6b:3c:3e:23:ef:d8:\n" - " bc:fb:7a:57:37:39:93:59:03:fc:78:ca:b1:31:ef:\n" - " 26:19:ed:56:e1:63:c3:ad:99:80:5b:47:b5:03:35:\n" - " 5f:fe:6a:a6:21:63:ec:50:fb:4e:c9:f9:ae:a5:66:\n" - " d0:55:33:8d:e6:c5:50:5a:c6:8f:5c:34:45:a7:72:\n" - " da:50:f6:66:4c:19:f5:d1:e4:fb:11:8b:a1:b5:4e:\n" - " 09:43:81:3d:39:28:86:3b:fe:07:28:97:02:b5:3a:\n" - " 07:5f:4a:20:80:1a:7d:a4:8c:f7:6c:f6:c5:9b:f6:\n" - " 61:e5:c7:b0:c3:d5:58:38:7b:bb:47:1e:34:d6:16:\n" - " 55:c5:d2:6c:b0:93:77:b1:90:69:06:b1:53:cb:1b:\n" - " 
84:71:cf:b8:87:1b:1e:44:35:b4:2b:bb:04:59:58:\n" - " 0b:e8:93:d8:ae:21:9b:b1:1c:89:30:ae:11:80:77:\n" - " cc:16:f3:d6:35:ed:a1:b3:70:b3:4f:cd:a1:56:99:\n" - " ee:0e:c0:00:a4:09:70:c3:5b:0b:be:a1:07:18:dd:\n" - " c6:f4:6d:8b:58:bc:f9:bb:4b:01:2c:f6:cc:2c:9b:\n" - " 87:0e:b1:4f:9c:10:be:fc:45:e2:a4:ec:7e:fc:ff:\n" - " 45:b8:53\n" + " 00:c6:cc:07:f4:0b:17:06:4d:a6:30:b4:c7:02:6b:\n" + " 9d:a4:47:a6:09:0e:60:1a:32:d4:6b:42:88:ee:c5:\n" + " b9:e9:fb:b5:0b:60:dc:a2:45:92:a5:bb:88:12:fc:\n" + " 42:1a:80:32:79:16:62:7a:97:af:84:28:53:3c:c1:\n" + " f2:68:c0:4e:45:e4:0a:63:f9:34:1d:a2:8b:cc:70:\n" + " df:c6:65:c0:ba:31:32:d2:9d:0c:c8:ce:dc:11:12:\n" + " a4:11:fa:d3:c8:56:e2:31:8a:e3:fb:91:40:da:25:\n" + " 55:d1:f2:75:9b:4d:fa:b8:1f:b5:6d:9b:e1:fe:5d:\n" + " e8:c4:02:79:14:ef:7d:5a:b3:3a:1e:b6:d0:60:2c:\n" + " 90:dc:22:e2:c5:ae:85:1f:b4:9d:7a:20:f8:af:63:\n" + " 56:25:1a:64:f3:9c:3f:9a:cf:68:08:0a:37:db:d0:\n" + " a3:65:26:db:80:82:ff:e0:1b:51:c8:ee:f6:ad:c2:\n" + " b4:f2:ab:d2:e8:85:86:77:28:d0:63:4a:71:78:41:\n" + " e3:8c:7f:71:51:31:af:24:3f:fa:8d:d0:d8:0b:e2:\n" + " 7e:79:33:8a:bb:d2:00:9e:2e:c8:cd:d5:50:92:b8:\n" + " 5c:5a:0b:99:ef:05:39:67:da:be:70:36:51:37:37:\n" + " 20:6f:84:ab:29:11:00:7b:38:32:ba:0b:bc:34:a6:\n" + " b5:c6:a7:f0:c0:25:2d:38:0b:72:40:ab:cf:e6:ff:\n" + " 97:75:ff:e2:a9:3c:2a:57:ce:e4:52:20:8c:de:fe:\n" + " 68:ce:54:85:37:ba:b3:7f:2e:53:58:ea:9b:ac:79:\n" + " 6b:16:65:b8:11:88:5a:46:eb:9e:9e:80:3c:89:91:\n" + " 35:e0:c5:33:45:c8:86:4d:25:51:39:b1:72:97:2b:\n" + " b3:c8:c9:e8:11:cd:32:41:c8:c1:56:22:7e:33:81:\n" + " 85:61:ab:da:9e:6e:5f:24:1c:0f:9b:fa:da:9d:86:\n" + " 1a:66:f6:32:2a:10:80:ea:72:7a:4a:ef:c0:f2:7c:\n" + " 43:02:e6:70:19:6a:e1:02:0a:00:80:51:1c:a3:03:\n" + " 8b:6d:89:9f:91:37:90:d6:d8:9c:73:77:06:9e:bc:\n" + " 95:89:66:ee:43:40:a3:ee:43:a3:f6:2d:43:dd:7b:\n" + " f0:2f:0b:12:37:49:b7:81:5a:e2:54:6d:71:88:ff:\n" + " fe:7e:41:25:35:4c:b4:b9:62:65:dd:9f:1f:7a:06:\n" + " 6e:2b:20:58:78:da:08:66:a8:f1:89:de:8f:7f:5c:\n" + " 5e:c2:72:33:7f:b6:8e:41:4c:26:f6:4c:d4:0e:11:\n" + " 44:da:c7:14:f7:8b:79:4e:53:29:87:15:b1:12:e9:\n" + " 19:2b:54:33:d6:2e:7f:bd:42:20:be:fc:d7:9c:b4:\n" + " 7a:0a:db\n" " Exponent: 65537 (0x10001)\n" " X509v3 extensions:\n" - " X509v3 Subject Key Identifier: \n" - " 8A:22:C6:9C:2E:11:F3:40:0C:CE:82:0C:22:59:FF:F8:7F:D0:B9:13\n" - " X509v3 Authority Key Identifier: \n" - " keyid:8A:22:C6:9C:2E:11:F3:40:0C:CE:82:0C:22:59:FF:F8:7F:D0:B9:13\n" + " X509v3 Subject Key Identifier:\n" + " 4D:7D:AE:0D:A5:5E:22:5A:6A:8F:19:61:54:B3:58:CB:7B:C0:BD:DA\n" + " X509v3 Authority Key Identifier:\n" + " keyid:4D:7D:AE:0D:A5:5E:22:5A:6A:8F:19:61:54:B3:58:CB:7B:C0:BD:DA\n" "\n" - " X509v3 Basic Constraints: critical\n" + " X509v3 Basic Constraints:\n" " CA:TRUE\n" - " X509v3 Key Usage: critical\n" - " Digital Signature, Certificate Sign, CRL Sign\n" " Signature Algorithm: sha256WithRSAEncryption\n" - " b3:cb:33:eb:0e:02:64:f4:55:9a:3d:03:9a:cf:6a:4c:18:43:\n" - " f7:42:cb:65:dc:61:52:e5:9f:2f:42:97:3c:93:16:22:d4:af:\n" - " ae:b2:0f:c3:9b:ef:e0:cc:ee:b6:b1:69:a3:d8:da:26:c3:ad:\n" - " 3b:c5:64:dc:9f:d4:c2:53:4b:91:6d:c4:92:09:0b:ac:f0:99:\n" - " be:6f:b9:3c:03:4a:6d:9f:01:5d:ec:5a:9a:f3:a7:e5:3b:2c:\n" - " 99:57:7d:7e:25:15:68:20:12:30:96:16:86:f5:db:74:90:60:\n" - " fe:8b:df:99:f6:f7:62:49:9f:bc:8d:45:23:0a:c8:73:b8:79:\n" - " 80:3c:b9:e5:72:85:4b:b3:81:66:74:a2:72:92:4c:44:fd:7b:\n" - " 46:2e:21:a2:a9:81:a2:f3:26:4d:e3:89:7d:78:b0:c6:6f:b5:\n" - " 87:cb:ee:25:ed:27:1f:75:13:fa:6d:e9:37:73:ad:07:bb:af:\n" - " d3:6c:87:ea:02:01:70:bd:53:aa:ce:39:2c:d4:66:39:33:aa:\n" - 
" d1:9c:ee:67:e3:a9:45:d2:7b:2e:54:09:af:70:5f:3f:5a:67:\n" - " 2e:6c:72:ef:e0:9d:92:28:4a:df:ba:0b:b7:23:ca:5b:04:11:\n" - " 45:d1:51:e9:ea:c9:ec:54:fa:34:46:ae:fc:dc:6c:f8:1e:2c:\n" - " 9e:f4:71:51:8d:b5:a1:26:9a:13:30:be:1e:41:25:59:58:05:\n" - " 2c:64:c8:f9:5e:38:ae:dc:93:b0:8a:d6:38:74:02:cb:ce:ce:\n" - " 95:31:76:f6:7c:bf:a4:a1:8e:27:fd:ca:74:82:d1:e1:4d:b6:\n" - " 48:51:fa:c5:17:59:22:a3:84:be:82:c8:83:ec:61:a0:f4:ee:\n" - " 2c:e3:a3:ea:e5:51:c9:d3:4f:db:85:bd:ba:7a:52:14:b6:03:\n" - " ed:43:17:d8:d7:1c:22:5e:c9:56:d9:d6:81:96:11:e3:5e:01:\n" - " 40:91:30:09:da:a3:5f:d3:27:60:e5:9d:6c:da:d0:f0:39:01:\n" - " 23:4a:a6:15:7a:4a:82:eb:ec:72:4a:1d:36:dc:6f:83:c4:85:\n" - " 84:b5:8d:cd:09:e5:12:63:f3:21:56:c8:64:6b:db:b8:cf:d4:\n" - " df:ca:a8:24:8e:df:8d:63:a5:96:84:bf:ff:8b:7e:46:7a:f0:\n" - " c7:73:7c:70:8a:f5:17:d0:ac:c8:89:1e:d7:89:42:0f:4d:66:\n" - " c4:d8:bb:36:a8:ae:ca:e1:cf:e2:88:f6:cf:b0:44:4a:5f:81:\n" - " 50:4b:d6:28:81:cd:6c:f0:ec:e6:09:08:f2:59:91:a2:69:ac:\n" - " c7:81:fa:ab:61:3e:db:6f:f6:7f:db:1a:9e:b9:5d:cc:cc:33:\n" - " fa:95:c6:f7:8d:4b:30:f3\n" + " 5b:40:71:96:c8:d1:57:3f:fc:f2:3c:75:fb:c9:a6:a7:63:8a:\n" + " 22:23:96:0f:40:77:77:e2:7f:76:fc:5f:7b:1c:bd:ea:ca:f0:\n" + " be:1a:fd:59:e6:0e:00:d1:78:44:01:28:f4:01:68:67:78:cf:\n" + " 78:43:36:ac:b2:5c:13:0e:2a:94:59:88:9e:64:46:42:0a:9b:\n" + " be:7d:2d:10:11:fe:8b:64:01:fb:00:c5:2e:47:63:c0:93:3a:\n" + " 4a:f8:6c:fc:a9:16:58:ab:bc:7b:6b:20:31:9d:d7:d8:84:01:\n" + " cc:ce:52:7f:a1:18:2f:5c:c9:59:58:9a:98:b9:ef:54:d7:a0:\n" + " 56:79:28:ba:ad:f5:e5:fd:7e:d8:d6:be:dd:25:76:6f:fa:8a:\n" + " 07:f6:8e:0f:83:43:19:ee:96:c4:c9:54:df:19:5a:4c:ae:25:\n" + " 57:a2:5d:d5:e8:0a:66:d8:19:e9:c4:44:ba:6a:3b:b3:86:ae:\n" + " 44:c0:7c:6e:e5:a0:6c:45:bb:7f:34:94:e9:d3:d4:f4:04:0b:\n" + " eb:fc:9a:fa:67:d4:e5:83:5e:08:09:9c:70:a9:d3:0d:8a:08:\n" + " ed:3c:04:33:4f:ac:02:d9:5c:99:62:12:fc:0e:8d:55:8a:ce:\n" + " ca:28:5a:1a:9e:c9:59:8e:f0:f5:19:c7:30:1e:59:1f:3c:77:\n" + " 6d:fc:a2:31:ec:bf:83:fd:14:26:91:68:88:05:4c:87:82:e0:\n" + " 33:f4:ee:d8:56:97:23:3a:00:9b:e7:a2:10:c2:83:28:c6:c0:\n" + " c1:92:49:95:c1:d3:e1:43:e8:8f:0c:d0:ae:e3:50:17:1a:8d:\n" + " 0f:4a:60:71:76:8e:9e:fb:15:76:cd:cd:69:2c:59:24:69:d2:\n" + " 0f:f2:d5:0e:96:95:2b:2e:d7:81:ed:b3:7b:6f:ce:60:32:b5:\n" + " f0:f6:74:ea:27:3a:ee:2c:96:7b:e0:06:6c:33:25:c4:60:da:\n" + " 76:de:c4:a1:22:b6:b1:63:57:10:3c:62:60:98:47:39:9e:38:\n" + " ce:c7:ef:75:75:19:d3:26:2a:cf:46:e3:b0:72:38:49:ee:c3:\n" + " 4e:52:97:e5:e5:b8:bc:b1:45:56:98:54:0a:63:c8:87:ff:a0:\n" + " cb:28:12:5c:8f:a2:6e:a7:f9:50:98:2d:a5:26:08:df:16:29:\n" + " 19:63:7f:6c:b4:41:20:f7:5d:ef:6a:90:fd:1a:08:1c:c2:4c:\n" + " 3e:77:ea:e0:df:c0:dd:aa:a2:36:e7:e8:be:98:39:0a:68:59:\n" + " 8e:a0:71:2f:7c:92:ab:e0:c4:c1:c2:eb:89:b6:34:ce:44:ab:\n" + " f9:f6:a4:c8:7b:ad:a8:bc:c9:04:7c:d5:4c:a4:d2:8b:54:23:\n" + " 89:68:86:4e:07:36:d9:bc\n" "-----BEGIN CERTIFICATE-----\n" - "MIIGXDCCBESgAwIBAgIJAIIvj+uNBiSwMA0GCSqGSIb3DQEBCwUAMIG6MQswCQYD\n" + "MIIGSTCCBDGgAwIBAgIJAO9U2PfaGOgZMA0GCSqGSIb3DQEBCwUAMIG6MQswCQYD\n" "VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5j\n" "aXNjbzETMBEGA1UECgwKTGluZGVuIExhYjEgMB4GA1UECwwXU2Vjb25kIExpZmUg\n" "RW5naW5lZXJpbmcxITAfBgNVBAMMGEludGVncmF0aW9uIFRlc3QgUm9vdCBDQTEk\n" - "MCIGCSqGSIb3DQEJARYVbm9yZXBseUBsaW5kZW5sYWIuY29tMB4XDTE4MDUyMjIy\n" - "MTk0NVoXDTM4MDUxNzIyMTk0NVowgboxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApD\n" + "MCIGCSqGSIb3DQEJARYVbm9yZXBseUBsaW5kZW5sYWIuY29tMB4XDTI0MDcyMzEx\n" + "NDYyNloXDTM0MDcyMTExNDYyNlowgboxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApD\n" 
"YWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMRMwEQYDVQQKDApMaW5k\n" "ZW4gTGFiMSAwHgYDVQQLDBdTZWNvbmQgTGlmZSBFbmdpbmVlcmluZzEhMB8GA1UE\n" "AwwYSW50ZWdyYXRpb24gVGVzdCBSb290IENBMSQwIgYJKoZIhvcNAQkBFhVub3Jl\n" "cGx5QGxpbmRlbmxhYi5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC\n" - "AQC94HndO6ash9A58FjHpEJC9l+TsDYEteLV9yrAbKAT0h4CgVcCUExXt+8nnvbx\n" - "8TAwch5XNOU/gjwhxGbSc2NskebdSZ6csTRqgUWhbsRQKPLY4/6AL4OqKJG0jFfJ\n" - "8RbZDIc8JYCggY1x8pbiFvGXxLDYU7sTbHNULymUhc+GbnVxrTnj/DkSU5Mczjng\n" - "M9pJtz2vsDfOdwkDJzJwwJx/nInOkEWwfZSL/xMnuoh/rsSqc9VHuIdpiYAMwSIY\n" - "eMINR9kQ/4B5DUZx7Nm6yfN3/ZJtHw/ZVBht9nIkXFw9Q0k1Phwo3n5E3CnDn2IE\n" - "RqrE5mlqFfjjdBwU6fSXfDBs1Cj8Kg4dbTkuHfkXQzVdI+e646jpl2s8PiPv2Lz7\n" - "elc3OZNZA/x4yrEx7yYZ7VbhY8OtmYBbR7UDNV/+aqYhY+xQ+07J+a6lZtBVM43m\n" - "xVBaxo9cNEWnctpQ9mZMGfXR5PsRi6G1TglDgT05KIY7/gcolwK1OgdfSiCAGn2k\n" - "jPds9sWb9mHlx7DD1Vg4e7tHHjTWFlXF0mywk3exkGkGsVPLG4Rxz7iHGx5ENbQr\n" - "uwRZWAvok9iuIZuxHIkwrhGAd8wW89Y17aGzcLNPzaFWme4OwACkCXDDWwu+oQcY\n" - "3cb0bYtYvPm7SwEs9swsm4cOsU+cEL78ReKk7H78/0W4UwIDAQABo2MwYTAdBgNV\n" - "HQ4EFgQUiiLGnC4R80AMzoIMIln/+H/QuRMwHwYDVR0jBBgwFoAUiiLGnC4R80AM\n" - "zoIMIln/+H/QuRMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJ\n" - "KoZIhvcNAQELBQADggIBALPLM+sOAmT0VZo9A5rPakwYQ/dCy2XcYVLlny9ClzyT\n" - "FiLUr66yD8Ob7+DM7raxaaPY2ibDrTvFZNyf1MJTS5FtxJIJC6zwmb5vuTwDSm2f\n" - "AV3sWprzp+U7LJlXfX4lFWggEjCWFob123SQYP6L35n292JJn7yNRSMKyHO4eYA8\n" - "ueVyhUuzgWZ0onKSTET9e0YuIaKpgaLzJk3jiX14sMZvtYfL7iXtJx91E/pt6Tdz\n" - "rQe7r9Nsh+oCAXC9U6rOOSzUZjkzqtGc7mfjqUXSey5UCa9wXz9aZy5scu/gnZIo\n" - "St+6C7cjylsEEUXRUenqyexU+jRGrvzcbPgeLJ70cVGNtaEmmhMwvh5BJVlYBSxk\n" - "yPleOK7ck7CK1jh0AsvOzpUxdvZ8v6Shjif9ynSC0eFNtkhR+sUXWSKjhL6CyIPs\n" - "YaD07izjo+rlUcnTT9uFvbp6UhS2A+1DF9jXHCJeyVbZ1oGWEeNeAUCRMAnao1/T\n" - "J2DlnWza0PA5ASNKphV6SoLr7HJKHTbcb4PEhYS1jc0J5RJj8yFWyGRr27jP1N/K\n" - "qCSO341jpZaEv/+LfkZ68MdzfHCK9RfQrMiJHteJQg9NZsTYuzaorsrhz+KI9s+w\n" - "REpfgVBL1iiBzWzw7OYJCPJZkaJprMeB+qthPttv9n/bGp65XczMM/qVxveNSzDz\n" + "AQDGzAf0CxcGTaYwtMcCa52kR6YJDmAaMtRrQojuxbnp+7ULYNyiRZKlu4gS/EIa\n" + "gDJ5FmJ6l6+EKFM8wfJowE5F5Apj+TQdoovMcN/GZcC6MTLSnQzIztwREqQR+tPI\n" + "VuIxiuP7kUDaJVXR8nWbTfq4H7Vtm+H+XejEAnkU731aszoettBgLJDcIuLFroUf\n" + "tJ16IPivY1YlGmTznD+az2gICjfb0KNlJtuAgv/gG1HI7vatwrTyq9LohYZ3KNBj\n" + "SnF4QeOMf3FRMa8kP/qN0NgL4n55M4q70gCeLsjN1VCSuFxaC5nvBTln2r5wNlE3\n" + "NyBvhKspEQB7ODK6C7w0prXGp/DAJS04C3JAq8/m/5d1/+KpPCpXzuRSIIze/mjO\n" + "VIU3urN/LlNY6puseWsWZbgRiFpG656egDyJkTXgxTNFyIZNJVE5sXKXK7PIyegR\n" + "zTJByMFWIn4zgYVhq9qebl8kHA+b+tqdhhpm9jIqEIDqcnpK78DyfEMC5nAZauEC\n" + "CgCAURyjA4ttiZ+RN5DW2JxzdwaevJWJZu5DQKPuQ6P2LUPde/AvCxI3SbeBWuJU\n" + "bXGI//5+QSU1TLS5YmXdnx96Bm4rIFh42ghmqPGJ3o9/XF7CcjN/to5BTCb2TNQO\n" + "EUTaxxT3i3lOUymHFbES6RkrVDPWLn+9QiC+/NectHoK2wIDAQABo1AwTjAdBgNV\n" + "HQ4EFgQUTX2uDaVeIlpqjxlhVLNYy3vAvdowHwYDVR0jBBgwFoAUTX2uDaVeIlpq\n" + "jxlhVLNYy3vAvdowDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAW0Bx\n" + "lsjRVz/88jx1+8mmp2OKIiOWD0B3d+J/dvxfexy96srwvhr9WeYOANF4RAEo9AFo\n" + "Z3jPeEM2rLJcEw4qlFmInmRGQgqbvn0tEBH+i2QB+wDFLkdjwJM6Svhs/KkWWKu8\n" + "e2sgMZ3X2IQBzM5Sf6EYL1zJWViamLnvVNegVnkouq315f1+2Na+3SV2b/qKB/aO\n" + "D4NDGe6WxMlU3xlaTK4lV6Jd1egKZtgZ6cREumo7s4auRMB8buWgbEW7fzSU6dPU\n" + "9AQL6/ya+mfU5YNeCAmccKnTDYoI7TwEM0+sAtlcmWIS/A6NVYrOyihaGp7JWY7w\n" + "9RnHMB5ZHzx3bfyiMey/g/0UJpFoiAVMh4LgM/Tu2FaXIzoAm+eiEMKDKMbAwZJJ\n" + "lcHT4UPojwzQruNQFxqND0pgcXaOnvsVds3NaSxZJGnSD/LVDpaVKy7Xge2ze2/O\n" + "YDK18PZ06ic67iyWe+AGbDMlxGDadt7EoSK2sWNXEDxiYJhHOZ44zsfvdXUZ0yYq\n" + "z0bjsHI4Se7DTlKX5eW4vLFFVphUCmPIh/+gyygSXI+ibqf5UJgtpSYI3xYpGWN/\n" + 
"bLRBIPdd72qQ/RoIHMJMPnfq4N/A3aqiNufovpg5CmhZjqBxL3ySq+DEwcLribY0\n" + "zkSr+fakyHutqLzJBHzVTKTSi1QjiWiGTgc22bw=\n" "-----END CERTIFICATE-----\n" - ); + ); + const std::string mPemIntermediateCert( "Certificate:\n" " Data:\n" " Version: 3 (0x2)\n" - " Serial Number: 4096 (0x1000)\n" + " Serial Number: 85:bb:4b:66:26:db:9a:c6\n" " Signature Algorithm: sha256WithRSAEncryption\n" " Issuer: C=US, ST=California, L=San Francisco, O=Linden Lab, OU=Second Life Engineering, CN=Integration Test Root CA/emailAddress=noreply@lindenlab.com\n" " Validity\n" - " Not Before: May 22 22:39:08 2018 GMT\n" - " Not After : May 19 22:39:08 2028 GMT\n" - " Subject: C=US, ST=California, O=Linden Lab, OU=Second Life Engineering, CN=Integration Test Intermediate CA/emailAddress=noreply@lindenlab.com\n" + " Not Before: Jul 23 11:46:33 2024 GMT\n" + " Not After : Jul 21 11:46:33 2034 GMT\n" + " Subject: C=US, ST=California, L=San Francisco, O=Linden Lab, OU=Second Life Engineering, CN=Integration Test Intermediate CA/emailAddress=noreply@lindenlab.com\n" " Subject Public Key Info:\n" " Public Key Algorithm: rsaEncryption\n" " Public-Key: (4096 bit)\n" " Modulus:\n" - " 00:ce:a3:70:e2:c4:fb:4b:97:90:a1:30:bb:c1:1b:\n" - " 13:b9:aa:7e:46:17:a3:26:8d:69:3f:5e:73:95:e8:\n" - " 6a:b1:0a:b4:8f:50:65:e3:c6:5c:39:24:34:df:0b:\n" - " b7:cc:ce:62:0c:36:5a:12:2c:fe:35:4c:e9:1c:ac:\n" - " 80:5e:24:99:d7:aa:bd:be:48:c0:62:64:77:36:88:\n" - " 66:ce:f4:a8:dd:d2:76:24:62:90:55:41:fc:1d:13:\n" - " 4e:a7:4e:57:bc:a8:a4:59:4b:2c:5a:1c:d8:cc:16:\n" - " de:e8:88:30:c9:95:df:2f:a6:14:28:0f:eb:34:46:\n" - " 12:58:ba:da:0e:e6:de:9c:15:f6:f4:e3:9f:74:aa:\n" - " 70:89:79:8b:e9:5a:7b:18:54:15:94:3a:23:0a:65:\n" - " 78:05:d9:33:90:2a:ce:15:18:0d:52:fc:5c:31:65:\n" - " 20:d0:12:37:8c:11:80:ba:d4:b0:82:73:00:4b:49:\n" - " be:cb:d6:bc:e7:cd:61:f3:00:98:99:74:5a:37:81:\n" - " 49:96:7e:14:01:1b:86:d2:d0:06:94:40:63:63:46:\n" - " 11:fc:33:5c:bd:3a:5e:d4:e5:44:47:64:50:bd:a6:\n" - " 97:55:70:64:9b:26:cc:de:20:82:90:6a:83:41:9c:\n" - " 6f:71:47:14:be:cb:68:7c:85:be:ef:2e:76:12:19:\n" - " d3:c9:87:32:b4:ac:60:20:16:28:2d:af:bc:e8:01:\n" - " c6:7f:fb:d8:11:d5:f4:b7:14:bd:27:08:5b:72:be:\n" - " 09:e0:91:c8:9c:7b:b4:b3:12:ef:32:36:be:b1:b9:\n" - " a2:b7:e3:69:47:30:76:ba:9c:9b:19:99:4d:53:dd:\n" - " 5c:e8:2c:f1:b2:64:69:cf:15:bd:f8:bb:58:95:73:\n" - " 58:38:95:b4:7a:cf:84:29:a6:c2:db:f0:bd:ef:97:\n" - " 26:d4:99:ac:d7:c7:be:b0:0d:11:f4:26:86:2d:77:\n" - " 42:52:25:d7:56:c7:e3:97:b1:36:5c:97:71:d0:9b:\n" - " f5:b5:50:8d:f9:ff:fb:10:77:3c:b5:53:6d:a1:43:\n" - " 35:a9:03:32:05:ab:d7:f5:d1:19:bd:5f:92:a3:00:\n" - " 2a:79:37:a4:76:4f:e9:32:0d:e4:86:bb:ea:c3:1a:\n" - " c5:33:e8:16:d4:a5:d8:e0:e8:bb:c2:f0:22:15:e2:\n" - " d9:8c:ae:ac:7d:2b:bf:eb:a3:4c:3b:29:1d:94:ac:\n" - " a3:bb:6d:ba:6d:03:91:03:cf:46:12:c4:66:21:c5:\n" - " c6:67:d8:11:19:79:01:0e:6e:84:1c:76:6f:11:3d:\n" - " eb:94:89:c5:6a:26:1f:cd:e0:11:8b:51:ee:99:35:\n" - " 69:e5:7f:0b:77:2a:94:e4:4b:64:b9:83:04:30:05:\n" - " e4:a2:e3\n" + " 00:be:f7:d2:cb:e4:5c:46:7b:e2:11:22:89:72:da:\n" + " 77:72:ec:05:87:19:f7:77:07:fd:67:d7:af:13:d5:\n" + " 76:12:92:dd:69:4d:22:47:b0:3d:94:8a:6a:95:85:\n" + " 34:b8:78:c3:9d:63:32:b1:4b:0a:b6:0e:05:7b:ab:\n" + " 06:23:fc:0d:21:b5:fc:c6:6a:5a:36:be:6e:fc:c7:\n" + " 47:97:a3:18:2e:33:cd:0e:8a:75:2b:b7:29:e9:68:\n" + " 4a:90:53:45:db:73:ff:b3:e5:c1:d4:6b:dd:3a:b1:\n" + " ef:53:9f:23:e9:c6:87:ce:67:b9:fb:a4:d5:76:21:\n" + " 03:cb:c5:72:6b:c5:a6:07:55:fb:47:90:e8:92:38:\n" + " 73:14:11:8e:ff:21:b9:35:64:5a:61:c7:fc:1f:e4:\n" + " 
4d:47:e5:03:cc:0b:c3:69:66:71:84:0c:18:2f:61:\n" + " 7f:34:dd:f2:91:e3:b7:9d:a8:b8:db:3f:6e:6f:96:\n" + " fa:34:06:82:04:c8:18:cc:de:8b:7f:26:b5:48:53:\n" + " fb:fb:15:7b:0e:38:60:fe:da:21:98:8d:73:07:b2:\n" + " 6b:fd:ad:21:59:e7:84:66:e1:04:16:1c:be:13:34:\n" + " 28:43:2c:09:3d:e4:77:2a:a4:ad:6d:f9:26:04:f7:\n" + " 43:73:9b:d9:ea:1a:43:6a:b4:db:88:f8:f9:bd:34:\n" + " f8:a6:e8:7a:ab:b4:b2:e1:29:47:a6:ba:b8:65:9c:\n" + " c6:b3:af:13:43:38:ef:2a:05:77:9f:8f:f0:0c:56:\n" + " 21:c2:92:d2:2c:c3:32:50:d1:62:ae:51:fc:99:e6:\n" + " b8:38:f8:83:1d:8d:40:11:e0:1d:51:5d:3f:fa:55:\n" + " 61:b6:18:09:1e:71:af:95:64:9c:ea:c6:11:64:f0:\n" + " a8:02:7d:bb:c8:54:2e:57:48:32:7c:51:66:0d:d6:\n" + " 3e:0e:ed:5e:30:a8:a6:47:03:64:5c:89:21:45:90:\n" + " e1:4c:91:bc:bd:81:6e:73:a9:14:27:e6:0d:6d:38:\n" + " dc:50:9d:b2:56:66:60:6c:66:b9:5d:bb:8c:96:2d:\n" + " 89:5e:0d:2b:ed:b8:03:31:ce:0a:ff:82:03:f5:b2:\n" + " 3b:e5:27:de:61:d8:8f:bf:a2:6a:64:b0:4a:87:23:\n" + " 40:28:a3:f1:ec:96:50:cd:83:50:2d:78:71:92:f2:\n" + " 88:75:b0:9d:cd:0b:e4:62:a6:a5:63:11:fc:b4:ba:\n" + " 9f:c6:67:40:2c:ad:a4:ef:94:f0:f9:a0:ba:e1:52:\n" + " 2e:27:d9:6b:1d:82:23:ed:3c:0b:0b:d2:bc:14:be:\n" + " 6d:b1:69:ad:3e:25:3a:66:d2:d1:af:9f:88:45:25:\n" + " 6b:6e:be:1f:a0:e7:b2:9f:6d:24:94:0d:f4:c2:75:\n" + " f9:1f:5d\n" " Exponent: 65537 (0x10001)\n" " X509v3 extensions:\n" - " X509v3 Subject Key Identifier: \n" - " 83:21:DE:EC:C0:79:03:6D:1E:83:F3:E5:97:29:D5:5A:C0:96:40:FA\n" - " X509v3 Authority Key Identifier: \n" - " keyid:8A:22:C6:9C:2E:11:F3:40:0C:CE:82:0C:22:59:FF:F8:7F:D0:B9:13\n" - "\n" - " X509v3 Basic Constraints: critical\n" + " X509v3 Basic Constraints:\n" " CA:TRUE, pathlen:0\n" - " X509v3 Key Usage: critical\n" + " X509v3 Key Usage:\n" " Digital Signature, Certificate Sign, CRL Sign\n" + " X509v3 Subject Key Identifier:\n" + " 56:98:DC:45:25:11:E2:8C:2B:EA:D6:C6:E2:C8:BE:2C:C8:69:FF:FF\n" + " X509v3 Authority Key Identifier:\n" + " keyid:4D:7D:AE:0D:A5:5E:22:5A:6A:8F:19:61:54:B3:58:CB:7B:C0:BD:DA\n" + " DirName:/C=US/ST=California/L=San Francisco/O=Linden Lab/OU=Second Life Engineering/CN=Integration Test Root CA/emailAddress=noreply@lindenlab.com\n" + " serial:EF:54:D8:F7:DA:18:E8:19\n" " Signature Algorithm: sha256WithRSAEncryption\n" - " a3:6c:85:9a:2e:4e:7e:5d:83:63:0f:f5:4f:a9:7d:ec:0e:6f:\n" - " ae:d7:ba:df:64:e0:46:0e:3d:da:18:15:2c:f3:73:ca:81:b1:\n" - " 10:d9:53:14:21:7d:72:5c:94:88:a5:9d:ad:ab:45:42:c6:64:\n" - " a9:d9:2e:4e:29:47:2c:b1:95:07:b7:62:48:68:1f:68:13:1c:\n" - " d2:a0:fb:5e:38:24:4a:82:0a:87:c9:93:20:43:7e:e9:f9:79:\n" - " ef:03:a2:bd:9e:24:6b:0a:01:5e:4a:36:c5:7d:7a:fe:d6:aa:\n" - " 2f:c2:8c:38:8a:99:3c:b0:6a:e5:60:be:56:d6:eb:60:03:55:\n" - " 24:42:a0:1a:fa:91:24:a3:53:15:75:5d:c8:eb:7c:1e:68:5a:\n" - " 7e:13:34:e3:85:37:1c:76:3f:77:67:1b:ed:1b:52:17:fc:4a:\n" - " a3:e2:74:84:80:2c:69:fc:dd:7d:26:97:c4:2a:69:7d:9c:dc:\n" - " 61:97:70:29:a7:3f:2b:5b:2b:22:51:fd:fe:6a:5d:f9:e7:14:\n" - " 48:b7:2d:c8:33:58:fc:f2:5f:27:f7:26:16:be:be:b5:aa:a2:\n" - " 64:53:3c:69:e8:b5:61:eb:ab:91:a5:b4:09:9b:f6:98:b8:5c:\n" - " 5b:24:2f:93:f5:2b:9c:8c:58:fb:26:3f:67:53:d7:42:64:e8:\n" - " 79:77:73:41:4e:e3:02:39:0b:b6:68:97:8b:84:e8:1d:83:a8:\n" - " 15:f1:06:46:47:80:42:5e:14:e2:61:8a:76:84:d5:d4:71:7f:\n" - " 4e:ff:d9:74:87:ff:32:c5:87:20:0a:d4:59:40:3e:d8:17:ef:\n" - " da:65:e9:0a:51:fe:1e:c3:46:91:d2:ee:e4:23:57:97:87:d4:\n" - " a6:a5:eb:ef:81:6a:d8:8c:d6:1f:8e:b1:18:4c:6b:89:32:55:\n" - " 53:68:26:9e:bb:03:be:2c:e9:8b:ff:97:9c:1c:ac:28:c3:9f:\n" - " 0b:b7:93:23:24:31:63:e4:19:13:f2:bb:08:71:b7:c5:c5:c4:\n" - " 
10:ff:dc:fc:33:54:a4:5e:ec:a3:fe:0a:80:ca:9c:bc:95:6f:\n" - " 5f:39:91:3b:61:69:16:94:0f:57:4b:fc:4b:b1:be:72:98:5d:\n" - " 10:f9:08:a7:d6:e0:e8:3d:5d:54:7d:fa:4b:6a:dd:98:41:ed:\n" - " 84:a1:39:67:5c:6c:7f:0c:b0:e1:98:c1:14:ed:fe:1e:e8:05:\n" - " 8d:7f:6a:24:cb:1b:05:42:0d:7f:13:ba:ca:b5:91:db:a5:f0:\n" - " 40:2b:70:7a:2a:a5:5d:ed:56:0c:f0:c2:72:ee:63:dd:cb:5d:\n" - " 76:f6:08:e6:e6:30:ef:3a:b2:16:34:41:a4:e1:30:14:bc:c7:\n" - " f9:23:3a:1a:70:df:b8:cc\n" + " ae:d0:30:ac:31:49:20:86:0b:34:01:58:08:94:68:cc:38:9c:\n" + " f7:13:5c:46:19:33:ed:54:5e:e4:43:f3:59:33:5c:50:d9:89:\n" + " 8b:ee:75:67:a8:c7:0e:d1:30:c2:4e:a3:2e:a8:64:2d:6a:a8:\n" + " f4:bd:b1:32:dc:bc:46:48:5d:1a:18:d8:e8:0b:8c:fe:7b:51:\n" + " d9:dd:b9:e3:4b:d1:f9:e0:22:46:dd:37:5b:b2:cb:72:8e:9c:\n" + " 4b:da:67:df:fd:ce:86:49:21:31:4e:99:b6:d4:38:0b:14:5d:\n" + " ad:97:ba:8f:e2:08:15:85:73:eb:4a:7d:01:49:af:63:ae:2d:\n" + " e3:9d:0a:d7:11:c2:03:d3:15:21:97:be:3d:d2:ea:ab:cc:93:\n" + " 16:98:64:80:72:eb:c2:78:0a:09:69:c4:2b:5d:df:30:7b:be:\n" + " 9b:02:34:73:62:9f:95:b1:cf:08:e8:9e:57:a8:37:31:cf:2c:\n" + " 8c:18:b1:d5:7a:25:90:d6:b6:76:28:1b:e2:b1:cf:1b:f1:ef:\n" + " dd:2f:d3:07:af:81:e3:5f:fc:5a:e7:3c:a9:37:0d:9c:78:5b:\n" + " 58:dc:89:54:70:a4:5b:ff:9f:64:30:a3:85:12:32:69:a5:02:\n" + " 73:d9:1d:ff:69:1f:d4:97:8f:d0:a8:90:8c:dd:2e:45:a1:b1:\n" + " e3:8a:82:fc:fc:08:41:01:51:92:87:9a:09:7b:35:c3:cc:48:\n" + " 81:39:30:a9:f4:41:3b:06:a3:06:21:cc:4b:bc:1b:76:58:94:\n" + " d1:e4:22:70:7f:20:7e:7a:b4:fa:7f:e8:79:c1:8c:89:9e:e9:\n" + " e3:72:2a:43:72:47:9e:bb:26:ed:64:2c:c8:54:f7:b4:95:c2:\n" + " c4:e9:8b:df:d5:10:a7:ed:a5:7a:94:97:c4:76:45:e3:6c:c0:\n" + " 0e:a6:2a:76:d5:1d:2f:ad:99:32:c6:7b:f6:41:e0:65:37:0f:\n" + " c0:1f:c5:99:4a:75:fd:6c:e0:f1:f0:58:49:2d:81:10:ca:d8:\n" + " eb:2b:c3:9b:a9:d9:a9:f5:6c:6d:26:fd:b8:32:92:58:f4:65:\n" + " 0b:d1:8e:03:1e:d5:6a:95:d4:46:9e:65:dd:e5:85:36:e6:31:\n" + " 77:3a:1a:20:2b:07:b7:f1:9a:4e:8d:54:22:5a:54:1c:72:5c:\n" + " 1f:b4:1a:5b:21:ed:06:5a:9a:e5:3c:01:c9:9b:af:50:61:f2:\n" + " 29:6b:ec:6d:19:bb:2e:02:94:ca:36:71:ef:45:39:f1:a5:25:\n" + " 10:0e:90:bc:a7:b3:5b:ab:af:f1:19:88:6a:09:2f:1f:d0:24:\n" + " a8:62:ed:d9:1a:65:89:65:16:a5:55:de:33:e8:7a:81:66:72:\n" + " 91:17:5e:1d:22:72:f7:b8\n" "-----BEGIN CERTIFICATE-----\n" - "MIIGSDCCBDCgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgboxCzAJBgNVBAYTAlVT\n" - "MRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMRMw\n" - "EQYDVQQKDApMaW5kZW4gTGFiMSAwHgYDVQQLDBdTZWNvbmQgTGlmZSBFbmdpbmVl\n" - "cmluZzEhMB8GA1UEAwwYSW50ZWdyYXRpb24gVGVzdCBSb290IENBMSQwIgYJKoZI\n" - "hvcNAQkBFhVub3JlcGx5QGxpbmRlbmxhYi5jb20wHhcNMTgwNTIyMjIzOTA4WhcN\n" - "MjgwNTE5MjIzOTA4WjCBqjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3Ju\n" - "aWExEzARBgNVBAoMCkxpbmRlbiBMYWIxIDAeBgNVBAsMF1NlY29uZCBMaWZlIEVu\n" - "Z2luZWVyaW5nMSkwJwYDVQQDDCBJbnRlZ3JhdGlvbiBUZXN0IEludGVybWVkaWF0\n" - "ZSBDQTEkMCIGCSqGSIb3DQEJARYVbm9yZXBseUBsaW5kZW5sYWIuY29tMIICIjAN\n" - "BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAzqNw4sT7S5eQoTC7wRsTuap+Rhej\n" - "Jo1pP15zlehqsQq0j1Bl48ZcOSQ03wu3zM5iDDZaEiz+NUzpHKyAXiSZ16q9vkjA\n" - "YmR3NohmzvSo3dJ2JGKQVUH8HRNOp05XvKikWUssWhzYzBbe6IgwyZXfL6YUKA/r\n" - "NEYSWLraDubenBX29OOfdKpwiXmL6Vp7GFQVlDojCmV4BdkzkCrOFRgNUvxcMWUg\n" - "0BI3jBGAutSwgnMAS0m+y9a8581h8wCYmXRaN4FJln4UARuG0tAGlEBjY0YR/DNc\n" - "vTpe1OVER2RQvaaXVXBkmybM3iCCkGqDQZxvcUcUvstofIW+7y52EhnTyYcytKxg\n" - "IBYoLa+86AHGf/vYEdX0txS9Jwhbcr4J4JHInHu0sxLvMja+sbmit+NpRzB2upyb\n" - "GZlNU91c6CzxsmRpzxW9+LtYlXNYOJW0es+EKabC2/C975cm1Jms18e+sA0R9CaG\n" - "LXdCUiXXVsfjl7E2XJdx0Jv1tVCN+f/7EHc8tVNtoUM1qQMyBavX9dEZvV+SowAq\n" - 
"eTekdk/pMg3khrvqwxrFM+gW1KXY4Oi7wvAiFeLZjK6sfSu/66NMOykdlKyju226\n" - "bQORA89GEsRmIcXGZ9gRGXkBDm6EHHZvET3rlInFaiYfzeARi1HumTVp5X8LdyqU\n" - "5EtkuYMEMAXkouMCAwEAAaNmMGQwHQYDVR0OBBYEFIMh3uzAeQNtHoPz5Zcp1VrA\n" - "lkD6MB8GA1UdIwQYMBaAFIoixpwuEfNADM6CDCJZ//h/0LkTMBIGA1UdEwEB/wQI\n" - "MAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQCjbIWa\n" - "Lk5+XYNjD/VPqX3sDm+u17rfZOBGDj3aGBUs83PKgbEQ2VMUIX1yXJSIpZ2tq0VC\n" - "xmSp2S5OKUcssZUHt2JIaB9oExzSoPteOCRKggqHyZMgQ37p+XnvA6K9niRrCgFe\n" - "SjbFfXr+1qovwow4ipk8sGrlYL5W1utgA1UkQqAa+pEko1MVdV3I63weaFp+EzTj\n" - "hTccdj93ZxvtG1IX/Eqj4nSEgCxp/N19JpfEKml9nNxhl3Appz8rWysiUf3+al35\n" - "5xRIty3IM1j88l8n9yYWvr61qqJkUzxp6LVh66uRpbQJm/aYuFxbJC+T9SucjFj7\n" - "Jj9nU9dCZOh5d3NBTuMCOQu2aJeLhOgdg6gV8QZGR4BCXhTiYYp2hNXUcX9O/9l0\n" - "h/8yxYcgCtRZQD7YF+/aZekKUf4ew0aR0u7kI1eXh9SmpevvgWrYjNYfjrEYTGuJ\n" - "MlVTaCaeuwO+LOmL/5ecHKwow58Lt5MjJDFj5BkT8rsIcbfFxcQQ/9z8M1SkXuyj\n" - "/gqAypy8lW9fOZE7YWkWlA9XS/xLsb5ymF0Q+Qin1uDoPV1UffpLat2YQe2EoTln\n" - "XGx/DLDhmMEU7f4e6AWNf2okyxsFQg1/E7rKtZHbpfBAK3B6KqVd7VYM8MJy7mPd\n" - "y1129gjm5jDvOrIWNEGk4TAUvMf5IzoacN+4zA==\n" + "MIIHNjCCBR6gAwIBAgIJAIW7S2Ym25rGMA0GCSqGSIb3DQEBCwUAMIG6MQswCQYD\n" + "VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5j\n" + "aXNjbzETMBEGA1UECgwKTGluZGVuIExhYjEgMB4GA1UECwwXU2Vjb25kIExpZmUg\n" + "RW5naW5lZXJpbmcxITAfBgNVBAMMGEludGVncmF0aW9uIFRlc3QgUm9vdCBDQTEk\n" + "MCIGCSqGSIb3DQEJARYVbm9yZXBseUBsaW5kZW5sYWIuY29tMB4XDTI0MDcyMzEx\n" + "NDYzM1oXDTM0MDcyMTExNDYzM1owgcIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApD\n" + "YWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMRMwEQYDVQQKDApMaW5k\n" + "ZW4gTGFiMSAwHgYDVQQLDBdTZWNvbmQgTGlmZSBFbmdpbmVlcmluZzEpMCcGA1UE\n" + "AwwgSW50ZWdyYXRpb24gVGVzdCBJbnRlcm1lZGlhdGUgQ0ExJDAiBgkqhkiG9w0B\n" + "CQEWFW5vcmVwbHlAbGluZGVubGFiLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIP\n" + "ADCCAgoCggIBAL730svkXEZ74hEiiXLad3LsBYcZ93cH/WfXrxPVdhKS3WlNIkew\n" + "PZSKapWFNLh4w51jMrFLCrYOBXurBiP8DSG1/MZqWja+bvzHR5ejGC4zzQ6KdSu3\n" + "KeloSpBTRdtz/7PlwdRr3Tqx71OfI+nGh85nufuk1XYhA8vFcmvFpgdV+0eQ6JI4\n" + "cxQRjv8huTVkWmHH/B/kTUflA8wLw2lmcYQMGC9hfzTd8pHjt52ouNs/bm+W+jQG\n" + "ggTIGMzei38mtUhT+/sVew44YP7aIZiNcweya/2tIVnnhGbhBBYcvhM0KEMsCT3k\n" + "dyqkrW35JgT3Q3Ob2eoaQ2q024j4+b00+Kboequ0suEpR6a6uGWcxrOvE0M47yoF\n" + "d5+P8AxWIcKS0izDMlDRYq5R/JnmuDj4gx2NQBHgHVFdP/pVYbYYCR5xr5VknOrG\n" + "EWTwqAJ9u8hULldIMnxRZg3WPg7tXjCopkcDZFyJIUWQ4UyRvL2BbnOpFCfmDW04\n" + "3FCdslZmYGxmuV27jJYtiV4NK+24AzHOCv+CA/WyO+Un3mHYj7+iamSwSocjQCij\n" + "8eyWUM2DUC14cZLyiHWwnc0L5GKmpWMR/LS6n8ZnQCytpO+U8PmguuFSLifZax2C\n" + "I+08CwvSvBS+bbFprT4lOmbS0a+fiEUla26+H6Dnsp9tJJQN9MJ1+R9dAgMBAAGj\n" + "ggEzMIIBLzAPBgNVHRMECDAGAQH/AgEAMAsGA1UdDwQEAwIBhjAdBgNVHQ4EFgQU\n" + "VpjcRSUR4owr6tbG4si+LMhp//8wge8GA1UdIwSB5zCB5IAUTX2uDaVeIlpqjxlh\n" + "VLNYy3vAvdqhgcCkgb0wgboxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9y\n" + "bmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMRMwEQYDVQQKDApMaW5kZW4gTGFi\n" + "MSAwHgYDVQQLDBdTZWNvbmQgTGlmZSBFbmdpbmVlcmluZzEhMB8GA1UEAwwYSW50\n" + "ZWdyYXRpb24gVGVzdCBSb290IENBMSQwIgYJKoZIhvcNAQkBFhVub3JlcGx5QGxp\n" + "bmRlbmxhYi5jb22CCQDvVNj32hjoGTANBgkqhkiG9w0BAQsFAAOCAgEArtAwrDFJ\n" + "IIYLNAFYCJRozDic9xNcRhkz7VRe5EPzWTNcUNmJi+51Z6jHDtEwwk6jLqhkLWqo\n" + "9L2xMty8RkhdGhjY6AuM/ntR2d2540vR+eAiRt03W7LLco6cS9pn3/3OhkkhMU6Z\n" + "ttQ4CxRdrZe6j+IIFYVz60p9AUmvY64t450K1xHCA9MVIZe+PdLqq8yTFphkgHLr\n" + "wngKCWnEK13fMHu+mwI0c2KflbHPCOieV6g3Mc8sjBix1XolkNa2digb4rHPG/Hv\n" + "3S/TB6+B41/8Wuc8qTcNnHhbWNyJVHCkW/+fZDCjhRIyaaUCc9kd/2kf1JeP0KiQ\n" + "jN0uRaGx44qC/PwIQQFRkoeaCXs1w8xIgTkwqfRBOwajBiHMS7wbdliU0eQicH8g\n" + 
"fnq0+n/oecGMiZ7p43IqQ3JHnrsm7WQsyFT3tJXCxOmL39UQp+2lepSXxHZF42zA\n" + "DqYqdtUdL62ZMsZ79kHgZTcPwB/FmUp1/Wzg8fBYSS2BEMrY6yvDm6nZqfVsbSb9\n" + "uDKSWPRlC9GOAx7VapXURp5l3eWFNuYxdzoaICsHt/GaTo1UIlpUHHJcH7QaWyHt\n" + "Blqa5TwByZuvUGHyKWvsbRm7LgKUyjZx70U58aUlEA6QvKezW6uv8RmIagkvH9Ak\n" + "qGLt2RpliWUWpVXeM+h6gWZykRdeHSJy97g=\n" "-----END CERTIFICATE-----\n" - ); + ); const std::string mPemChildCert( "Certificate:\n" " Data:\n" " Version: 3 (0x2)\n" - " Serial Number: 4096 (0x1000)\n" + " Serial Number: 9e:8d:34:13:e7:9b:f9:31\n" " Signature Algorithm: sha256WithRSAEncryption\n" - " Issuer: C=US, ST=California, O=Linden Lab, OU=Second Life Engineering, CN=Integration Test Intermediate CA/emailAddress=noreply@lindenlab.com\n" + " Issuer: C=US, ST=California, L=San Francisco, O=Linden Lab, OU=Second Life Engineering, CN=Integration Test Intermediate CA/emailAddress=noreply@lindenlab.com\n" " Validity\n" - " Not Before: May 22 22:58:15 2018 GMT\n" - " Not After : Jul 19 22:58:15 2024 GMT\n" + " Not Before: Jul 23 11:46:39 2024 GMT\n" + " Not After : Jul 21 11:46:39 2034 GMT\n" " Subject: C=US, ST=California, L=San Francisco, O=Linden Lab, OU=Second Life Engineering, CN=Integration Test Server Cert/emailAddress=noreply@lindenlab.com\n" " Subject Public Key Info:\n" " Public Key Algorithm: rsaEncryption\n" - " Public-Key: (2048 bit)\n" + " Public-Key: (4096 bit)\n" " Modulus:\n" - " 00:bf:a1:1c:76:82:4a:10:1d:25:0e:02:e2:7a:64:\n" - " 54:c7:94:c5:c0:98:d5:35:f3:cb:cb:30:ba:31:9c:\n" - " bd:4c:2f:4a:4e:24:03:4b:87:5c:c1:5c:fe:d9:89:\n" - " 3b:cb:01:bc:eb:a5:b7:78:dc:b3:58:e5:78:a7:15:\n" - " 34:50:30:aa:16:3a:b2:94:17:6d:1e:7f:b2:70:1e:\n" - " 96:41:bb:1d:e3:22:80:fa:dc:00:6a:fb:34:3e:67:\n" - " e7:c2:21:2f:1b:d3:af:04:49:91:eb:bb:60:e0:26:\n" - " 52:75:28:8a:08:5b:91:56:4e:51:50:40:51:70:af:\n" - " cb:80:66:c8:59:e9:e2:48:a8:62:d0:26:67:80:0a:\n" - " 12:16:d1:f6:15:9e:1f:f5:92:37:f3:c9:2f:03:9e:\n" - " 22:f6:60:5a:76:45:8c:01:2c:99:54:72:19:db:b7:\n" - " 72:e6:5a:69:f3:e9:31:65:5d:0f:c7:5c:9c:17:29:\n" - " 71:14:7f:db:47:c9:1e:65:a2:41:b0:2f:14:17:ec:\n" - " 4b:25:f2:43:8f:b4:a3:8d:37:1a:07:34:b3:29:bb:\n" - " 8a:44:8e:84:08:a2:1b:76:7a:cb:c2:39:2f:6e:e3:\n" - " fc:d6:91:b5:1f:ce:58:91:57:70:35:6e:25:a9:48:\n" - " 0e:07:cf:4e:dd:16:42:65:cf:8a:42:b3:27:e6:fe:\n" - " 6a:e3\n" + " 00:d8:ac:0c:27:8f:ea:c0:4d:21:e4:75:55:31:57:\n" + " 83:46:47:14:1e:f5:67:ae:98:60:c4:97:6d:e8:53:\n" + " f2:4d:3b:ec:6f:08:bc:1e:c0:e2:a6:75:b5:90:1d:\n" + " 30:a2:59:68:32:10:2b:29:67:fc:99:f1:24:6a:36:\n" + " 73:60:31:6b:c7:a0:b8:b0:38:60:b1:59:23:2c:ab:\n" + " 25:a2:c8:b0:bc:2c:c6:d7:4c:87:37:1b:5e:51:a4:\n" + " 63:3e:c4:6d:ed:da:5e:d3:ad:8a:6d:52:e4:87:38:\n" + " 33:76:cf:f2:86:58:b3:10:a4:91:8d:3d:4f:27:9a:\n" + " 8b:b4:d7:67:90:31:1c:f5:7f:78:af:6f:f2:dd:39:\n" + " d0:16:16:7b:46:ad:88:1b:3b:74:6b:10:29:8b:64:\n" + " ba:ed:9f:a7:69:99:55:8f:73:0d:18:a3:7f:40:20:\n" + " 3a:41:4a:94:39:62:8b:fe:c6:9d:79:d0:cd:1c:e2:\n" + " d4:74:bb:43:75:eb:86:8b:30:c1:8d:cc:14:ab:75:\n" + " 2e:f5:3e:0c:05:cb:e4:c3:92:d8:81:8c:df:a5:4e:\n" + " 2e:0b:ae:17:15:9b:e6:dd:9e:16:46:42:27:92:8a:\n" + " 0e:3a:74:1e:d1:3f:ee:7e:a5:d7:ec:1c:63:d4:96:\n" + " 5b:36:f9:15:ee:da:66:ac:5e:de:91:d9:08:24:fb:\n" + " 5d:fc:9b:77:dd:ff:20:a6:67:6f:48:41:5e:5a:ac:\n" + " 13:a4:2c:2a:f2:a3:15:86:e2:84:33:34:e3:91:27:\n" + " 8b:37:ba:b0:c7:5e:1a:0d:b9:f2:4e:0c:55:e6:bb:\n" + " d9:63:f5:05:7b:aa:19:e5:57:ce:a5:b1:46:4b:b3:\n" + " 04:f6:a0:97:26:ed:48:ed:97:93:a6:75:b1:a3:42:\n" + " fc:cc:57:89:da:44:e9:16:a6:30:2c:01:8e:f2:ed:\n" + " 
be:45:05:08:8a:af:1e:07:51:89:cf:51:4c:aa:f3:\n" + " b3:f0:6f:db:21:80:11:32:0a:23:e2:ff:cc:59:15:\n" + " eb:ff:d2:b8:d6:a1:c1:b4:96:12:82:bf:3f:68:ad:\n" + " c8:61:50:f8:88:4f:d0:be:8e:29:64:1a:16:a5:d9:\n" + " 29:76:16:cd:70:37:c4:f2:1f:4e:c6:57:36:dd:c1:\n" + " 27:19:72:ef:98:7e:34:25:3f:76:b1:ea:15:b2:38:\n" + " 6e:d3:43:03:7a:2b:78:91:9a:19:26:2a:31:b7:5e:\n" + " b7:22:c4:fd:bf:93:10:a4:23:3f:d7:79:53:28:5d:\n" + " 2e:ba:0c:b0:5e:0a:b4:c4:a1:71:75:88:1b:b2:0e:\n" + " 2c:67:08:7b:f0:f6:37:d3:aa:39:50:03:a3:7c:17:\n" + " 1d:52:52:2a:6b:d0:a2:54:2e:ba:11:bc:26:a9:16:\n" + " a6:1b:79\n" " Exponent: 65537 (0x10001)\n" " X509v3 extensions:\n" - " X509v3 Basic Constraints: \n" + " X509v3 Basic Constraints:\n" " CA:FALSE\n" - " Netscape Cert Type: \n" - " SSL Server\n" - " Netscape Comment: \n" - " OpenSSL Generated Server Certificate\n" - " X509v3 Subject Key Identifier: \n" - " BB:59:9F:DE:6B:51:A7:6C:B3:6D:5B:8B:42:F7:B1:65:77:17:A4:E4\n" - " X509v3 Authority Key Identifier: \n" - " keyid:83:21:DE:EC:C0:79:03:6D:1E:83:F3:E5:97:29:D5:5A:C0:96:40:FA\n" - " DirName:/C=US/ST=California/L=San Francisco/O=Linden Lab/OU=Second Life Engineering/CN=Integration Test Root CA/emailAddress=noreply@lindenlab.com\n" - " serial:10:00\n" - "\n" - " X509v3 Key Usage: critical\n" + " X509v3 Key Usage:\n" " Digital Signature, Key Encipherment\n" - " X509v3 Extended Key Usage: \n" + " X509v3 Extended Key Usage:\n" " TLS Web Server Authentication\n" + " X509v3 Subject Key Identifier:\n" + " 7B:1A:F9:2B:C4:B2:F6:AE:D6:F2:8E:B1:73:FB:DD:11:CA:DB:F8:87\n" + " X509v3 Authority Key Identifier:\n" + " keyid:56:98:DC:45:25:11:E2:8C:2B:EA:D6:C6:E2:C8:BE:2C:C8:69:FF:FF\n" + " DirName:/C=US/ST=California/L=San Francisco/O=Linden Lab/OU=Second Life Engineering/CN=Integration Test Root CA/emailAddress=noreply@lindenlab.com\n" + " serial:85:BB:4B:66:26:DB:9A:C6\n" " Signature Algorithm: sha256WithRSAEncryption\n" - " 18:a6:58:55:9b:d4:af:7d:8a:27:d3:28:3a:4c:4b:42:4e:f0:\n" - " 30:d6:d9:95:11:48:12:0a:96:40:d9:2b:21:39:c5:d4:8d:e5:\n" - " 10:bc:68:78:69:0b:9f:15:4a:0b:f1:ab:99:45:0c:20:5f:27:\n" - " df:e7:14:2d:4a:30:f2:c2:8d:37:73:36:1a:27:55:5a:08:5f:\n" - " 71:a1:5e:05:83:b2:59:fe:02:5e:d7:4a:30:15:23:58:04:cf:\n" - " 48:cc:b0:71:88:9c:6b:57:f0:04:0a:d3:a0:64:6b:ee:f3:5f:\n" - " ea:ac:e1:2b:b9:7f:79:b8:db:ce:72:48:72:db:c8:5c:38:72:\n" - " 31:55:d0:ff:6b:bd:73:23:a7:30:18:5d:ed:47:18:0a:67:8e:\n" - " 53:32:0e:99:9b:96:72:45:7f:c6:00:2c:5d:1a:97:53:75:3a:\n" - " 0b:49:3d:3a:00:37:14:67:0c:28:97:34:87:aa:c5:32:e4:ae:\n" - " 34:83:12:4a:10:f7:0e:74:d4:5f:73:bd:ef:0c:b7:d8:0a:7d:\n" - " 8e:8d:5a:48:bd:f4:8e:7b:f9:4a:15:3b:61:c9:5e:40:59:6e:\n" - " c7:a8:a4:02:28:72:c5:54:8c:77:f4:55:a7:86:c0:38:a0:68:\n" - " 19:da:0f:72:5a:a9:7e:69:9f:9c:3a:d6:66:aa:e1:f4:fd:f9:\n" - " b8:4b:6c:71:9e:f0:38:02:c7:6a:9e:dc:e6:fb:ef:23:59:4f:\n" - " 5c:84:0a:df:ea:86:1f:fd:0e:5c:fa:c4:e5:50:1c:10:cf:89:\n" - " 4e:08:0e:4c:4b:61:1a:49:12:f7:e9:4b:17:71:43:7b:6d:b6:\n" - " b5:9f:d4:3b:c7:88:53:48:63:b6:00:80:8f:49:0a:c5:7e:58:\n" - " ac:78:d8:b9:06:b0:bc:86:e2:2e:48:5b:c3:24:fa:aa:72:d8:\n" - " ec:f6:c7:91:9f:0f:c8:b5:fd:2b:b2:a7:bc:2f:40:20:2b:47:\n" - " e0:d1:1d:94:52:6f:6b:be:12:b6:8c:dc:11:db:71:e6:19:ef:\n" - " a8:71:8b:ad:d3:32:c0:1c:a4:3f:b3:0f:af:e5:50:e1:ff:41:\n" - " a4:b7:6f:57:71:af:fd:16:4c:e8:24:b3:99:1b:cf:12:8f:43:\n" - " 05:80:ba:18:19:0a:a5:ec:49:81:41:4c:7e:28:b2:21:f2:59:\n" - " 6e:4a:ed:de:f9:fa:99:85:60:1f:e6:c2:42:5c:08:00:3c:84:\n" - " 06:a9:24:d4:cf:7b:6e:1b:59:1d:f4:70:16:03:a1:e0:0b:00:\n" - " 
95:5c:39:03:fc:9d:1c:8e:f7:59:0c:61:47:f6:7f:07:22:48:\n" - " 83:40:ac:e1:98:5f:c7:be:05:d5:29:2b:bf:0d:03:0e:e9:5e:\n" - " 2b:dd:09:18:fe:5e:30:61\n" + " ad:7c:50:12:24:62:62:83:e9:dd:81:1a:12:1c:6d:ae:1e:a6:\n" + " 01:cc:93:8b:ac:83:7c:3d:57:d7:7f:d2:13:40:82:c7:27:07:\n" + " 31:d8:c4:01:04:64:9c:dc:ae:7b:52:bd:f5:62:7a:d0:7c:13:\n" + " 1a:19:86:6a:ce:9a:ba:69:07:77:75:b6:67:56:d0:c3:8d:6f:\n" + " 59:5f:ac:31:83:32:2c:4f:8c:85:8c:f3:56:5b:e0:83:16:19:\n" + " c9:55:4d:56:2c:e0:06:f8:71:85:4b:7e:c6:20:b3:f6:5b:85:\n" + " 6a:b7:0f:0e:0c:75:38:6a:aa:53:cc:b0:bf:c1:fd:a1:01:8a:\n" + " 7e:5a:0b:4d:51:fc:1b:14:b0:8d:62:17:b7:5d:6a:64:30:80:\n" + " aa:50:9a:23:9e:19:46:11:9d:49:d1:35:81:87:80:8c:9c:71:\n" + " 61:26:07:23:5d:a7:ea:4e:0c:53:77:bd:eb:18:6d:63:8b:2c:\n" + " e1:83:bb:bb:f8:3e:7c:e8:0d:19:1e:be:35:aa:99:0f:c7:25:\n" + " 0c:a8:f9:74:02:c8:4c:8e:bb:13:18:fd:aa:21:34:bc:2d:9f:\n" + " 10:96:e2:99:e3:9a:d7:91:0e:1e:77:20:70:e9:b4:63:25:f8:\n" + " ea:14:1f:24:b0:6a:8b:2a:f4:61:b1:0d:7d:18:bc:1d:6d:04:\n" + " 11:b2:9f:a2:a7:55:be:2b:2c:2f:c1:d8:95:13:73:af:1c:96:\n" + " 49:30:9c:9c:94:81:6c:9b:a7:87:5c:cf:46:95:95:4a:6f:bf:\n" + " df:c9:3d:74:3e:24:6e:44:1e:14:8b:68:23:e4:00:b5:a5:b7:\n" + " 5b:a9:ea:16:5f:fa:b1:d3:1a:b1:9b:36:ef:a4:7a:6f:a3:b0:\n" + " 97:35:ac:70:c0:cc:8e:a2:d3:40:0e:c1:70:0b:d5:ce:cd:51:\n" + " 82:8a:40:72:04:8d:62:af:ba:a8:e7:a8:e9:b9:99:b7:5c:5d:\n" + " 27:96:b2:3d:f9:0d:26:8c:3f:db:ac:86:97:be:f1:2c:0b:ca:\n" + " 90:07:93:96:f4:75:c3:e8:4c:f6:a8:a2:3f:da:11:21:e7:b1:\n" + " 8c:62:36:ae:91:a9:2a:73:ba:67:f5:24:16:c3:ee:b7:b1:b4:\n" + " e3:8a:28:23:84:cf:38:c6:f0:8e:21:f6:b8:76:9a:6d:d1:e3:\n" + " 74:81:7a:22:20:a0:82:2a:31:8a:ba:44:0b:61:5a:aa:ba:c6:\n" + " 07:99:36:0a:24:06:2f:8e:c1:1c:4b:f0:65:72:fb:e9:b5:31:\n" + " 59:13:2c:c6:f8:5b:91:e2:d8:96:f3:1a:06:0b:2a:62:12:4d:\n" + " 5e:65:c9:e9:e4:00:99:a6:d3:60:1f:c3:d6:cc:a6:9b:a5:14:\n" + " 1b:4d:db:e7:3d:52:7e:2c\n" "-----BEGIN CERTIFICATE-----\n" - "MIIGbjCCBFagAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgaoxCzAJBgNVBAYTAlVT\n" - "MRMwEQYDVQQIDApDYWxpZm9ybmlhMRMwEQYDVQQKDApMaW5kZW4gTGFiMSAwHgYD\n" - "VQQLDBdTZWNvbmQgTGlmZSBFbmdpbmVlcmluZzEpMCcGA1UEAwwgSW50ZWdyYXRp\n" - "b24gVGVzdCBJbnRlcm1lZGlhdGUgQ0ExJDAiBgkqhkiG9w0BCQEWFW5vcmVwbHlA\n" - "bGluZGVubGFiLmNvbTAeFw0xODA1MjIyMjU4MTVaFw0yNDA3MTkyMjU4MTVaMIG+\n" - "MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2Fu\n" - "IEZyYW5jaXNjbzETMBEGA1UECgwKTGluZGVuIExhYjEgMB4GA1UECwwXU2Vjb25k\n" - "IExpZmUgRW5naW5lZXJpbmcxJTAjBgNVBAMMHEludGVncmF0aW9uIFRlc3QgU2Vy\n" - "dmVyIENlcnQxJDAiBgkqhkiG9w0BCQEWFW5vcmVwbHlAbGluZGVubGFiLmNvbTCC\n" - "ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL+hHHaCShAdJQ4C4npkVMeU\n" - "xcCY1TXzy8swujGcvUwvSk4kA0uHXMFc/tmJO8sBvOult3jcs1jleKcVNFAwqhY6\n" - "spQXbR5/snAelkG7HeMigPrcAGr7ND5n58IhLxvTrwRJkeu7YOAmUnUoighbkVZO\n" - "UVBAUXCvy4BmyFnp4kioYtAmZ4AKEhbR9hWeH/WSN/PJLwOeIvZgWnZFjAEsmVRy\n" - "Gdu3cuZaafPpMWVdD8dcnBcpcRR/20fJHmWiQbAvFBfsSyXyQ4+0o403Ggc0sym7\n" - "ikSOhAiiG3Z6y8I5L27j/NaRtR/OWJFXcDVuJalIDgfPTt0WQmXPikKzJ+b+auMC\n" - "AwEAAaOCAYYwggGCMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCG\n" - "SAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUw\n" - "HQYDVR0OBBYEFLtZn95rUadss21bi0L3sWV3F6TkMIHoBgNVHSMEgeAwgd2AFIMh\n" - "3uzAeQNtHoPz5Zcp1VrAlkD6oYHApIG9MIG6MQswCQYDVQQGEwJVUzETMBEGA1UE\n" - "CAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzETMBEGA1UECgwK\n" - "TGluZGVuIExhYjEgMB4GA1UECwwXU2Vjb25kIExpZmUgRW5naW5lZXJpbmcxITAf\n" - "BgNVBAMMGEludGVncmF0aW9uIFRlc3QgUm9vdCBDQTEkMCIGCSqGSIb3DQEJARYV\n" - 
"bm9yZXBseUBsaW5kZW5sYWIuY29tggIQADAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0l\n" - "BAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQADggIBABimWFWb1K99iifTKDpM\n" - "S0JO8DDW2ZURSBIKlkDZKyE5xdSN5RC8aHhpC58VSgvxq5lFDCBfJ9/nFC1KMPLC\n" - "jTdzNhonVVoIX3GhXgWDsln+Al7XSjAVI1gEz0jMsHGInGtX8AQK06Bka+7zX+qs\n" - "4Su5f3m4285ySHLbyFw4cjFV0P9rvXMjpzAYXe1HGApnjlMyDpmblnJFf8YALF0a\n" - "l1N1OgtJPToANxRnDCiXNIeqxTLkrjSDEkoQ9w501F9zve8Mt9gKfY6NWki99I57\n" - "+UoVO2HJXkBZbseopAIocsVUjHf0VaeGwDigaBnaD3JaqX5pn5w61maq4fT9+bhL\n" - "bHGe8DgCx2qe3Ob77yNZT1yECt/qhh/9Dlz6xOVQHBDPiU4IDkxLYRpJEvfpSxdx\n" - "Q3tttrWf1DvHiFNIY7YAgI9JCsV+WKx42LkGsLyG4i5IW8Mk+qpy2Oz2x5GfD8i1\n" - "/Suyp7wvQCArR+DRHZRSb2u+EraM3BHbceYZ76hxi63TMsAcpD+zD6/lUOH/QaS3\n" - "b1dxr/0WTOgks5kbzxKPQwWAuhgZCqXsSYFBTH4osiHyWW5K7d75+pmFYB/mwkJc\n" - "CAA8hAapJNTPe24bWR30cBYDoeALAJVcOQP8nRyO91kMYUf2fwciSINArOGYX8e+\n" - "BdUpK78NAw7pXivdCRj+XjBh\n" + "MIIHSTCCBTGgAwIBAgIJAJ6NNBPnm/kxMA0GCSqGSIb3DQEBCwUAMIHCMQswCQYD\n" + "VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5j\n" + "aXNjbzETMBEGA1UECgwKTGluZGVuIExhYjEgMB4GA1UECwwXU2Vjb25kIExpZmUg\n" + "RW5naW5lZXJpbmcxKTAnBgNVBAMMIEludGVncmF0aW9uIFRlc3QgSW50ZXJtZWRp\n" + "YXRlIENBMSQwIgYJKoZIhvcNAQkBFhVub3JlcGx5QGxpbmRlbmxhYi5jb20wHhcN\n" + "MjQwNzIzMTE0NjM5WhcNMzQwNzIxMTE0NjM5WjCBvjELMAkGA1UEBhMCVVMxEzAR\n" + "BgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xEzARBgNV\n" + "BAoMCkxpbmRlbiBMYWIxIDAeBgNVBAsMF1NlY29uZCBMaWZlIEVuZ2luZWVyaW5n\n" + "MSUwIwYDVQQDDBxJbnRlZ3JhdGlvbiBUZXN0IFNlcnZlciBDZXJ0MSQwIgYJKoZI\n" + "hvcNAQkBFhVub3JlcGx5QGxpbmRlbmxhYi5jb20wggIiMA0GCSqGSIb3DQEBAQUA\n" + "A4ICDwAwggIKAoICAQDYrAwnj+rATSHkdVUxV4NGRxQe9WeumGDEl23oU/JNO+xv\n" + "CLwewOKmdbWQHTCiWWgyECspZ/yZ8SRqNnNgMWvHoLiwOGCxWSMsqyWiyLC8LMbX\n" + "TIc3G15RpGM+xG3t2l7TrYptUuSHODN2z/KGWLMQpJGNPU8nmou012eQMRz1f3iv\n" + "b/LdOdAWFntGrYgbO3RrECmLZLrtn6dpmVWPcw0Yo39AIDpBSpQ5Yov+xp150M0c\n" + "4tR0u0N164aLMMGNzBSrdS71PgwFy+TDktiBjN+lTi4LrhcVm+bdnhZGQieSig46\n" + "dB7RP+5+pdfsHGPUlls2+RXu2masXt6R2Qgk+138m3fd/yCmZ29IQV5arBOkLCry\n" + "oxWG4oQzNOORJ4s3urDHXhoNufJODFXmu9lj9QV7qhnlV86lsUZLswT2oJcm7Ujt\n" + "l5OmdbGjQvzMV4naROkWpjAsAY7y7b5FBQiKrx4HUYnPUUyq87Pwb9shgBEyCiPi\n" + "/8xZFev/0rjWocG0lhKCvz9orchhUPiIT9C+jilkGhal2Sl2Fs1wN8TyH07GVzbd\n" + "wScZcu+YfjQlP3ax6hWyOG7TQwN6K3iRmhkmKjG3XrcixP2/kxCkIz/XeVMoXS66\n" + "DLBeCrTEoXF1iBuyDixnCHvw9jfTqjlQA6N8Fx1SUipr0KJULroRvCapFqYbeQID\n" + "AQABo4IBQjCCAT4wCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0lBAwwCgYI\n" + "KwYBBQUHAwEwHQYDVR0OBBYEFHsa+SvEsvau1vKOsXP73RHK2/iHMIHvBgNVHSME\n" + "gecwgeSAFFaY3EUlEeKMK+rWxuLIvizIaf//oYHApIG9MIG6MQswCQYDVQQGEwJV\n" + "UzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzET\n" + "MBEGA1UECgwKTGluZGVuIExhYjEgMB4GA1UECwwXU2Vjb25kIExpZmUgRW5naW5l\n" + "ZXJpbmcxITAfBgNVBAMMGEludGVncmF0aW9uIFRlc3QgUm9vdCBDQTEkMCIGCSqG\n" + "SIb3DQEJARYVbm9yZXBseUBsaW5kZW5sYWIuY29tggkAhbtLZibbmsYwDQYJKoZI\n" + "hvcNAQELBQADggIBAK18UBIkYmKD6d2BGhIcba4epgHMk4usg3w9V9d/0hNAgscn\n" + "BzHYxAEEZJzcrntSvfVietB8ExoZhmrOmrppB3d1tmdW0MONb1lfrDGDMixPjIWM\n" + "81Zb4IMWGclVTVYs4Ab4cYVLfsYgs/ZbhWq3Dw4MdThqqlPMsL/B/aEBin5aC01R\n" + "/BsUsI1iF7ddamQwgKpQmiOeGUYRnUnRNYGHgIyccWEmByNdp+pODFN3vesYbWOL\n" + "LOGDu7v4PnzoDRkevjWqmQ/HJQyo+XQCyEyOuxMY/aohNLwtnxCW4pnjmteRDh53\n" + "IHDptGMl+OoUHySwaosq9GGxDX0YvB1tBBGyn6KnVb4rLC/B2JUTc68clkkwnJyU\n" + "gWybp4dcz0aVlUpvv9/JPXQ+JG5EHhSLaCPkALWlt1up6hZf+rHTGrGbNu+kem+j\n" + "sJc1rHDAzI6i00AOwXAL1c7NUYKKQHIEjWKvuqjnqOm5mbdcXSeWsj35DSaMP9us\n" + "hpe+8SwLypAHk5b0dcPoTPaooj/aESHnsYxiNq6RqSpzumf1JBbD7rextOOKKCOE\n" + 
"zzjG8I4h9rh2mm3R43SBeiIgoIIqMYq6RAthWqq6xgeZNgokBi+OwRxL8GVy++m1\n" + "MVkTLMb4W5Hi2JbzGgYLKmISTV5lyenkAJmm02Afw9bMppulFBtN2+c9Un4s\n" "-----END CERTIFICATE-----\n" - ); + ); + // Test wrapper declaration : wrapping nothing for the moment struct sechandler_basic_test @@ -701,14 +722,13 @@ namespace tut //std::ostringstream llsd_value; //llsd_value << LLSDOStreamer<LLSDNotationFormatter>(llsd_cert) << std::endl; LL_DEBUGS() << "test 1 cert " << llsd_cert << LL_ENDL; - ensure_equals("Issuer Name/commonName", (std::string)llsd_cert["issuer_name"]["commonName"], "Integration Test Intermediate CA"); ensure_equals("Issuer Name/countryName", (std::string)llsd_cert["issuer_name"]["countryName"], "US"); ensure_equals("Issuer Name/state", (std::string)llsd_cert["issuer_name"]["stateOrProvinceName"], "California"); ensure_equals("Issuer Name/org name", (std::string)llsd_cert["issuer_name"]["organizationName"], "Linden Lab"); ensure_equals("Issuer Name/org unit", (std::string)llsd_cert["issuer_name"]["organizationalUnitName"], "Second Life Engineering"); ensure_equals("Issuer name string", (std::string)llsd_cert["issuer_name_string"], - "emailAddress=noreply@lindenlab.com,CN=Integration Test Intermediate CA,OU=Second Life Engineering,O=Linden Lab,ST=California,C=US"); + "emailAddress=noreply@lindenlab.com,CN=Integration Test Intermediate CA,OU=Second Life Engineering,O=Linden Lab,L=San Francisco,ST=California,C=US"); ensure_equals("subject Name/commonName", (std::string)llsd_cert["subject_name"]["commonName"], "Integration Test Server Cert"); ensure_equals("subject Name/countryName", (std::string)llsd_cert["subject_name"]["countryName"], "US"); @@ -721,9 +741,9 @@ namespace tut ensure_equals("subject name string", (std::string)llsd_cert["subject_name_string"], "emailAddress=noreply@lindenlab.com,CN=Integration Test Server Cert,OU=Second Life Engineering,O=Linden Lab,L=San Francisco,ST=California,C=US"); - ensure_equals("serial number", (std::string)llsd_cert["serial_number"], "1000"); - ensure_equals("valid from", (std::string)llsd_cert["valid_from"], "2018-05-22T22:58:15Z"); - ensure_equals("valid to", (std::string)llsd_cert["valid_to"], "2024-07-19T22:58:15Z"); + ensure_equals("serial number", (std::string)llsd_cert["serial_number"], "9E8D3413E79BF931"); + ensure_equals("valid from", (std::string)llsd_cert["valid_from"], "2024-07-23T11:46:39Z"); + ensure_equals("valid to", (std::string)llsd_cert["valid_to"], "2034-07-21T11:46:39Z"); LLSD expectedKeyUsage = LLSD::emptyArray(); expectedKeyUsage.append(LLSD((std::string)"digitalSignature")); expectedKeyUsage.append(LLSD((std::string)"keyEncipherment")); @@ -1042,7 +1062,7 @@ namespace tut //validate find LLSD find_info = LLSD::emptyMap(); - find_info["subjectKeyIdentifier"] = "bb:59:9f:de:6b:51:a7:6c:b3:6d:5b:8b:42:f7:b1:65:77:17:a4:e4"; + find_info["subjectKeyIdentifier"] = "7b:1a:f9:2b:c4:b2:f6:ae:d6:f2:8e:b1:73:fb:dd:11:ca:db:f8:87"; LLBasicCertificateVector::iterator found_cert = test_vector->find(find_info); ensure("found some cert", found_cert != test_vector->end()); X509* found_x509 = (*found_cert).get()->getOpenSSLX509(); @@ -1225,7 +1245,7 @@ namespace tut X509_STORE_CTX_set0_untrusted(test_store, NULL); test_chain = new LLBasicCertificateChain(test_store); X509_STORE_CTX_free(test_store); - ensure_equals("two elements in store", test_chain->size(), 1); + ensure_equals("two elements in store [1]", test_chain->size(), 1); X509* test_cert = (*test_chain)[0]->getOpenSSLX509(); ensure("validate first element in store is expected cert", 
!X509_cmp(test_cert, mX509ChildCert)); X509_free(test_cert); @@ -1238,7 +1258,7 @@ namespace tut sk_X509_push(X509_STORE_CTX_get0_untrusted(test_store), mX509IntermediateCert); test_chain = new LLBasicCertificateChain(test_store); X509_STORE_CTX_free(test_store); - ensure_equals("two elements in store", test_chain->size(), 2); + ensure_equals("two elements in store [2]", test_chain->size(), 2); test_cert = (*test_chain)[0]->getOpenSSLX509(); ensure("validate first element in store is expected cert", !X509_cmp(test_cert, mX509ChildCert)); X509_free(test_cert); @@ -1254,7 +1274,7 @@ namespace tut sk_X509_push(X509_STORE_CTX_get0_untrusted(test_store), mX509TestCert); test_chain = new LLBasicCertificateChain(test_store); X509_STORE_CTX_free(test_store); - ensure_equals("two elements in store", test_chain->size(), 1); + ensure_equals("two elements in store [3]", test_chain->size(), 1); test_cert = (*test_chain)[0]->getOpenSSLX509(); ensure("validate first element in store is expected cert", !X509_cmp(test_cert, mX509ChildCert)); X509_free(test_cert); @@ -1267,7 +1287,7 @@ namespace tut sk_X509_push(X509_STORE_CTX_get0_untrusted(test_store), mX509TestCert); test_chain = new LLBasicCertificateChain(test_store); X509_STORE_CTX_free(test_store); - ensure_equals("two elements in store", test_chain->size(), 2); + ensure_equals("two elements in store [4]", test_chain->size(), 2); test_cert = (*test_chain)[0]->getOpenSSLX509(); ensure("validate first element in store is expected cert", !X509_cmp(test_cert, mX509ChildCert)); X509_free(test_cert); diff --git a/indra/newview/viewer_manifest.py b/indra/newview/viewer_manifest.py index 695a9e6634..f69d690cea 100755 --- a/indra/newview/viewer_manifest.py +++ b/indra/newview/viewer_manifest.py @@ -549,6 +549,12 @@ class Windows_x86_64_Manifest(ViewerManifest): # Get shared libs from the shared libs staging directory with self.prefix(src=os.path.join(self.args['build'], os.pardir, 'sharedlibs', self.args['buildtype'])): + # WebRTC libraries + for libfile in ( + 'llwebrtc.dll', + ): + self.path(libfile) + # Get fmodstudio dll if needed if self.args['fmodstudio'] == 'ON': if(self.args['buildtype'].lower() == 'debug'): @@ -992,6 +998,20 @@ class Darwin_x86_64_Manifest(ViewerManifest): print("Skipping %s" % dst) return added + # WebRTC libraries + with self.prefix(src=os.path.join(self.args['build'], os.pardir, + 'sharedlibs', self.args['buildtype'], 'Resources')): + for libfile in ( + 'libllwebrtc.dylib', + ): + self.path(libfile) + + oldpath = os.path.join("@rpath", libfile) + self.run_command( + ['install_name_tool', '-change', oldpath, + '@executable_path/../Resources/%s' % libfile, + executable]) + # dylibs is a list of all the .dylib files we expect to need # in our bundled sub-apps. For each of these we'll create a # symlink from sub-app/Contents/Resources to the real .dylib. diff --git a/scripts/messages/message_template.msg.sha1 b/scripts/messages/message_template.msg.sha1 index 5ad85458e9..47291dcd66 100755 --- a/scripts/messages/message_template.msg.sha1 +++ b/scripts/messages/message_template.msg.sha1 @@ -1 +1 @@ -e3bd0529a647d938ab6d48f26d21dd52c07ebc6e
\ No newline at end of file +dd15c52581b3fe99e072b26872deba2560893fc4
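
Note (not part of the patch): the llsechandler_basic_test.cpp hunks above swap the expired 2018 test fixtures for a freshly generated 2024-2034 root / intermediate / server chain and update the expected serial numbers, subject key identifiers, issuer strings, and validity dates to match. As a reference for reviewers, the minimal standalone sketch below shows the same kind of chain check the test sets up — root CA trusted, intermediate supplied on the untrusted stack, server leaf verified — using only stock OpenSSL calls (PEM_read_X509, X509_STORE_CTX_init, sk_X509_push, X509_verify_cert). It is not the viewer's LLSecAPI code; the file name verify_chain.cpp and the argv paths are illustrative assumptions.

// verify_chain.cpp — standalone sketch, not part of this changeset.
#include <cstdio>
#include <openssl/pem.h>
#include <openssl/x509.h>
#include <openssl/x509_vfy.h>

// Load one PEM certificate from a file (illustrative helper).
static X509* load_cert(const char* path)
{
    FILE* fp = std::fopen(path, "r");
    if (!fp) return nullptr;
    X509* cert = PEM_read_X509(fp, nullptr, nullptr, nullptr);
    std::fclose(fp);
    return cert;
}

int main(int argc, char** argv)
{
    if (argc != 4)
    {
        std::fprintf(stderr, "usage: %s root.pem intermediate.pem server.pem\n", argv[0]);
        return 2;
    }

    X509* root         = load_cert(argv[1]);  // trusted anchor (root CA)
    X509* intermediate = load_cert(argv[2]);  // supplied but not trusted
    X509* server       = load_cert(argv[3]);  // leaf to verify
    if (!root || !intermediate || !server)
    {
        std::fprintf(stderr, "failed to read one of the PEM files\n");
        return 2;
    }

    // Trust store holds only the root CA.
    X509_STORE* store = X509_STORE_new();
    X509_STORE_add_cert(store, root);

    // Intermediates go on the "untrusted" stack, much as the unit test does
    // via sk_X509_push(X509_STORE_CTX_get0_untrusted(...), ...).
    STACK_OF(X509)* untrusted = sk_X509_new_null();
    sk_X509_push(untrusted, intermediate);

    X509_STORE_CTX* ctx = X509_STORE_CTX_new();
    X509_STORE_CTX_init(ctx, store, server, untrusted);

    int ok = X509_verify_cert(ctx);  // returns 1 when the chain verifies
    std::printf("verify: %s\n",
                ok == 1 ? "OK"
                        : X509_verify_cert_error_string(
                              X509_STORE_CTX_get_error(ctx)));

    X509_STORE_CTX_free(ctx);
    sk_X509_free(untrusted);
    X509_STORE_free(store);
    X509_free(server);
    X509_free(intermediate);
    X509_free(root);
    return ok == 1 ? 0 : 1;
}

Built with a plain `c++ verify_chain.cpp -lcrypto`, this sketch should report OK for the three regenerated PEM blocks above until the new certificates expire in 2034, which is the window the updated valid_from/valid_to assertions in the test now encode.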