diff --git a/autobuild.xml b/autobuild.xml index 6309ae21523..d582c2b3d0f 100644 --- a/autobuild.xml +++ b/autobuild.xml @@ -1288,53 +1288,21 @@ platforms - darwin64 - - archive - - creds - github - hash - 7facda95e2f00c260513f3d4db42588fa8ba703c - hash_algorithm - sha1 - url - https://api.github.com/repos/secondlife/llphysicsextensions_source/releases/assets/196289774 - - name - darwin64 - - linux64 - - archive - - creds - github - hash - 01d08f13c7bc8d1b95b0330fa6833b7d8274e4d0 - hash_algorithm - sha1 - url - https://api.github.com/repos/secondlife/llphysicsextensions_source/releases/assets/196289775 - - name - linux64 - - windows64 + common archive creds github hash - 6d00345c7d3471bc5f7c1218e014dd0f1a2c069b + fff82c79edb900c547c40dca9a0e3ebac5a8c7da hash_algorithm sha1 url - https://api.github.com/repos/secondlife/llphysicsextensions_source/releases/assets/196289778 + https://api.github.com/repos/secondlife/llphysicsextensions_source/releases/assets/299858950 name - windows64 + common license @@ -2428,11 +2396,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - 6314fdcee81a3538a7d960178ade66301c2fa002 + b87d3aaae14ca27350fac06e074b688202181a7d86cb7f0a2d551ceeb42473cb5e800bcbf64a83e4207297e7b65ca2c8759505f0a0e6a9a708d7cda752b49494 hash_algorithm - sha1 + blake2b url - https://github.com/secondlife/3p-webrtc-build/releases/download/m114.5735.08.73-alpha/webrtc-m114.5735.08.73-alpha.11958809572-darwin64-11958809572.tar.zst + https://github.com/AlchemyViewer/3p-webrtc/releases/download/m137.7151.04.20-r8/webrtc-m137.7151.04.20-r8.18457594440-darwin64-18457594440.tar.zst name darwin64 @@ -2442,11 +2410,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - 95d7730a3d6955697e043f3fdf20ebdcc0c71fc0 + 9e7499473f298a1a1a93a4cd829655488f5f8f363fdc35dbd6fb0b9319b6a85427f38e8368cb06faf3195fc2153fcb1c78e6d8edc1f6174ad9c5c583b6660d20 hash_algorithm - sha1 + blake2b url - 
https://github.com/secondlife/3p-webrtc-build/releases/download/m114.5735.08.73-alpha/webrtc-m114.5735.08.73-alpha.11958809572-linux64-11958809572.tar.zst + https://github.com/AlchemyViewer/3p-webrtc/releases/download/m137.7151.04.20-r8/webrtc-m137.7151.04.20-r8.18457594440-linux64-18457594440.tar.zst name linux64 @@ -2456,11 +2424,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - c7b329d6409576af6eb5b80655b007f52639c43b + 6533d3224108b71cba82eeeddbfd1065f1f5711aaa98c77ab73f8c67eea20c3ebab7527085ba417982670fbd74ca86fc92964aed8b6799d8e2285344eb5cb6ef hash_algorithm - sha1 + blake2b url - https://github.com/secondlife/3p-webrtc-build/releases/download/m114.5735.08.73-alpha/webrtc-m114.5735.08.73-alpha.11958809572-windows64-11958809572.tar.zst + https://github.com/AlchemyViewer/3p-webrtc/releases/download/m137.7151.04.20-r8/webrtc-m137.7151.04.20-r8.18457594440-windows64-18457594440.tar.zst name windows64 @@ -2473,7 +2441,7 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors copyright Copyright (c) 2011, The WebRTC project authors. All rights reserved. version - m114.5735.08.73-alpha.11958809572 + m137.7151.04.20-r8.18457594440 name webrtc canonical_repo diff --git a/build.sh b/build.sh index 302185a9bf5..e5725e897fb 100755 --- a/build.sh +++ b/build.sh @@ -160,7 +160,6 @@ pre_build() if [[ "$arch" == "Darwin" ]] then - HAVOK=OFF SIGNING=("-DENABLE_SIGNING:BOOL=YES" \ "-DSIGNING_IDENTITY:STRING=Developer ID Application: Linden Research, Inc.") fi @@ -289,8 +288,8 @@ build() done fi - # *TODO: Make this a build extension. - package_llphysicsextensions_tpv || fatal "failed building llphysicsextensions packages" + # *TODO: Make this a build extension. 
disabled for now + # package_llphysicsextensions_tpv || fatal "failed building llphysicsextensions packages" end_section "extensions $variant" else diff --git a/indra/cmake/LLPhysicsExtensions.cmake b/indra/cmake/LLPhysicsExtensions.cmake index 549bf4c07cd..162ae38fbad 100644 --- a/indra/cmake/LLPhysicsExtensions.cmake +++ b/indra/cmake/LLPhysicsExtensions.cmake @@ -22,7 +22,14 @@ if (HAVOK) include(Havok) use_prebuilt_binary(llphysicsextensions_source) set(LLPHYSICSEXTENSIONS_SRC_DIR ${LIBS_PREBUILT_DIR}/llphysicsextensions/src) - target_link_libraries( llphysicsextensions_impl INTERFACE llphysicsextensions) + if(DARWIN) + set(LLPHYSICSEXTENSIONS_STUB_DIR ${LIBS_PREBUILT_DIR}/llphysicsextensions/stub) + # can't set these library dependencies per-arch here, need to do it using XCODE_ATTRIBUTE_OTHER_LDFLAGS[arch=*] in newview/CMakeLists.txt + #target_link_libraries( llphysicsextensions_impl INTERFACE llphysicsextensions) + #target_link_libraries( llphysicsextensions_impl INTERFACE llphysicsextensionsstub) + else() + target_link_libraries( llphysicsextensions_impl INTERFACE llphysicsextensions) + endif() target_compile_definitions( llphysicsextensions_impl INTERFACE LL_HAVOK=1 ) elseif (HAVOK_TPV) use_prebuilt_binary(llphysicsextensions_tpv) diff --git a/indra/llrender/llrender.cpp b/indra/llrender/llrender.cpp index f68a6f8dd5d..b3d4b5edbd2 100644 --- a/indra/llrender/llrender.cpp +++ b/indra/llrender/llrender.cpp @@ -534,7 +534,6 @@ void LLTexUnit::setTextureFilteringOptionFast(LLTexUnit::eTextureFilterOptions o { F32 aniso_level = llclamp(LLRender::sAnisotropicFilteringLevel, 1.f, gGLManager.mMaxAnisotropy); glTexParameterf(sGLTextureType[tex_type], GL_TEXTURE_MAX_ANISOTROPY, aniso_level); - } else { diff --git a/indra/llwebrtc/CMakeLists.txt b/indra/llwebrtc/CMakeLists.txt index 0c3878131f6..d3302aa86bf 100644 --- a/indra/llwebrtc/CMakeLists.txt +++ b/indra/llwebrtc/CMakeLists.txt @@ -42,7 +42,7 @@ if (WINDOWS) iphlpapi libcmt) # as the webrtc libraries are 
release, build this binary as release as well. - target_compile_options(llwebrtc PRIVATE "/MT") + target_compile_options(llwebrtc PRIVATE "/MT" "/Zc:wchar_t") if (USE_BUGSPLAT) set_target_properties(llwebrtc PROPERTIES PDB_OUTPUT_DIRECTORY "${SYMBOLS_STAGING_DIR}") endif (USE_BUGSPLAT) diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 2f0632e6573..7a2ff0ce8e2 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -9,7 +9,7 @@ * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; - * version 2.1 of the License only. + * version 2.1 of the License only * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -32,41 +32,79 @@ #include "api/audio_codecs/audio_encoder_factory.h" #include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h" +#include "api/audio/builtin_audio_processing_builder.h" #include "api/media_stream_interface.h" #include "api/media_stream_track.h" #include "modules/audio_processing/audio_buffer.h" #include "modules/audio_mixer/audio_mixer_impl.h" +#include "api/environment/environment_factory.h" namespace llwebrtc { +#if WEBRTC_WIN +static int16_t PLAYOUT_DEVICE_DEFAULT = webrtc::AudioDeviceModule::kDefaultDevice; +static int16_t RECORD_DEVICE_DEFAULT = webrtc::AudioDeviceModule::kDefaultDevice; +#else +static int16_t PLAYOUT_DEVICE_DEFAULT = 0; +static int16_t RECORD_DEVICE_DEFAULT = 0; +#endif -static int16_t PLAYOUT_DEVICE_DEFAULT = -1; -static int16_t PLAYOUT_DEVICE_BAD = -2; -static int16_t RECORD_DEVICE_DEFAULT = -1; -static int16_t RECORD_DEVICE_BAD = -2; -LLAudioDeviceObserver::LLAudioDeviceObserver() : mSumVector {0}, mMicrophoneEnergy(0.0) {} +// +// LLWebRTCAudioTransport implementation +// -float 
LLAudioDeviceObserver::getMicrophoneEnergy() { return mMicrophoneEnergy; } +LLWebRTCAudioTransport::LLWebRTCAudioTransport() : mMicrophoneEnergy(0.0) +{ + memset(mSumVector, 0, sizeof(mSumVector)); +} -// TODO: Pull smoothing/filtering code into a common helper function -// for LLAudioDeviceObserver and LLCustomProcessor +void LLWebRTCAudioTransport::SetEngineTransport(webrtc::AudioTransport* t) +{ + engine_.store(t, std::memory_order_release); +} -void LLAudioDeviceObserver::OnCaptureData(const void *audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) +int32_t LLWebRTCAudioTransport::RecordedDataIsAvailable(const void* audio_data, + size_t number_of_frames, + size_t bytes_per_frame, + size_t number_of_channels, + uint32_t samples_per_sec, + uint32_t total_delay_ms, + int32_t clock_drift, + uint32_t current_mic_level, + bool key_pressed, + uint32_t& new_mic_level) { + auto* engine = engine_.load(std::memory_order_acquire); + + // 1) Deliver to engine (authoritative). + int32_t ret = 0; + if (engine) + { + ret = engine->RecordedDataIsAvailable(audio_data, + number_of_frames, + bytes_per_frame, + number_of_channels, + samples_per_sec, + total_delay_ms, + clock_drift, + current_mic_level, + key_pressed, + new_mic_level); + } + + // 2) Calculate energy for microphone level monitoring // calculate the energy float energy = 0; - const short *samples = (const short *) audio_samples; - for (size_t index = 0; index < num_samples * num_channels; index++) + const short *samples = (const short *) audio_data; + + for (size_t index = 0; index < number_of_frames * number_of_channels; index++) { float sample = (static_cast(samples[index]) / (float) 32767); energy += sample * sample; } - + float gain = mGain.load(std::memory_order_relaxed); + energy = energy * gain * gain; // smooth it. 
size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]); float totalSum = 0; @@ -78,18 +116,59 @@ void LLAudioDeviceObserver::OnCaptureData(const void *audio_samples, } mSumVector[i] = energy; totalSum += energy; - mMicrophoneEnergy = std::sqrt(totalSum / (num_samples * buffer_size)); + mMicrophoneEnergy = std::sqrt(totalSum / (number_of_frames * number_of_channels * buffer_size)); + + return ret; +} + +int32_t LLWebRTCAudioTransport::NeedMorePlayData(size_t number_of_frames, + size_t bytes_per_frame, + size_t number_of_channels, + uint32_t samples_per_sec, + void* audio_data, + size_t& number_of_samples_out, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) +{ + auto* engine = engine_.load(std::memory_order_acquire); + if (!engine) + { + // No engine sink; output silence to be safe. + const size_t bytes = number_of_frames * bytes_per_frame * number_of_channels; + memset(audio_data, 0, bytes); + number_of_samples_out = bytes_per_frame; + return 0; + } + + // Only the engine should fill the buffer. 
+ return engine->NeedMorePlayData(number_of_frames, + bytes_per_frame, + number_of_channels, + samples_per_sec, + audio_data, + number_of_samples_out, + elapsed_time_ms, + ntp_time_ms); } -void LLAudioDeviceObserver::OnRenderData(const void *audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) +void LLWebRTCAudioTransport::PullRenderData(int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + void* audio_data, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) { + auto* engine = engine_.load(std::memory_order_acquire); + + if (engine) + { + engine + ->PullRenderData(bits_per_sample, sample_rate, number_of_channels, number_of_frames, audio_data, elapsed_time_ms, ntp_time_ms); + } } -LLCustomProcessor::LLCustomProcessor() : mSampleRateHz(0), mNumChannels(0), mMicrophoneEnergy(0.0), mGain(1.0) +LLCustomProcessor::LLCustomProcessor(LLCustomProcessorStatePtr state) : mSampleRateHz(0), mNumChannels(0), mState(state) { memset(mSumVector, 0, sizeof(mSumVector)); } @@ -101,40 +180,61 @@ void LLCustomProcessor::Initialize(int sample_rate_hz, int num_channels) memset(mSumVector, 0, sizeof(mSumVector)); } -void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) +void LLCustomProcessor::Process(webrtc::AudioBuffer *audio) { - webrtc::StreamConfig stream_config; - stream_config.set_sample_rate_hz(mSampleRateHz); - stream_config.set_num_channels(mNumChannels); - std::vector frame; - std::vector frame_samples; - - if (audio_in->num_channels() < 1 || audio_in->num_frames() < 480) + if (audio->num_channels() < 1 || audio->num_frames() < 480) { return; } - // grab the input audio - frame_samples.resize(stream_config.num_samples()); - frame.resize(stream_config.num_channels()); - for (size_t ch = 0; ch < stream_config.num_channels(); ++ch) + // calculate the energy + + float desired_gain = mState->getGain(); + if (mState->getDirty()) { - frame[ch] = 
&(frame_samples)[ch * stream_config.num_frames()]; + // We'll delay ramping by 30ms in order to clear out buffers that may + // have had content before muting. And for the last 20ms, we'll ramp + // down or up smoothly. + mRampFrames = 5; + + // we've changed our desired gain, so set the incremental + // gain change so that we smoothly step over 20ms + mGainStep = (desired_gain - mCurrentGain) / (mSampleRateHz / 50); } - audio_in->CopyTo(stream_config, &frame[0]); - - // calculate the energy - float energy = 0; - for (size_t index = 0; index < stream_config.num_samples(); index++) + if (mRampFrames) { - float sample = frame_samples[index]; - sample = sample * mGain; // apply gain - frame_samples[index] = sample; // write processed sample back to buffer. - energy += sample * sample; + if (mRampFrames-- > 2) + { + // don't change the gain if we're still in the 'don't move' phase + mGainStep = 0.0f; + } + } + else + { + // We've ramped all the way down, so don't step the gain any more and + // just maintaint he current gain. + mGainStep = 0.0f; + mCurrentGain = desired_gain; } - audio_in->CopyFrom(&frame[0], stream_config); + float energy = 0; + + auto chans = audio->channels(); + for (size_t ch = 0; ch < audio->num_channels(); ch++) + { + float* frame_samples = chans[ch]; + float gain = mCurrentGain; + for (size_t index = 0; index < audio->num_frames(); index++) + { + float sample = frame_samples[index]; + sample = sample * gain; // apply gain + frame_samples[index] = sample; // write processed sample back to buffer. + energy += sample * sample; + gain += mGainStep; + } + } + mCurrentGain += audio->num_frames() * mGainStep; // smooth it. 
size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]); @@ -147,7 +247,7 @@ void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) } mSumVector[i] = energy; totalSum += energy; - mMicrophoneEnergy = std::sqrt(totalSum / (stream_config.num_samples() * buffer_size)); + mState->setMicrophoneEnergy(std::sqrt(totalSum / (audio->num_channels() * audio->num_frames() * buffer_size))); } // @@ -159,99 +259,65 @@ LLWebRTCImpl::LLWebRTCImpl(LLWebRTCLogCallback* logCallback) : mPeerCustomProcessor(nullptr), mMute(true), mTuningMode(false), - mPlayoutDevice(0), - mRecordingDevice(0), - mTuningAudioDeviceObserver(nullptr) + mDevicesDeploying(0), + mGain(0.0f) { } void LLWebRTCImpl::init() { - mPlayoutDevice = 0; - mRecordingDevice = 0; - rtc::InitializeSSL(); + webrtc::InitializeSSL(); // Normal logging is rather spammy, so turn it off. - rtc::LogMessage::LogToDebug(rtc::LS_NONE); - rtc::LogMessage::SetLogToStderr(true); - rtc::LogMessage::AddLogToStream(mLogSink, rtc::LS_VERBOSE); + webrtc::LogMessage::LogToDebug(webrtc::LS_NONE); + webrtc::LogMessage::SetLogToStderr(true); + webrtc::LogMessage::AddLogToStream(mLogSink, webrtc::LS_VERBOSE); mTaskQueueFactory = webrtc::CreateDefaultTaskQueueFactory(); // Create the native threads. 
- mNetworkThread = rtc::Thread::CreateWithSocketServer(); + mNetworkThread = webrtc::Thread::CreateWithSocketServer(); mNetworkThread->SetName("WebRTCNetworkThread", nullptr); mNetworkThread->Start(); - mWorkerThread = rtc::Thread::Create(); + mWorkerThread = webrtc::Thread::Create(); mWorkerThread->SetName("WebRTCWorkerThread", nullptr); mWorkerThread->Start(); - mSignalingThread = rtc::Thread::Create(); + mSignalingThread = webrtc::Thread::Create(); mSignalingThread->SetName("WebRTCSignalingThread", nullptr); mSignalingThread->Start(); - mTuningAudioDeviceObserver = new LLAudioDeviceObserver; - mWorkerThread->PostTask( - [this]() - { - // Initialize the audio devices on the Worker Thread - mTuningDeviceModule = - webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, - mTaskQueueFactory.get(), - std::unique_ptr(mTuningAudioDeviceObserver)); - - mTuningDeviceModule->Init(); - mTuningDeviceModule->SetPlayoutDevice(mPlayoutDevice); - mTuningDeviceModule->SetRecordingDevice(mRecordingDevice); - mTuningDeviceModule->EnableBuiltInAEC(false); - mTuningDeviceModule->SetAudioDeviceSink(this); - mTuningDeviceModule->InitMicrophone(); - mTuningDeviceModule->InitSpeaker(); - mTuningDeviceModule->SetStereoRecording(false); - mTuningDeviceModule->SetStereoPlayout(true); - mTuningDeviceModule->InitRecording(); - mTuningDeviceModule->InitPlayout(); - updateDevices(); - }); - mWorkerThread->BlockingCall( [this]() { - // the peer device module doesn't need an observer - // as we pull peer data after audio processing. 
- mPeerDeviceModule = webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, - mTaskQueueFactory.get(), - nullptr); - mPeerDeviceModule->Init(); - mPeerDeviceModule->SetPlayoutDevice(mPlayoutDevice); - mPeerDeviceModule->SetRecordingDevice(mRecordingDevice); - mPeerDeviceModule->EnableBuiltInAEC(false); - mPeerDeviceModule->InitMicrophone(); - mPeerDeviceModule->InitSpeaker(); + webrtc::scoped_refptr realADM = + webrtc::AudioDeviceModule::Create(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, mTaskQueueFactory.get()); + mDeviceModule = webrtc::make_ref_counted(realADM); + mDeviceModule->SetObserver(this); }); // The custom processor allows us to retrieve audio data (and levels) // from after other audio processing such as AEC, AGC, etc. - mPeerCustomProcessor = new LLCustomProcessor; - webrtc::AudioProcessingBuilder apb; - apb.SetCapturePostProcessing(std::unique_ptr(mPeerCustomProcessor)); - mAudioProcessingModule = apb.Create(); + mPeerCustomProcessor = std::make_shared(); + webrtc::BuiltinAudioProcessingBuilder apb; + apb.SetCapturePostProcessing(std::make_unique(mPeerCustomProcessor)); + mAudioProcessingModule = apb.Build(webrtc::CreateEnvironment()); webrtc::AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = false; apm_config.echo_canceller.mobile_mode = false; apm_config.gain_controller1.enabled = false; - apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog; - apm_config.gain_controller2.enabled = false; - apm_config.high_pass_filter.enabled = false; + apm_config.gain_controller2.enabled = true; + apm_config.high_pass_filter.enabled = true; apm_config.noise_suppression.enabled = true; apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh; - apm_config.transient_suppression.enabled = false; + apm_config.transient_suppression.enabled = true; apm_config.pipeline.multi_channel_render = 
true; apm_config.pipeline.multi_channel_capture = false; mAudioProcessingModule->ApplyConfig(apm_config); webrtc::ProcessingConfig processing_config; + processing_config.input_stream().set_num_channels(2); processing_config.input_stream().set_sample_rate_hz(48000); processing_config.output_stream().set_num_channels(2); @@ -266,18 +332,37 @@ void LLWebRTCImpl::init() mPeerConnectionFactory = webrtc::CreatePeerConnectionFactory(mNetworkThread.get(), mWorkerThread.get(), mSignalingThread.get(), - mPeerDeviceModule, + mDeviceModule, webrtc::CreateBuiltinAudioEncoderFactory(), webrtc::CreateBuiltinAudioDecoderFactory(), nullptr /* video_encoder_factory */, nullptr /* video_decoder_factory */, nullptr /* audio_mixer */, mAudioProcessingModule); + mWorkerThread->PostTask( + [this]() + { + if (mDeviceModule) + { + mDeviceModule->EnableBuiltInAEC(false); + updateDevices(); + } + }); } void LLWebRTCImpl::terminate() { + mWorkerThread->BlockingCall( + [this]() + { + if (mDeviceModule) + { + mDeviceModule->ForceStopRecording(); + mDeviceModule->StopPlayout(); + } + }); + for (auto &connection : mPeerConnections) { connection->terminate(); @@ -294,77 +379,28 @@ void LLWebRTCImpl::terminate() mWorkerThread->BlockingCall( [this]() { - if (mTuningDeviceModule) - { - mTuningDeviceModule->StopRecording(); - mTuningDeviceModule->Terminate(); - } - if (mPeerDeviceModule) + if (mDeviceModule) { - mPeerDeviceModule->StopRecording(); - mPeerDeviceModule->Terminate(); - } - mTuningDeviceModule = nullptr; - mPeerDeviceModule = nullptr; - mTaskQueueFactory = nullptr; - }); - rtc::LogMessage::RemoveLogToStream(mLogSink); -} - -// -// Devices functions -// -// Most device-related functionality needs to happen -// on the worker thread (the audio thread,) so those calls will be -// proxied over to that thread. 
-// -void LLWebRTCImpl::setRecording(bool recording) -{ - mWorkerThread->PostTask( - [this, recording]() - { - if (recording) - { - mPeerDeviceModule->SetStereoRecording(false); - mPeerDeviceModule->InitRecording(); - mPeerDeviceModule->StartRecording(); - } - else - { - mPeerDeviceModule->StopRecording(); - } - }); -} - -void LLWebRTCImpl::setPlayout(bool playing) -{ - mWorkerThread->PostTask( - [this, playing]() - { - if (playing) - { - mPeerDeviceModule->SetStereoPlayout(true); - mPeerDeviceModule->InitPlayout(); - mPeerDeviceModule->StartPlayout(); - } - else - { - mPeerDeviceModule->StopPlayout(); + mDeviceModule->Terminate(); } + mDeviceModule = nullptr; + mTaskQueueFactory = nullptr; }); + webrtc::LogMessage::RemoveLogToStream(mLogSink); } void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config) { webrtc::AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = config.mEchoCancellation; - apm_config.echo_canceller.mobile_mode = false; // don't use mobile hardware echo cancellation. - apm_config.gain_controller1.enabled = config.mAGC; - apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog; - apm_config.gain_controller2.enabled = false; // use the main gain controller. - apm_config.high_pass_filter.enabled = false; // don't filter, to improve quality for music and other pure sources. - apm_config.transient_suppression.enabled = false; // transient suppression may increase latency. 
- apm_config.pipeline.multi_channel_render = true; // stereo + apm_config.echo_canceller.mobile_mode = false; + apm_config.gain_controller1.enabled = false; + apm_config.gain_controller2.enabled = config.mAGC; + apm_config.gain_controller2.adaptive_digital.enabled = true; // auto-level speech + apm_config.high_pass_filter.enabled = true; + apm_config.transient_suppression.enabled = true; + apm_config.pipeline.multi_channel_render = true; + apm_config.pipeline.multi_channel_capture = true; apm_config.pipeline.multi_channel_capture = true; switch (config.mNoiseSuppressionLevel) @@ -413,142 +449,142 @@ void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer) } } -void ll_set_device_module_capture_device(rtc::scoped_refptr device_module, int16_t device) +// must be run in the worker thread. +void LLWebRTCImpl::workerDeployDevices() { -#if WEBRTC_WIN - if (device < 0) - { - device_module->SetRecordingDevice(webrtc::AudioDeviceModule::kDefaultDevice); - } - else + if (!mDeviceModule) { - device_module->SetRecordingDevice(device); + return; } -#else - // passed in default is -1, but the device list - // has it at 0 - device_module->SetRecordingDevice(device + 1); -#endif - device_module->InitMicrophone(); -} -void LLWebRTCImpl::setCaptureDevice(const std::string &id) -{ int16_t recordingDevice = RECORD_DEVICE_DEFAULT; - if (id != "Default") + int16_t recording_device_start = 0; + + if (mRecordingDevice != "Default") { - for (int16_t i = 0; i < mRecordingDeviceList.size(); i++) + for (int16_t i = recording_device_start; i < mRecordingDeviceList.size(); i++) { - if (mRecordingDeviceList[i].mID == id) + if (mRecordingDeviceList[i].mID == mRecordingDevice) { recordingDevice = i; +#if !WEBRTC_WIN + // linux and mac devices range from 1 to the end of the list, with the index 0 being the + // 'default' device. 
Windows has a special 'default' device and other devices are indexed + // from 0 + recordingDevice++; +#endif break; } } } - if (recordingDevice == mRecordingDevice) - { - return; - } - mRecordingDevice = recordingDevice; - if (mTuningMode) - { - mWorkerThread->PostTask([this, recordingDevice]() - { - ll_set_device_module_capture_device(mTuningDeviceModule, recordingDevice); - }); - } - else - { - mWorkerThread->PostTask([this, recordingDevice]() - { - bool recording = mPeerDeviceModule->Recording(); - if (recording) - { - mPeerDeviceModule->StopRecording(); - } - ll_set_device_module_capture_device(mPeerDeviceModule, recordingDevice); - if (recording) - { - mPeerDeviceModule->SetStereoRecording(false); - mPeerDeviceModule->InitRecording(); - mPeerDeviceModule->StartRecording(); - } - }); - } -} - -void ll_set_device_module_render_device(rtc::scoped_refptr device_module, int16_t device) -{ + mDeviceModule->StopPlayout(); + mDeviceModule->ForceStopRecording(); #if WEBRTC_WIN - if (device < 0) + if (recordingDevice < 0) { - device_module->SetPlayoutDevice(webrtc::AudioDeviceModule::kDefaultDevice); + mDeviceModule->SetRecordingDevice((webrtc::AudioDeviceModule::WindowsDeviceType)recordingDevice); } else { - device_module->SetPlayoutDevice(device); + mDeviceModule->SetRecordingDevice(recordingDevice); } #else - device_module->SetPlayoutDevice(device + 1); + mDeviceModule->SetRecordingDevice(recordingDevice); #endif - device_module->InitSpeaker(); -} + mDeviceModule->InitMicrophone(); + mDeviceModule->SetStereoRecording(false); + mDeviceModule->InitRecording(); -void LLWebRTCImpl::setRenderDevice(const std::string &id) -{ int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT; - if (id != "Default") + int16_t playout_device_start = 0; + if (mPlayoutDevice != "Default") { - for (int16_t i = 0; i < mPlayoutDeviceList.size(); i++) + for (int16_t i = playout_device_start; i < mPlayoutDeviceList.size(); i++) { - if (mPlayoutDeviceList[i].mID == id) + if (mPlayoutDeviceList[i].mID 
== mPlayoutDevice) { playoutDevice = i; +#if !WEBRTC_WIN + // linux and mac devices range from 1 to the end of the list, with the index 0 being the + // 'default' device. Windows has a special 'default' device and other devices are indexed + // from 0 + playoutDevice++; +#endif break; } } } - if (playoutDevice == mPlayoutDevice) + +#if WEBRTC_WIN + if (playoutDevice < 0) + { + mDeviceModule->SetPlayoutDevice((webrtc::AudioDeviceModule::WindowsDeviceType)playoutDevice); + } + else { - return; + mDeviceModule->SetPlayoutDevice(playoutDevice); } - mPlayoutDevice = playoutDevice; +#else + mDeviceModule->SetPlayoutDevice(playoutDevice); +#endif + mDeviceModule->InitSpeaker(); + mDeviceModule->SetStereoPlayout(true); + mDeviceModule->InitPlayout(); - if (mTuningMode) + if ((!mMute && mPeerConnections.size()) || mTuningMode) { - mWorkerThread->PostTask( - [this, playoutDevice]() - { - ll_set_device_module_render_device(mTuningDeviceModule, playoutDevice); - }); + mDeviceModule->ForceStartRecording(); } - else + + if (!mTuningMode) { - mWorkerThread->PostTask( - [this, playoutDevice]() + mDeviceModule->StartPlayout(); + } + mSignalingThread->PostTask( + [this] + { + for (auto& connection : mPeerConnections) { - bool playing = mPeerDeviceModule->Playing(); - if (playing) + if (mTuningMode) { - mPeerDeviceModule->StopPlayout(); + connection->enableSenderTracks(false); } - ll_set_device_module_render_device(mPeerDeviceModule, playoutDevice); - if (playing) + else { - mPeerDeviceModule->SetStereoPlayout(true); - mPeerDeviceModule->InitPlayout(); - mPeerDeviceModule->StartPlayout(); + connection->resetMute(); } - }); - } + connection->enableReceiverTracks(!mTuningMode); + } + if (1 < mDevicesDeploying.fetch_sub(1, std::memory_order_relaxed)) + { + mWorkerThread->PostTask([this] { workerDeployDevices(); }); + } + }); +} + +void LLWebRTCImpl::setCaptureDevice(const std::string &id) +{ + + mRecordingDevice = id; + deployDevices(); +} + +void LLWebRTCImpl::setRenderDevice(const 
std::string &id) +{ + mPlayoutDevice = id; + deployDevices(); } // updateDevices needs to happen on the worker thread. void LLWebRTCImpl::updateDevices() { - int16_t renderDeviceCount = mTuningDeviceModule->PlayoutDevices(); + if (!mDeviceModule) + { + return; + } + + int16_t renderDeviceCount = mDeviceModule->PlayoutDevices(); mPlayoutDeviceList.clear(); #if WEBRTC_WIN @@ -562,11 +598,11 @@ void LLWebRTCImpl::updateDevices() { char name[webrtc::kAdmMaxDeviceNameSize]; char guid[webrtc::kAdmMaxGuidSize]; - mTuningDeviceModule->PlayoutDeviceName(index, name, guid); + mDeviceModule->PlayoutDeviceName(index, name, guid); mPlayoutDeviceList.emplace_back(name, guid); } - int16_t captureDeviceCount = mTuningDeviceModule->RecordingDevices(); + int16_t captureDeviceCount = mDeviceModule->RecordingDevices(); mRecordingDeviceList.clear(); #if WEBRTC_WIN @@ -580,7 +616,7 @@ void LLWebRTCImpl::updateDevices() { char name[webrtc::kAdmMaxDeviceNameSize]; char guid[webrtc::kAdmMaxGuidSize]; - mTuningDeviceModule->RecordingDeviceName(index, name, guid); + mDeviceModule->RecordingDeviceName(index, name, guid); mRecordingDeviceList.emplace_back(name, guid); } @@ -592,10 +628,6 @@ void LLWebRTCImpl::updateDevices() void LLWebRTCImpl::OnDevicesUpdated() { - // reset these to a bad value so an update is forced - mRecordingDevice = RECORD_DEVICE_BAD; - mPlayoutDevice = PLAYOUT_DEVICE_BAD; - updateDevices(); } @@ -604,60 +636,109 @@ void LLWebRTCImpl::setTuningMode(bool enable) { mTuningMode = enable; mWorkerThread->PostTask( - [this, enable] { - if (enable) - { - mPeerDeviceModule->StopRecording(); - mPeerDeviceModule->StopPlayout(); - ll_set_device_module_render_device(mTuningDeviceModule, mPlayoutDevice); - ll_set_device_module_capture_device(mTuningDeviceModule, mRecordingDevice); - mTuningDeviceModule->InitPlayout(); - mTuningDeviceModule->InitRecording(); - mTuningDeviceModule->StartRecording(); - // TODO: Starting Playout on the TDM appears to create an audio artifact (click) - // 
in this case, so disabling it for now. We may have to do something different - // if we enable 'echo playback' via the TDM when tuning. - //mTuningDeviceModule->StartPlayout(); - } - else - { - mTuningDeviceModule->StopRecording(); - //mTuningDeviceModule->StopPlayout(); - ll_set_device_module_render_device(mPeerDeviceModule, mPlayoutDevice); - ll_set_device_module_capture_device(mPeerDeviceModule, mRecordingDevice); - mPeerDeviceModule->SetStereoPlayout(true); - mPeerDeviceModule->SetStereoRecording(false); - mPeerDeviceModule->InitPlayout(); - mPeerDeviceModule->InitRecording(); - mPeerDeviceModule->StartPlayout(); - mPeerDeviceModule->StartRecording(); - } - } - ); - mSignalingThread->PostTask( - [this, enable] + [this] { - for (auto &connection : mPeerConnections) - { - if (enable) - { - connection->enableSenderTracks(false); - } - else + mDeviceModule->SetTuning(mTuningMode, mMute); + mSignalingThread->PostTask( + [this] { - connection->resetMute(); - } - connection->enableReceiverTracks(!enable); - } + for (auto& connection : mPeerConnections) + { + if (mTuningMode) + { + connection->enableSenderTracks(false); + } + else + { + connection->resetMute(); + } + connection->enableReceiverTracks(!mTuningMode); + } + }); }); } -float LLWebRTCImpl::getTuningAudioLevel() { return -20 * log10f(mTuningAudioDeviceObserver->getMicrophoneEnergy()); } +void LLWebRTCImpl::deployDevices() +{ + if (0 < mDevicesDeploying.fetch_add(1, std::memory_order_relaxed)) + { + return; + } + mWorkerThread->PostTask( + [this] { + workerDeployDevices(); + }); +} + +float LLWebRTCImpl::getTuningAudioLevel() +{ + return mDeviceModule ? 
-20 * log10f(mDeviceModule->GetMicrophoneEnergy()) : std::numeric_limits::infinity(); +} -float LLWebRTCImpl::getPeerConnectionAudioLevel() { return -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()); } +void LLWebRTCImpl::setTuningMicGain(float gain) +{ + if (mTuningMode && mDeviceModule) + { + mDeviceModule->SetTuningMicGain(gain); + } +} -void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->setGain(gain); } +float LLWebRTCImpl::getPeerConnectionAudioLevel() +{ + return mTuningMode ? std::numeric_limits::infinity() + : (mPeerCustomProcessor ? -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()) + : std::numeric_limits::infinity()); +} +void LLWebRTCImpl::setMicGain(float gain) +{ + mGain = gain; + if (!mTuningMode && mPeerCustomProcessor) + { + mPeerCustomProcessor->setGain(gain); + } +} + +void LLWebRTCImpl::setMute(bool mute, int delay_ms) +{ + if (mMute != mute) + { + mMute = mute; + intSetMute(mute, delay_ms); + } +} + +void LLWebRTCImpl::intSetMute(bool mute, int delay_ms) +{ + if (mPeerCustomProcessor) + { + mPeerCustomProcessor->setGain(mMute ? 
0.0f : mGain); + } + if (mMute) + { + mWorkerThread->PostDelayedTask( + [this] + { + if (mDeviceModule) + { + mDeviceModule->ForceStopRecording(); + } + }, + webrtc::TimeDelta::Millis(delay_ms)); + } + else + { + mWorkerThread->PostTask( + [this] + { + if (mDeviceModule) + { + mDeviceModule->InitRecording(); + mDeviceModule->ForceStartRecording(); + } + }); + } +} // // Peer Connection Helpers @@ -665,34 +746,31 @@ void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->set LLWebRTCPeerConnectionInterface *LLWebRTCImpl::newPeerConnection() { - rtc::scoped_refptr peerConnection = rtc::scoped_refptr(new rtc::RefCountedObject()); + bool empty = mPeerConnections.empty(); + webrtc::scoped_refptr peerConnection = webrtc::scoped_refptr(new webrtc::RefCountedObject()); peerConnection->init(this); - - mPeerConnections.emplace_back(peerConnection); - // Should it really start disabled? - // Seems like something doesn't get the memo and senders need to be reset later - // to remove the voice indicator from taskbar - peerConnection->enableSenderTracks(false); - if (mPeerConnections.empty()) + if (empty) { - setRecording(true); - setPlayout(true); + intSetMute(mMute); } + mPeerConnections.emplace_back(peerConnection); + + peerConnection->enableSenderTracks(false); + peerConnection->resetMute(); return peerConnection.get(); } void LLWebRTCImpl::freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection) { - std::vector>::iterator it = + std::vector>::iterator it = std::find(mPeerConnections.begin(), mPeerConnections.end(), peer_connection); if (it != mPeerConnections.end()) { mPeerConnections.erase(it); - } - if (mPeerConnections.empty()) - { - setRecording(false); - setPlayout(false); + if (mPeerConnections.empty()) + { + intSetMute(true); + } } } @@ -752,7 +830,6 @@ void LLWebRTCPeerConnectionImpl::terminate() track->set_enabled(false); } } - mPeerConnection->SetAudioRecording(false); mPeerConnection->Close(); if (mLocalStream) @@ -839,7 +916,7 @@ 
bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti mDataChannel->RegisterObserver(this); } - cricket::AudioOptions audioOptions; + webrtc::AudioOptions audioOptions; audioOptions.auto_gain_control = true; audioOptions.echo_cancellation = true; audioOptions.noise_suppression = true; @@ -847,7 +924,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti mLocalStream = mPeerConnectionFactory->CreateLocalMediaStream("SLStream"); - rtc::scoped_refptr audio_track( + webrtc::scoped_refptr audio_track( mPeerConnectionFactory->CreateAudioTrack("SLAudio", mPeerConnectionFactory->CreateAudioSource(audioOptions).get())); audio_track->set_enabled(false); mLocalStream->AddTrack(audio_track); @@ -861,19 +938,12 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti webrtc::RtpParameters params; webrtc::RtpCodecParameters codecparam; codecparam.name = "opus"; - codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.kind = webrtc::MediaType::AUDIO; codecparam.clock_rate = 48000; codecparam.num_channels = 2; codecparam.parameters["stereo"] = "1"; codecparam.parameters["sprop-stereo"] = "1"; params.codecs.push_back(codecparam); - - // Fixed bitrates result in lower CPU cost - for (auto&& encoding : params.encodings) - { - encoding.max_bitrate_bps = 64000; - encoding.min_bitrate_bps = 64000; - } sender->SetParameters(params); } @@ -883,7 +953,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti webrtc::RtpParameters params; webrtc::RtpCodecParameters codecparam; codecparam.name = "opus"; - codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.kind = webrtc::MediaType::AUDIO; codecparam.clock_rate = 48000; codecparam.num_channels = 2; codecparam.parameters["stereo"] = "1"; @@ -910,7 +980,6 @@ void LLWebRTCPeerConnectionImpl::enableSenderTracks(bool enable) // set_enabled shouldn't be done on the worker thread. 
if (mPeerConnection) { - mPeerConnection->SetAudioRecording(enable); auto senders = mPeerConnection->GetSenders(); for (auto &sender : senders) { @@ -944,7 +1013,7 @@ void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->peer_connection_state(); mPeerConnection->SetRemoteDescription(webrtc::CreateSessionDescription(webrtc::SdpType::kAnswer, sdp), - rtc::scoped_refptr(this)); + webrtc::scoped_refptr(this)); } }); } @@ -957,22 +1026,22 @@ void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp) void LLWebRTCPeerConnectionImpl::setMute(bool mute) { EMicMuteState new_state = mute ? MUTE_MUTED : MUTE_UNMUTED; - if (new_state == mMute) - { - return; // no change - } + + // even if mute hasn't changed, we still need to update the mute + // state on the connections to handle cases where the 'Default' device + // has changed in the OS (unplugged headset, etc.) which messes + // with the mute state. + bool force_reset = mMute == MUTE_INITIAL && mute; bool enable = !mute; mMute = new_state; + mWebRTCImpl->PostSignalingTask( [this, force_reset, enable]() { if (mPeerConnection) { - // SetAudioRecording must be called before enabling/disabling tracks. - mPeerConnection->SetAudioRecording(enable); - auto senders = mPeerConnection->GetSenders(); RTC_LOG(LS_INFO) << __FUNCTION__ << (mMute ? "disabling" : "enabling") << " streams count " << senders.size(); @@ -1052,14 +1121,14 @@ void LLWebRTCPeerConnectionImpl::setSendVolume(float volume) // PeerConnectionObserver implementation. 
// -void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptr receiver, - const std::vector> &streams) +void LLWebRTCPeerConnectionImpl::OnAddTrack(webrtc::scoped_refptr receiver, + const std::vector> &streams) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); webrtc::RtpParameters params; webrtc::RtpCodecParameters codecparam; codecparam.name = "opus"; - codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.kind = webrtc::MediaType::AUDIO; codecparam.clock_rate = 48000; codecparam.num_channels = 2; codecparam.parameters["stereo"] = "1"; @@ -1068,12 +1137,12 @@ void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptrSetParameters(params); } -void LLWebRTCPeerConnectionImpl::OnRemoveTrack(rtc::scoped_refptr receiver) +void LLWebRTCPeerConnectionImpl::OnRemoveTrack(webrtc::scoped_refptr receiver) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); } -void LLWebRTCPeerConnectionImpl::OnDataChannel(rtc::scoped_refptr channel) +void LLWebRTCPeerConnectionImpl::OnDataChannel(webrtc::scoped_refptr channel) { if (mDataChannel) { @@ -1160,23 +1229,23 @@ static std::string iceCandidateToTrickleString(const webrtc::IceCandidateInterfa candidate->candidate().address().ipaddr().ToString() << " " << candidate->candidate().address().PortAsString() << " typ "; - if (candidate->candidate().type() == cricket::LOCAL_PORT_TYPE) + if (candidate->candidate().type() == webrtc::IceCandidateType::kHost) { candidate_stream << "host"; } - else if (candidate->candidate().type() == cricket::STUN_PORT_TYPE) + else if (candidate->candidate().type() == webrtc::IceCandidateType::kSrflx) { candidate_stream << "srflx " << "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << "rport " << candidate->candidate().related_address().PortAsString(); } - else if (candidate->candidate().type() == cricket::RELAY_PORT_TYPE) + else if (candidate->candidate().type() == webrtc::IceCandidateType::kRelay) { candidate_stream << "relay " << "raddr " << 
candidate->candidate().related_address().ipaddr().ToString() << " " << "rport " << candidate->candidate().related_address().PortAsString(); } - else if (candidate->candidate().type() == cricket::PRFLX_PORT_TYPE) + else if (candidate->candidate().type() == webrtc::IceCandidateType::kPrflx) { candidate_stream << "prflx " << "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << @@ -1254,7 +1323,7 @@ void LLWebRTCPeerConnectionImpl::OnSuccess(webrtc::SessionDescriptionInterface * else if (sdp_line.find("a=fmtp:" + opus_payload) == 0) { sdp_mangled_stream << sdp_line << "a=fmtp:" << opus_payload - << " minptime=10;useinbandfec=1;stereo=1;sprop-stereo=1;maxplaybackrate=48000;sprop-maxplaybackrate=48000;sprop-maxcapturerate=48000;complexity=4\n"; + << " minptime=10;useinbandfec=1;stereo=1;sprop-stereo=1;maxplaybackrate=48000;sprop-maxplaybackrate=48000;sprop-maxcapturerate=48000\n"; } else { @@ -1271,7 +1340,7 @@ void LLWebRTCPeerConnectionImpl::OnSuccess(webrtc::SessionDescriptionInterface * mPeerConnection->SetLocalDescription(std::unique_ptr( webrtc::CreateSessionDescription(webrtc::SdpType::kOffer, mangled_sdp)), - rtc::scoped_refptr(this)); + webrtc::scoped_refptr(this)); } @@ -1381,7 +1450,7 @@ void LLWebRTCPeerConnectionImpl::sendData(const std::string& data, bool binary) { if (mDataChannel) { - rtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length()); + webrtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length()); webrtc::DataBuffer buffer(cowBuffer, binary); mWebRTCImpl->PostNetworkTask([this, buffer]() { if (mDataChannel) @@ -1435,6 +1504,7 @@ void terminate() if (gWebRTCImpl) { gWebRTCImpl->terminate(); + delete gWebRTCImpl; gWebRTCImpl = nullptr; } } diff --git a/indra/llwebrtc/llwebrtc.h b/indra/llwebrtc/llwebrtc.h index c6fdb909ddc..7d06b7d2b40 100644 --- a/indra/llwebrtc/llwebrtc.h +++ b/indra/llwebrtc/llwebrtc.h @@ -159,7 +159,10 @@ class LLWebRTCDeviceInterface virtual void setTuningMode(bool enable) = 0; virtual float 
getTuningAudioLevel() = 0; // for use during tuning virtual float getPeerConnectionAudioLevel() = 0; // for use when not tuning - virtual void setPeerConnectionGain(float gain) = 0; + virtual void setMicGain(float gain) = 0; + virtual void setTuningMicGain(float gain) = 0; + + virtual void setMute(bool mute, int delay_ms = 0) = 0; }; // LLWebRTCAudioInterface provides the viewer with a way diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index b6294dbd4a5..df06cb88fa9 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -54,12 +54,12 @@ #include "rtc_base/ref_counted_object.h" #include "rtc_base/ssl_adapter.h" #include "rtc_base/thread.h" +#include "rtc_base/logging.h" #include "api/peer_connection_interface.h" #include "api/media_stream_interface.h" #include "api/create_peerconnection_factory.h" #include "modules/audio_device/include/audio_device.h" #include "modules/audio_device/include/audio_device_data_observer.h" -#include "rtc_base/task_queue.h" #include "api/task_queue/task_queue_factory.h" #include "api/task_queue/default_task_queue_factory.h" #include "modules/audio_device/include/audio_device_defines.h" @@ -69,35 +69,30 @@ namespace llwebrtc class LLWebRTCPeerConnectionImpl; -class LLWebRTCLogSink : public rtc::LogSink { +class LLWebRTCLogSink : public webrtc::LogSink +{ public: - LLWebRTCLogSink(LLWebRTCLogCallback* callback) : - mCallback(callback) - { - } + LLWebRTCLogSink(LLWebRTCLogCallback* callback) : mCallback(callback) {} // Destructor: close the log file - ~LLWebRTCLogSink() override - { - } + ~LLWebRTCLogSink() override { mCallback = nullptr; } - void OnLogMessage(const std::string& msg, - rtc::LoggingSeverity severity) override + void OnLogMessage(const std::string& msg, webrtc::LoggingSeverity severity) override { if (mCallback) { - switch(severity) + switch (severity) { - case rtc::LS_VERBOSE: + case webrtc::LS_VERBOSE: mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, 
msg); break; - case rtc::LS_INFO: + case webrtc::LS_INFO: mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); break; - case rtc::LS_WARNING: + case webrtc::LS_WARNING: mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); break; - case rtc::LS_ERROR: + case webrtc::LS_ERROR: mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); break; default: @@ -118,73 +113,309 @@ class LLWebRTCLogSink : public rtc::LogSink { LLWebRTCLogCallback* mCallback; }; -// Implements a class allowing capture of audio data -// to determine audio level of the microphone. -class LLAudioDeviceObserver : public webrtc::AudioDeviceDataObserver +// ----------------------------------------------------------------------------- +// A proxy transport that forwards capture data to two AudioTransport sinks: +// - the "engine" (libwebrtc's VoiceEngine) +// - the "user" (your app's listener) +// +// Playout (NeedMorePlayData) goes only to the engine by default to avoid +// double-writing into the output buffer. See notes below if you want a tap. +// ----------------------------------------------------------------------------- +class LLWebRTCAudioTransport : public webrtc::AudioTransport { - public: - LLAudioDeviceObserver(); - - // Retrieve the RMS audio loudness - float getMicrophoneEnergy(); - - // Data retrieved from the caputure device is - // passed in here for processing. - void OnCaptureData(const void *audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) override; - - // This is for data destined for the render device. - // not currently used. 
- void OnRenderData(const void *audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) override; +public: + LLWebRTCAudioTransport(); + + void SetEngineTransport(webrtc::AudioTransport* t); + + // -------- Capture path: fan out to both sinks -------- + int32_t RecordedDataIsAvailable(const void* audio_data, + size_t number_of_samples, + size_t bytes_per_sample, + size_t number_of_channels, + uint32_t samples_per_sec, + uint32_t total_delay_ms, + int32_t clock_drift, + uint32_t current_mic_level, + bool key_pressed, + uint32_t& new_mic_level) override; + + // -------- Playout path: delegate to engine only -------- + int32_t NeedMorePlayData(size_t number_of_samples, + size_t bytes_per_sample, + size_t number_of_channels, + uint32_t samples_per_sec, + void* audio_data, + size_t& number_of_samples_out, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) override; + + // Method to pull mixed render audio data from all active VoE channels. + // The data will not be passed as reference for audio processing internally. 
+ void PullRenderData(int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + void* audio_data, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) override; + + float GetMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); } + void SetGain(float gain) { mGain.store(gain, std::memory_order_relaxed); } + +private: + std::atomic engine_{ nullptr }; + static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames) + float mSumVector[NUM_PACKETS_TO_FILTER]; + std::atomic mMicrophoneEnergy; + std::atomic mGain{ 0.0f }; - protected: - static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames) - float mSumVector[NUM_PACKETS_TO_FILTER]; - float mMicrophoneEnergy; }; + +// ----------------------------------------------------------------------------- +// LLWebRTCAudioDeviceModule +// - Wraps a real ADM to provide microphone energy for tuning +// ----------------------------------------------------------------------------- +class LLWebRTCAudioDeviceModule : public webrtc::AudioDeviceModule +{ +public: + explicit LLWebRTCAudioDeviceModule(webrtc::scoped_refptr inner) : inner_(std::move(inner)), tuning_(false) + { + RTC_CHECK(inner_); + } + + // ----- AudioDeviceModule interface: we mostly forward to |inner_| ----- + int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override { return inner_->ActiveAudioLayer(audioLayer); } + + int32_t RegisterAudioCallback(webrtc::AudioTransport* engine_transport) override + { + // The engine registers its transport here. We put our audio transport between engine and ADM. + audio_transport_.SetEngineTransport(engine_transport); + // Register our proxy with the real ADM. 
+ return inner_->RegisterAudioCallback(&audio_transport_); + } + + int32_t Init() override { return inner_->Init(); } + int32_t Terminate() override { return inner_->Terminate(); } + bool Initialized() const override { return inner_->Initialized(); } + + // --- Device enumeration/selection (forward) --- + int16_t PlayoutDevices() override { return inner_->PlayoutDevices(); } + int16_t RecordingDevices() override { return inner_->RecordingDevices(); } + int32_t PlayoutDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override + { + return inner_->PlayoutDeviceName(index, name, guid); + } + int32_t RecordingDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override + { + return inner_->RecordingDeviceName(index, name, guid); + } + int32_t SetPlayoutDevice(uint16_t index) override { return inner_->SetPlayoutDevice(index); } + int32_t SetRecordingDevice(uint16_t index) override { return inner_->SetRecordingDevice(index); } + + // Windows default/communications selectors, if your branch exposes them: + int32_t SetPlayoutDevice(WindowsDeviceType type) override { return inner_->SetPlayoutDevice(type); } + int32_t SetRecordingDevice(WindowsDeviceType type) override { return inner_->SetRecordingDevice(type); } + + // --- Init/start/stop (forward) --- + int32_t InitPlayout() override { return inner_->InitPlayout(); } + bool PlayoutIsInitialized() const override { return inner_->PlayoutIsInitialized(); } + int32_t StartPlayout() override { + if (tuning_) return 0; // For tuning, don't allow playout + return inner_->StartPlayout(); + } + int32_t StopPlayout() override { return inner_->StopPlayout(); } + bool Playing() const override { return inner_->Playing(); } + + int32_t InitRecording() override { return inner_->InitRecording(); } + bool RecordingIsInitialized() const override { return inner_->RecordingIsInitialized(); } + int32_t StartRecording() override { + // 
ignore start recording as webrtc.lib will + // send one when streams first connect, resulting + // in an inadvertant 'recording' when mute is on. + // We take full control of StartRecording via + // ForceStartRecording below. + return 0; + } + int32_t StopRecording() override { + // ignore stop recording as webrtc.lib will send one when streams shut down, + // even if there are other streams in place. Start/Stop recording are entirely + // controlled by the app + return 0; + } + int32_t ForceStartRecording() { return inner_->StartRecording(); } + int32_t ForceStopRecording() { return inner_->StopRecording(); } + bool Recording() const override { return inner_->Recording(); } + + // --- Stereo opts (forward if available on your branch) --- + int32_t SetStereoPlayout(bool enable) override { return inner_->SetStereoPlayout(enable); } + int32_t SetStereoRecording(bool enable) override { return inner_->SetStereoRecording(enable); } + int32_t PlayoutIsAvailable(bool* available) override { return inner_->PlayoutIsAvailable(available); } + int32_t RecordingIsAvailable(bool* available) override { return inner_->RecordingIsAvailable(available); } + + // --- AGC/Volume/Mute/etc. 
(forward) --- + int32_t SetMicrophoneVolume(uint32_t volume) override { return inner_->SetMicrophoneVolume(volume); } + int32_t MicrophoneVolume(uint32_t* volume) const override { return inner_->MicrophoneVolume(volume); } + + // --- Speaker/Microphone init (forward) --- + int32_t InitSpeaker() override { return inner_->InitSpeaker(); } + bool SpeakerIsInitialized() const override { return inner_->SpeakerIsInitialized(); } + int32_t InitMicrophone() override { return inner_->InitMicrophone(); } + bool MicrophoneIsInitialized() const override { return inner_->MicrophoneIsInitialized(); } + + // --- Speaker Volume (forward) --- + int32_t SpeakerVolumeIsAvailable(bool* available) override { return inner_->SpeakerVolumeIsAvailable(available); } + int32_t SetSpeakerVolume(uint32_t volume) override { return inner_->SetSpeakerVolume(volume); } + int32_t SpeakerVolume(uint32_t* volume) const override { return inner_->SpeakerVolume(volume); } + int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return inner_->MaxSpeakerVolume(maxVolume); } + int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return inner_->MinSpeakerVolume(minVolume); } + + // --- Microphone Volume (forward) --- + int32_t MicrophoneVolumeIsAvailable(bool* available) override { return inner_->MicrophoneVolumeIsAvailable(available); } + int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return inner_->MaxMicrophoneVolume(maxVolume); } + int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return inner_->MinMicrophoneVolume(minVolume); } + + // --- Speaker Mute (forward) --- + int32_t SpeakerMuteIsAvailable(bool* available) override { return inner_->SpeakerMuteIsAvailable(available); } + int32_t SetSpeakerMute(bool enable) override { return inner_->SetSpeakerMute(enable); } + int32_t SpeakerMute(bool* enabled) const override { return inner_->SpeakerMute(enabled); } + + // --- Microphone Mute (forward) --- + int32_t MicrophoneMuteIsAvailable(bool* available) 
override { return inner_->MicrophoneMuteIsAvailable(available); } + int32_t SetMicrophoneMute(bool enable) override { return inner_->SetMicrophoneMute(enable); } + int32_t MicrophoneMute(bool* enabled) const override { return inner_->MicrophoneMute(enabled); } + + // --- Stereo Support (forward) --- + int32_t StereoPlayoutIsAvailable(bool* available) const override { return inner_->StereoPlayoutIsAvailable(available); } + int32_t StereoPlayout(bool* enabled) const override { return inner_->StereoPlayout(enabled); } + int32_t StereoRecordingIsAvailable(bool* available) const override { return inner_->StereoRecordingIsAvailable(available); } + int32_t StereoRecording(bool* enabled) const override { return inner_->StereoRecording(enabled); } + + // --- Delay/Timing (forward) --- + int32_t PlayoutDelay(uint16_t* delayMS) const override { return inner_->PlayoutDelay(delayMS); } + + // --- Built-in Audio Processing (forward) --- + bool BuiltInAECIsAvailable() const override { return inner_->BuiltInAECIsAvailable(); } + bool BuiltInAGCIsAvailable() const override { return inner_->BuiltInAGCIsAvailable(); } + bool BuiltInNSIsAvailable() const override { return inner_->BuiltInNSIsAvailable(); } + int32_t EnableBuiltInAEC(bool enable) override { return inner_->EnableBuiltInAEC(enable); } + int32_t EnableBuiltInAGC(bool enable) override { return inner_->EnableBuiltInAGC(enable); } + int32_t EnableBuiltInNS(bool enable) override { return inner_->EnableBuiltInNS(enable); } + + // --- Additional AudioDeviceModule methods (forward) --- + int32_t GetPlayoutUnderrunCount() const override { return inner_->GetPlayoutUnderrunCount(); } + + // Used to generate RTC stats. If not implemented, RTCAudioPlayoutStats will + // not be present in the stats. + std::optional GetStats() const override { return inner_->GetStats(); } + +// Only supported on iOS. 
+#if defined(WEBRTC_IOS) + virtual int GetPlayoutAudioParameters(AudioParameters* params) const override { return inner_->GetPlayoutAudioParameters(params); } + virtual int GetRecordAudioParameters(AudioParameters* params) override { return inner_->GetRecordAudioParameters(params); } +#endif // WEBRTC_IOS + + virtual int32_t GetPlayoutDevice() const override { return inner_->GetPlayoutDevice(); } + virtual int32_t GetRecordingDevice() const override { return inner_->GetRecordingDevice(); } + virtual int32_t SetObserver(webrtc::AudioDeviceObserver* observer) override { return inner_->SetObserver(observer); } + + // tuning microphone energy calculations + float GetMicrophoneEnergy() { return audio_transport_.GetMicrophoneEnergy(); } + void SetTuningMicGain(float gain) { audio_transport_.SetGain(gain); } + void SetTuning(bool tuning, bool mute) + { + tuning_ = tuning; + if (tuning) + { + inner_->InitRecording(); + inner_->StartRecording(); + inner_->StopPlayout(); + } + else + { + if (mute) + { + inner_->StopRecording(); + } + else + { + inner_->InitRecording(); + inner_->StartRecording(); + } + inner_->StartPlayout(); + } + } + +protected: + ~LLWebRTCAudioDeviceModule() override = default; + +private: + webrtc::scoped_refptr inner_; + LLWebRTCAudioTransport audio_transport_; + + bool tuning_; +}; + +class LLCustomProcessorState +{ + +public: + float getMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); } + void setMicrophoneEnergy(float energy) { mMicrophoneEnergy.store(energy, std::memory_order_relaxed); } + + void setGain(float gain) + { + mGain.store(gain, std::memory_order_relaxed); + mDirty.store(true, std::memory_order_relaxed); + } + + float getGain() { return mGain.load(std::memory_order_relaxed); } + + bool getDirty() { return mDirty.exchange(false, std::memory_order_relaxed); } + + protected: + std::atomic mDirty{ true }; + std::atomic mMicrophoneEnergy{ 0.0f }; + std::atomic mGain{ 0.0f }; +}; + +using LLCustomProcessorStatePtr 
= std::shared_ptr; + // Used to process/retrieve audio levels after // all of the processing (AGC, AEC, etc.) for display in-world to the user. class LLCustomProcessor : public webrtc::CustomProcessing { - public: - LLCustomProcessor(); +public: + LLCustomProcessor(LLCustomProcessorStatePtr state); ~LLCustomProcessor() override {} // (Re-) Initializes the submodule. void Initialize(int sample_rate_hz, int num_channels) override; // Analyzes the given capture or render signal. - void Process(webrtc::AudioBuffer *audio) override; + void Process(webrtc::AudioBuffer* audio) override; // Returns a string representation of the module state. std::string ToString() const override { return ""; } - float getMicrophoneEnergy() { return mMicrophoneEnergy; } - - void setGain(float gain) { mGain = gain; } - - protected: - static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing - int mSampleRateHz; - int mNumChannels; +protected: + static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing + int mSampleRateHz{ 48000 }; + int mNumChannels{ 2 }; + int mRampFrames{ 2 }; + float mCurrentGain{ 0.0f }; + float mGainStep{ 0.0f }; float mSumVector[NUM_PACKETS_TO_FILTER]; - float mMicrophoneEnergy; - float mGain; + friend LLCustomProcessorState; + LLCustomProcessorStatePtr mState; }; // Primary singleton implementation for interfacing // with the native webrtc library. 
-class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceSink +class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceObserver { public: LLWebRTCImpl(LLWebRTCLogCallback* logCallback); @@ -214,10 +445,15 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS float getTuningAudioLevel() override; float getPeerConnectionAudioLevel() override; - void setPeerConnectionGain(float gain) override; + void setMicGain(float gain) override; + void setTuningMicGain(float gain) override; + + void setMute(bool mute, int delay_ms = 20) override; + + void intSetMute(bool mute, int delay_ms = 20); // - // AudioDeviceSink + // AudioDeviceObserver // void OnDevicesUpdated() override; @@ -246,19 +482,19 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS mNetworkThread->PostTask(std::move(task), location); } - void WorkerBlockingCall(rtc::FunctionView functor, + void WorkerBlockingCall(webrtc::FunctionView functor, const webrtc::Location& location = webrtc::Location::Current()) { mWorkerThread->BlockingCall(std::move(functor), location); } - void SignalingBlockingCall(rtc::FunctionView functor, + void SignalingBlockingCall(webrtc::FunctionView functor, const webrtc::Location& location = webrtc::Location::Current()) { mSignalingThread->BlockingCall(std::move(functor), location); } - void NetworkBlockingCall(rtc::FunctionView functor, + void NetworkBlockingCall(webrtc::FunctionView functor, const webrtc::Location& location = webrtc::Location::Current()) { mNetworkThread->BlockingCall(std::move(functor), location); @@ -266,7 +502,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS // Allows the LLWebRTCPeerConnectionImpl class to retrieve the // native webrtc PeerConnectionFactory. 
- rtc::scoped_refptr getPeerConnectionFactory() + webrtc::scoped_refptr getPeerConnectionFactory() { return mPeerConnectionFactory; } @@ -275,49 +511,47 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS LLWebRTCPeerConnectionInterface* newPeerConnection(); void freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection); - // enables/disables capture via the capture device - void setRecording(bool recording); - - void setPlayout(bool playing); - protected: + + void workerDeployDevices(); LLWebRTCLogSink* mLogSink; // The native webrtc threads - std::unique_ptr mNetworkThread; - std::unique_ptr mWorkerThread; - std::unique_ptr mSignalingThread; + std::unique_ptr mNetworkThread; + std::unique_ptr mWorkerThread; + std::unique_ptr mSignalingThread; // The factory that allows creation of native webrtc PeerConnections. - rtc::scoped_refptr mPeerConnectionFactory; + webrtc::scoped_refptr mPeerConnectionFactory; - rtc::scoped_refptr mAudioProcessingModule; + webrtc::scoped_refptr mAudioProcessingModule; // more native webrtc stuff - std::unique_ptr mTaskQueueFactory; + std::unique_ptr mTaskQueueFactory; // Devices void updateDevices(); - rtc::scoped_refptr mTuningDeviceModule; - rtc::scoped_refptr mPeerDeviceModule; + void deployDevices(); + std::atomic mDevicesDeploying; + webrtc::scoped_refptr mDeviceModule; std::vector mVoiceDevicesObserverList; // accessors in native webrtc for devices aren't apparently implemented yet. 
bool mTuningMode; - int32_t mRecordingDevice; + std::string mRecordingDevice; LLWebRTCVoiceDeviceList mRecordingDeviceList; - int32_t mPlayoutDevice; + std::string mPlayoutDevice; LLWebRTCVoiceDeviceList mPlayoutDeviceList; bool mMute; + float mGain; - LLAudioDeviceObserver * mTuningAudioDeviceObserver; - LLCustomProcessor * mPeerCustomProcessor; + LLCustomProcessorStatePtr mPeerCustomProcessor; // peer connections - std::vector> mPeerConnections; + std::vector> mPeerConnections; }; @@ -342,7 +576,7 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface, void terminate(); virtual void AddRef() const override = 0; - virtual rtc::RefCountReleaseStatus Release() const override = 0; + virtual webrtc::RefCountReleaseStatus Release() const override = 0; // // LLWebRTCPeerConnection @@ -373,10 +607,10 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface, // void OnSignalingChange(webrtc::PeerConnectionInterface::SignalingState new_state) override {} - void OnAddTrack(rtc::scoped_refptr receiver, - const std::vector> &streams) override; - void OnRemoveTrack(rtc::scoped_refptr receiver) override; - void OnDataChannel(rtc::scoped_refptr channel) override; + void OnAddTrack(webrtc::scoped_refptr receiver, + const std::vector> &streams) override; + void OnRemoveTrack(webrtc::scoped_refptr receiver) override; + void OnDataChannel(webrtc::scoped_refptr channel) override; void OnRenegotiationNeeded() override {} void OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) override {}; void OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) override; @@ -415,7 +649,7 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface, LLWebRTCImpl * mWebRTCImpl; - rtc::scoped_refptr mPeerConnectionFactory; + webrtc::scoped_refptr mPeerConnectionFactory; typedef enum { MUTE_INITIAL, @@ -429,12 +663,12 @@ class LLWebRTCPeerConnectionImpl : public 
LLWebRTCPeerConnectionInterface, std::vector> mCachedIceCandidates; bool mAnswerReceived; - rtc::scoped_refptr mPeerConnection; - rtc::scoped_refptr mLocalStream; + webrtc::scoped_refptr mPeerConnection; + webrtc::scoped_refptr mLocalStream; // data std::vector mDataObserverList; - rtc::scoped_refptr mDataChannel; + webrtc::scoped_refptr mDataChannel; }; } diff --git a/indra/newview/CMakeLists.txt b/indra/newview/CMakeLists.txt index 42539796871..84219250af2 100644 --- a/indra/newview/CMakeLists.txt +++ b/indra/newview/CMakeLists.txt @@ -57,6 +57,11 @@ if (NOT HAVOK_TPV) # which means we need to duct tape this togther ... add_subdirectory(${LLPHYSICSEXTENSIONS_SRC_DIR} llphysicsextensions) + if (NOT "${LLPHYSICSEXTENSIONS_STUB_DIR}" STREQUAL "") + # for darwin universal builds we need both real llphysicsextensions and the stub for aarch64 fallback + # this will only be set when HAVOK is ON, otherwise the normal stub fallback will be in effect + add_subdirectory(${LLPHYSICSEXTENSIONS_STUB_DIR} llphysicsextensionsstub) + endif() # Another hack that works with newer cmake versions: cmake_policy( SET CMP0079 NEW) @@ -2058,6 +2063,12 @@ elseif (DARWIN) PROPERTIES RESOURCE SecondLife.xib LINK_FLAGS_RELEASE "${LINK_FLAGS_RELEASE} -Xlinker -dead_strip" + # arch specific flags for universal builds: https://stackoverflow.com/a/77942065 + XCODE_ATTRIBUTE_OTHER_CFLAGS[arch=x86_64] "$(inherited) -DLLPHYSICSEXTENSIONS_USE_FULL" + XCODE_ATTRIBUTE_OTHER_CFLAGS[arch=arm64] "$(inherited) -DLLPHYSICSEXTENSIONS_USE_STUB" + # only generate the .MAP file for llphysicsextensions_tpv on x86_64 + XCODE_ATTRIBUTE_OTHER_LDFLAGS[arch=x86_64] "$(inherited) -L${CMAKE_CURRENT_BINARY_DIR}/llphysicsextensions/$,$,${CMAKE_CFG_INTDIR}>/ -lllphysicsextensions -Xlinker -map -Xlinker ${CMAKE_CURRENT_BINARY_DIR}/${VIEWER_BINARY_NAME}.MAP" + XCODE_ATTRIBUTE_OTHER_LDFLAGS[arch=arm64] "$(inherited) -L${CMAKE_CURRENT_BINARY_DIR}/llphysicsextensionsstub/$,$,${CMAKE_CFG_INTDIR}>/ -lllphysicsextensionsstub" ) 
else (WINDOWS) # Linux diff --git a/indra/newview/llappviewer.cpp b/indra/newview/llappviewer.cpp index 1602b1cba5c..3bd45fdf668 100644 --- a/indra/newview/llappviewer.cpp +++ b/indra/newview/llappviewer.cpp @@ -3456,7 +3456,7 @@ LLSD LLAppViewer::getViewerInfo() const info["FONT_SIZE_ADJUSTMENT"] = gSavedSettings.getF32("FontScreenDPI"); info["UI_SCALE"] = gSavedSettings.getF32("UIScaleFactor"); info["DRAW_DISTANCE"] = gSavedSettings.getF32("RenderFarClip"); - info["NET_BANDWITH"] = gSavedSettings.getF32("ThrottleBandwidthKBPS"); + info["NET_BANDWITH"] = LLViewerThrottle::getMaxBandwidthKbps(); info["LOD_FACTOR"] = gSavedSettings.getF32("RenderVolumeLODFactor"); info["RENDER_QUALITY"] = (F32)gSavedSettings.getU32("RenderQualityPerformance"); info["TEXTURE_MEMORY"] = LLSD::Integer(gGLManager.mVRAM); diff --git a/indra/newview/llconversationview.cpp b/indra/newview/llconversationview.cpp index 07f58076627..0a434947580 100644 --- a/indra/newview/llconversationview.cpp +++ b/indra/newview/llconversationview.cpp @@ -542,7 +542,7 @@ void LLConversationViewSession::onCurrentVoiceSessionChanged(const LLUUID& sessi { bool old_value = mIsInActiveVoiceChannel; mIsInActiveVoiceChannel = vmi->getUUID() == session_id; - mCallIconLayoutPanel->setVisible(mIsInActiveVoiceChannel); + mCallIconLayoutPanel->setVisible(mIsInActiveVoiceChannel && !LLVoiceChannel::isSuspended()); if (old_value != mIsInActiveVoiceChannel) { refresh(); diff --git a/indra/newview/llfloatermarketplace.cpp b/indra/newview/llfloatermarketplace.cpp index 889daf84ab3..4abea64302b 100644 --- a/indra/newview/llfloatermarketplace.cpp +++ b/indra/newview/llfloatermarketplace.cpp @@ -27,10 +27,11 @@ #include "llviewerprecompiledheaders.h" #include "llfloatermarketplace.h" +#include "llviewercontrol.h" #include "lluictrlfactory.h" LLFloaterMarketplace::LLFloaterMarketplace(const LLSD& key) - : LLFloater(key) + : LLFloaterWebContent(key) { } @@ -38,10 +39,25 @@ LLFloaterMarketplace::~LLFloaterMarketplace() { } +// 
just to override LLFloaterWebContent +void LLFloaterMarketplace::onClose(bool app_quitting) +{ +} + bool LLFloaterMarketplace::postBuild() { - enableResizeCtrls(true, true, false); + LLFloaterWebContent::postBuild(); + mWebBrowser = getChild("marketplace_contents"); + mWebBrowser->addObserver(this); + return true; } - +void LLFloaterMarketplace::openMarketplace() +{ + std::string url = gSavedSettings.getString("MarketplaceURL"); + if (mCurrentURL != url) + { + mWebBrowser->navigateTo(url, HTTP_CONTENT_TEXT_HTML); + } +} diff --git a/indra/newview/llfloatermarketplace.h b/indra/newview/llfloatermarketplace.h index 2ae4d0d64a2..9524c94eeec 100644 --- a/indra/newview/llfloatermarketplace.h +++ b/indra/newview/llfloatermarketplace.h @@ -27,14 +27,20 @@ #pragma once #include "llfloater.h" +#include "llfloaterwebcontent.h" class LLFloaterMarketplace: - public LLFloater + public LLFloaterWebContent { friend class LLFloaterReg; + +public: + void openMarketplace(); + private: LLFloaterMarketplace(const LLSD& key); ~LLFloaterMarketplace(); bool postBuild() override; + void onClose(bool app_quitting) override; }; diff --git a/indra/newview/llfloatersearch.cpp b/indra/newview/llfloatersearch.cpp index 95870f0f913..7ee1b88f05b 100644 --- a/indra/newview/llfloatersearch.cpp +++ b/indra/newview/llfloatersearch.cpp @@ -42,15 +42,31 @@ class LLSearchHandler : public LLCommandHandler { // requires trusted browser to trigger LLSearchHandler() : LLCommandHandler("search", UNTRUSTED_CLICK_ONLY) { } bool handle(const LLSD& tokens, const LLSD& query_map, const std::string& grid, LLMediaCtrl* web) { + const size_t parts = tokens.size(); + + // get the (optional) category for the search + std::string collection; + if (parts > 0) + { + collection = tokens[0].asString(); + } + + // get the (optional) search string + std::string search_text; + if (parts > 1) + { + search_text = tokens[1].asString(); + } + // open the search floater and perform the requested search - 
LLFloaterReg::showInstance("search", tokens); + LLFloaterReg::showInstance("search", llsd::map("collection", collection,"query", search_text)); return true; } }; LLSearchHandler gSearchHandler; LLFloaterSearch::LLFloaterSearch(const LLSD& key) - : LLFloater(key) + : LLFloaterWebContent(key) { mSearchType.insert("standard"); mSearchType.insert("land"); @@ -70,6 +86,12 @@ LLFloaterSearch::~LLFloaterSearch() void LLFloaterSearch::onOpen(const LLSD& tokens) { initiateSearch(tokens); + mWebBrowser->setFocus(true); +} + +// just to override LLFloaterWebContent +void LLFloaterSearch::onClose(bool app_quitting) +{ } void LLFloaterSearch::initiateSearch(const LLSD& tokens) @@ -82,25 +104,11 @@ void LLFloaterSearch::initiateSearch(const LLSD& tokens) // substituted into the final URL using the logic from the original search. subs["TYPE"] = "standard"; - const size_t parts = tokens.size(); + std::string collection = tokens.has("collection") ? tokens["collection"].asString() : ""; - // get the (optional) category for the search - std::string collection; - if (parts > 0) - { - collection = tokens[0].asString(); - } - - // get the (optional) search string - std::string search_text; - if (parts > 1) - { - search_text = tokens[1].asString(); - } + std::string search_text = tokens.has("query") ? tokens["query"].asString() : ""; - // TODO: where does category get set? I cannot find a reference to - // it in internal docs - might be conflated with values in mSearchType - std::string category; + std::string category = tokens.has("category") ? 
tokens["category"].asString() : ""; if (mSearchType.find(category) != mSearchType.end()) { subs["TYPE"] = category; @@ -159,7 +167,11 @@ void LLFloaterSearch::initiateSearch(const LLSD& tokens) bool LLFloaterSearch::postBuild() { - enableResizeCtrls(true, true, false); + LLFloaterWebContent::postBuild(); + mWebBrowser = getChild("search_contents"); + mWebBrowser->addObserver(this); + getChildView("address")->setEnabled(false); + getChildView("popexternal")->setEnabled(false); // This call is actioned by the preload code in llViewerWindow // that creates the search floater during the login process diff --git a/indra/newview/llfloatersearch.h b/indra/newview/llfloatersearch.h index e8a2be4797c..6d93474f4a6 100644 --- a/indra/newview/llfloatersearch.h +++ b/indra/newview/llfloatersearch.h @@ -27,13 +27,15 @@ #pragma once #include "llfloater.h" +#include "llfloaterwebcontent.h" class LLFloaterSearch: - public LLFloater { + public LLFloaterWebContent { friend class LLFloaterReg; public: void onOpen(const LLSD& key) override; + void onClose(bool app_quitting) override; private: LLFloaterSearch(const LLSD& key); diff --git a/indra/newview/llinventoryfunctions.cpp b/indra/newview/llinventoryfunctions.cpp index 22e20a804f4..c8ddcfbf82b 100644 --- a/indra/newview/llinventoryfunctions.cpp +++ b/indra/newview/llinventoryfunctions.cpp @@ -3927,6 +3927,31 @@ void LLInventoryAction::fileUploadLocation(const LLUUID& dest_id, const std::str } } +bool LLInventoryAction::isFileUploadLocation(const LLUUID& dest_id, const std::string& action) +{ + if (action == "def_model") + { + return gInventory.findUserDefinedCategoryUUIDForType(LLFolderType::FT_OBJECT) == dest_id; + } + else if (action == "def_texture") + { + return gInventory.findUserDefinedCategoryUUIDForType(LLFolderType::FT_TEXTURE) == dest_id; + } + else if (action == "def_sound") + { + return gInventory.findUserDefinedCategoryUUIDForType(LLFolderType::FT_SOUND) == dest_id; + } + else if (action == "def_animation") + { + 
return gInventory.findUserDefinedCategoryUUIDForType(LLFolderType::FT_ANIMATION) == dest_id; + } + else if (action == "def_pbr_material") + { + return gInventory.findUserDefinedCategoryUUIDForType(LLFolderType::FT_MATERIAL) == dest_id; + } + return false; +} + void LLInventoryAction::onItemsRemovalConfirmation(const LLSD& notification, const LLSD& response, LLHandle root) { S32 option = LLNotificationsUtil::getSelectedOption(notification, response); diff --git a/indra/newview/llinventoryfunctions.h b/indra/newview/llinventoryfunctions.h index b95f1094913..09c191554f1 100644 --- a/indra/newview/llinventoryfunctions.h +++ b/indra/newview/llinventoryfunctions.h @@ -656,6 +656,7 @@ struct LLInventoryAction static void onItemsRemovalConfirmation(const LLSD& notification, const LLSD& response, LLHandle root); static void removeItemFromDND(LLFolderView* root); static void fileUploadLocation(const LLUUID& dest_id, const std::string& action); + static bool isFileUploadLocation(const LLUUID& dest_id, const std::string& action); static void saveMultipleTextures(const std::vector& filenames, std::set selected_items, LLInventoryModel* model); diff --git a/indra/newview/llinventorygallerymenu.cpp b/indra/newview/llinventorygallerymenu.cpp index 304e989f83a..0b6181de9b2 100644 --- a/indra/newview/llinventorygallerymenu.cpp +++ b/indra/newview/llinventorygallerymenu.cpp @@ -110,6 +110,7 @@ LLContextMenu* LLInventoryGalleryContextMenu::createMenu() registrar.add("Inventory.Share", boost::bind(&LLAvatarActions::shareWithAvatars, uuids, gFloaterView->getParentFloater(mGallery))); enable_registrar.add("Inventory.CanSetUploadLocation", boost::bind(&LLInventoryGalleryContextMenu::canSetUploadLocation, this, _2)); + enable_registrar.add("Inventory.FileUploadLocation.Check", boost::bind(&LLInventoryGalleryContextMenu::isUploadLocationSelected, this, _2)); enable_registrar.add("Inventory.EnvironmentEnabled", [](LLUICtrl*, const LLSD&) { @@ -489,6 +490,12 @@ void 
LLInventoryGalleryContextMenu::fileUploadLocation(const LLSD& userdata) LLInventoryAction::fileUploadLocation(mUUIDs.front(), param); } +bool LLInventoryGalleryContextMenu::isUploadLocationSelected(const LLSD& userdata) +{ + const std::string param = userdata.asString(); + return LLInventoryAction::isFileUploadLocation(mUUIDs.front(), param); +} + bool LLInventoryGalleryContextMenu::canSetUploadLocation(const LLSD& userdata) { if (mUUIDs.size() != 1) diff --git a/indra/newview/llinventorygallerymenu.h b/indra/newview/llinventorygallerymenu.h index 7c3545432b1..e90c7a19d25 100644 --- a/indra/newview/llinventorygallerymenu.h +++ b/indra/newview/llinventorygallerymenu.h @@ -47,6 +47,7 @@ class LLInventoryGalleryContextMenu : public LLListContextMenu void updateMenuItemsVisibility(LLContextMenu* menu); void fileUploadLocation(const LLSD& userdata); + bool isUploadLocationSelected(const LLSD& userdata); bool canSetUploadLocation(const LLSD& userdata); static void onRename(const LLSD& notification, const LLSD& response); diff --git a/indra/newview/llinventorypanel.cpp b/indra/newview/llinventorypanel.cpp index faec8824f64..db0af7f9ee5 100644 --- a/indra/newview/llinventorypanel.cpp +++ b/indra/newview/llinventorypanel.cpp @@ -191,6 +191,7 @@ LLInventoryPanel::LLInventoryPanel(const LLInventoryPanel::Params& p) : mCommitCallbackRegistrar.add("Inventory.BeginIMSession", boost::bind(&LLInventoryPanel::beginIMSession, this)); mCommitCallbackRegistrar.add("Inventory.Share", boost::bind(&LLAvatarActions::shareWithAvatars, this)); mCommitCallbackRegistrar.add("Inventory.FileUploadLocation", boost::bind(&LLInventoryPanel::fileUploadLocation, this, _2)); + mEnableCallbackRegistrar.add("Inventory.FileUploadLocation.Check", boost::bind(&LLInventoryPanel::isUploadLocationSelected, this, _2)); mCommitCallbackRegistrar.add("Inventory.OpenNewFolderWindow", boost::bind(&LLInventoryPanel::openSingleViewInventory, this, LLUUID())); } @@ -1872,6 +1873,13 @@ void 
LLInventoryPanel::fileUploadLocation(const LLSD& userdata) LLInventoryAction::fileUploadLocation(dest, param); } +bool LLInventoryPanel::isUploadLocationSelected(const LLSD& userdata) +{ + const std::string param = userdata.asString(); + const LLUUID dest = LLFolderBridge::sSelf.get()->getUUID(); + return LLInventoryAction::isFileUploadLocation(dest, param); +} + void LLInventoryPanel::openSingleViewInventory(LLUUID folder_id) { LLPanelMainInventory::newFolderWindow(folder_id.isNull() ? LLFolderBridge::sSelf.get()->getUUID() : folder_id); diff --git a/indra/newview/llinventorypanel.h b/indra/newview/llinventorypanel.h index 473283352ff..50333709fc6 100644 --- a/indra/newview/llinventorypanel.h +++ b/indra/newview/llinventorypanel.h @@ -225,6 +225,7 @@ class LLInventoryPanel : public LLPanel void doCreate(const LLSD& userdata); bool beginIMSession(); void fileUploadLocation(const LLSD& userdata); + bool isUploadLocationSelected(const LLSD& userdata); void openSingleViewInventory(LLUUID folder_id = LLUUID()); void purgeSelectedItems(); bool attachObject(const LLSD& userdata); diff --git a/indra/newview/llreflectionmapmanager.cpp b/indra/newview/llreflectionmapmanager.cpp index f6e43e13dc4..5840cc52886 100644 --- a/indra/newview/llreflectionmapmanager.cpp +++ b/indra/newview/llreflectionmapmanager.cpp @@ -998,11 +998,11 @@ void LLReflectionMapManager::updateProbeFace(LLReflectionMap* probe, U32 face) mTexture->bind(channel); } } + + gIrradianceGenProgram.unbind(); } mMipChain[0].flush(); - - gIrradianceGenProgram.unbind(); } } diff --git a/indra/newview/llspeakingindicatormanager.cpp b/indra/newview/llspeakingindicatormanager.cpp index 532b245cedc..06458a9f3ce 100644 --- a/indra/newview/llspeakingindicatormanager.cpp +++ b/indra/newview/llspeakingindicatormanager.cpp @@ -200,8 +200,17 @@ void SpeakingIndicatorManager::cleanupSingleton() void SpeakingIndicatorManager::sOnCurrentChannelChanged(const LLUUID& /*session_id*/) { - 
switchSpeakerIndicators(mSwitchedIndicatorsOn, false); - mSwitchedIndicatorsOn.clear(); + if (LLVoiceChannel::isSuspended()) + { + switchSpeakerIndicators(mSwitchedIndicatorsOn, false); + mSwitchedIndicatorsOn.clear(); + } + else + { + // Multiple onParticipantsChanged can arrive at the same time + // from different sources, might want to filter by some factor. + onParticipantsChanged(); + } } void SpeakingIndicatorManager::onParticipantsChanged() diff --git a/indra/newview/llstatusbar.cpp b/indra/newview/llstatusbar.cpp index 412eaa18657..662ab7e6a01 100644 --- a/indra/newview/llstatusbar.cpp +++ b/indra/newview/llstatusbar.cpp @@ -41,6 +41,7 @@ #include "llpanelpresetscamerapulldown.h" #include "llpanelpresetspulldown.h" #include "llpanelvolumepulldown.h" +#include "llfloatermarketplace.h" #include "llfloaterregioninfo.h" #include "llfloaterscriptdebug.h" #include "llhints.h" @@ -521,7 +522,11 @@ void LLStatusBar::onClickBuyCurrency() void LLStatusBar::onClickShop() { - LLFloaterReg::toggleInstanceOrBringToFront("marketplace"); + LLFloaterReg::showInstanceOrBringToFront("marketplace"); + if (LLFloaterMarketplace* marketplace = LLFloaterReg::getTypedInstance("marketplace")) + { + marketplace->openMarketplace(); + } } void LLStatusBar::onMouseEnterPresetsCamera() diff --git a/indra/newview/llviewermenufile.cpp b/indra/newview/llviewermenufile.cpp index 99aacc92a02..5a0802cfbcb 100644 --- a/indra/newview/llviewermenufile.cpp +++ b/indra/newview/llviewermenufile.cpp @@ -912,7 +912,23 @@ class LLFileUploadModel : public view_listener_t { bool handleEvent(const LLSD& userdata) { - LLFloaterModelPreview::showModelPreview(); + if (LLConvexDecomposition::isFunctional()) + { + LLFloaterModelPreview::showModelPreview(); + } + else + { + if (gGLManager.mIsApple) + { + LLNotificationsUtil::add("ModelUploaderMissingPhysicsApple"); + } + else + { + // TPV? 
+ LLNotificationsUtil::add("ModelUploaderMissingPhysics"); + LLFloaterModelPreview::showModelPreview(); + } + } return true; } }; diff --git a/indra/newview/llviewermessage.cpp b/indra/newview/llviewermessage.cpp index cf329456fd6..e397fd90184 100644 --- a/indra/newview/llviewermessage.cpp +++ b/indra/newview/llviewermessage.cpp @@ -3686,13 +3686,10 @@ void send_agent_update(bool force_send, bool send_reliable) static F32 last_draw_disatance_step = 1024; F32 memory_limited_draw_distance = gAgentCamera.mDrawDistance; - if (LLViewerTexture::sDesiredDiscardBias > 2.f && LLViewerTexture::isSystemMemoryLow()) + if (LLViewerTexture::isSystemMemoryCritical()) { // If we are low on memory, reduce requested draw distance - // Discard's bias is clamped to 4 so we need to check 2 to 4 range - // Factor is intended to go from 1.0 to 2.0 - F32 factor = 1.f + (LLViewerTexture::sDesiredDiscardBias - 2.f) / 2.f; - memory_limited_draw_distance = llmax(gAgentCamera.mDrawDistance / factor, gAgentCamera.mDrawDistance / 2.f); + memory_limited_draw_distance = llmax(gAgentCamera.mDrawDistance / LLViewerTexture::getSystemMemoryBudgetFactor(), gAgentCamera.mDrawDistance / 2.f); } if (tp_state == LLAgent::TELEPORT_ARRIVING || LLStartUp::getStartupState() < STATE_MISC) diff --git a/indra/newview/llviewertexture.cpp b/indra/newview/llviewertexture.cpp index 326b1bfd996..10203ccff6f 100644 --- a/indra/newview/llviewertexture.cpp +++ b/indra/newview/llviewertexture.cpp @@ -658,23 +658,35 @@ U32Megabytes LLViewerTexture::getFreeSystemMemory() return physical_res; } -//static -bool LLViewerTexture::isSystemMemoryLow() +S32Megabytes get_render_free_main_memory_treshold() { static LLCachedControl min_free_main_memory(gSavedSettings, "RenderMinFreeMainMemoryThreshold", 512); const U32Megabytes MIN_FREE_MAIN_MEMORY(min_free_main_memory); - return getFreeSystemMemory() < MIN_FREE_MAIN_MEMORY; + return MIN_FREE_MAIN_MEMORY; +} + +//static +bool LLViewerTexture::isSystemMemoryLow() +{ + return 
getFreeSystemMemory() < get_render_free_main_memory_treshold(); +} + +//static +bool LLViewerTexture::isSystemMemoryCritical() +{ + return getFreeSystemMemory() < get_render_free_main_memory_treshold() / 2; } F32 LLViewerTexture::getSystemMemoryBudgetFactor() { - static LLCachedControl min_free_main_memory(gSavedSettings, "RenderMinFreeMainMemoryThreshold", 512); - const S32Megabytes MIN_FREE_MAIN_MEMORY(min_free_main_memory); + const S32Megabytes MIN_FREE_MAIN_MEMORY(get_render_free_main_memory_treshold() / 2); S32 free_budget = (S32Megabytes)getFreeSystemMemory() - MIN_FREE_MAIN_MEMORY; if (free_budget < 0) { - // Result should range from 1 (0 free budget) to 2 (-512 free budget) - return 1.f - free_budget / MIN_FREE_MAIN_MEMORY; + // Leave some padding, otherwise we will crash out of memory before hitting factor 2. + const S32Megabytes PAD_BUFFER(32); + // Result should range from 1 at 0 free budget to 2 at -224 free budget, 2.14 at -256MB + return 1.f - free_budget / (MIN_FREE_MAIN_MEMORY - PAD_BUFFER); } return 1.f; } diff --git a/indra/newview/llviewertexture.h b/indra/newview/llviewertexture.h index 973d08f3648..406a136c9b1 100644 --- a/indra/newview/llviewertexture.h +++ b/indra/newview/llviewertexture.h @@ -114,6 +114,7 @@ class LLViewerTexture : public LLGLTexture static void initClass(); static void updateClass(); static bool isSystemMemoryLow(); + static bool isSystemMemoryCritical(); static F32 getSystemMemoryBudgetFactor(); LLViewerTexture(bool usemipmaps = true); diff --git a/indra/newview/llvocache.cpp b/indra/newview/llvocache.cpp index a8772d17950..dc356737ba5 100644 --- a/indra/newview/llvocache.cpp +++ b/indra/newview/llvocache.cpp @@ -488,13 +488,11 @@ void LLVOCacheEntry::updateDebugSettings() static const F32 MIN_RADIUS = 1.0f; F32 draw_radius = gAgentCamera.mDrawDistance; - if (LLViewerTexture::sDesiredDiscardBias > 2.f && LLViewerTexture::isSystemMemoryLow()) + if (LLViewerTexture::isSystemMemoryCritical()) { - // Discard's bias maximum is 
4 so we need to check 2 to 4 range // Factor is intended to go from 1.0 to 2.0 - F32 factor = 1.f + (LLViewerTexture::sDesiredDiscardBias - 2.f) / 2.f; // For safety cap reduction at 50%, we don't want to go below half of draw distance - draw_radius = llmax(draw_radius / factor, draw_radius / 2.f); + draw_radius = llmax(draw_radius / LLViewerTexture::getSystemMemoryBudgetFactor(), draw_radius / 2.f); } const F32 clamped_min_radius = llclamp((F32) min_radius, MIN_RADIUS, draw_radius); // [1, mDrawDistance] sNearRadius = MIN_RADIUS + ((clamped_min_radius - MIN_RADIUS) * adjust_factor); diff --git a/indra/newview/llvoicechannel.cpp b/indra/newview/llvoicechannel.cpp index b3ac28eb7a6..fbe896ac27d 100644 --- a/indra/newview/llvoicechannel.cpp +++ b/indra/newview/llvoicechannel.cpp @@ -357,6 +357,8 @@ void LLVoiceChannel::suspend() { sSuspendedVoiceChannel = sCurrentVoiceChannel; sSuspended = true; + + sCurrentVoiceChannelChangedSignal(sSuspendedVoiceChannel->mSessionID); } } @@ -365,6 +367,7 @@ void LLVoiceChannel::resume() { if (sSuspended) { + sSuspended = false; // needs to be before activate() so that observers will be able to read state if (LLVoiceClient::getInstance()->voiceEnabled()) { if (sSuspendedVoiceChannel) @@ -382,7 +385,6 @@ void LLVoiceChannel::resume() LLVoiceChannelProximal::getInstance()->activate(); } } - sSuspended = false; } } diff --git a/indra/newview/llvoicechannel.h b/indra/newview/llvoicechannel.h index 4d7bf551e12..bf119638d38 100644 --- a/indra/newview/llvoicechannel.h +++ b/indra/newview/llvoicechannel.h @@ -103,6 +103,7 @@ class LLVoiceChannel : public LLVoiceClientStatusObserver static void suspend(); static void resume(); + static bool isSuspended() { return sSuspended; } protected: virtual void setState(EState state); diff --git a/indra/newview/llvoiceclient.cpp b/indra/newview/llvoiceclient.cpp index 3edd2b473c3..71a9e71a9fd 100644 --- a/indra/newview/llvoiceclient.cpp +++ b/indra/newview/llvoiceclient.cpp @@ -292,7 +292,14 @@ void 
LLVoiceClient::setHidden(bool hidden) void LLVoiceClient::terminate() { - if (mSpatialVoiceModule) mSpatialVoiceModule->terminate(); + if (LLVivoxVoiceClient::instanceExists()) + { + LLWebRTCVoiceClient::getInstance()->terminate(); + } + if (LLVivoxVoiceClient::instanceExists()) + { + LLVivoxVoiceClient::getInstance()->terminate(); + } mSpatialVoiceModule = NULL; m_servicePump = NULL; diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp index b71d502d571..f500fb69e81 100644 --- a/indra/newview/llvoicewebrtc.cpp +++ b/indra/newview/llvoicewebrtc.cpp @@ -82,9 +82,15 @@ const std::string WEBRTC_VOICE_SERVER_TYPE = "webrtc"; namespace { - const F32 MAX_AUDIO_DIST = 50.0f; - //const F32 VOLUME_SCALE_WEBRTC = 0.01f; - const F32 LEVEL_SCALE_WEBRTC = 0.008f; + const F32 MAX_AUDIO_DIST = 50.0f; + const F32 VOLUME_SCALE_WEBRTC = 0.01f; + const F32 TUNING_LEVEL_SCALE = 0.01f; + const F32 TUNING_LEVEL_START_POINT = 0.8f; + const F32 LEVEL_SCALE = 0.005f; + const F32 LEVEL_START_POINT = 0.18f; + const uint32_t SET_HIDDEN_RESTORE_DELAY_MS = 200; // 200 ms to unmute again after hiding during teleport + const uint32_t MUTE_FADE_DELAY_MS = 500; // 20ms fade followed by 480ms silence gets rid of the click just after unmuting. + // This is because the buffers and processing is cleared by the silence. 
const F32 SPEAKING_AUDIO_LEVEL = 0.30; @@ -201,7 +207,6 @@ bool LLWebRTCVoiceClient::sShuttingDown = false; LLWebRTCVoiceClient::LLWebRTCVoiceClient() : mHidden(false), - mTuningMode(false), mTuningMicGain(0.0), mTuningSpeakerVolume(50), // Set to 50 so the user can hear themselves when he sets his mic volume mDevicesListUpdated(false), @@ -283,6 +288,8 @@ void LLWebRTCVoiceClient::terminate() return; } + LL_INFOS("Voice") << "Terminating WebRTC" << LL_ENDL; + mVoiceEnabled = false; llwebrtc::terminate(); @@ -348,25 +355,45 @@ void LLWebRTCVoiceClient::updateSettings() static LLCachedControl sOutputDevice(gSavedSettings, "VoiceOutputAudioDevice"); setRenderDevice(sOutputDevice); - LL_INFOS("Voice") << "Input device: " << std::quoted(sInputDevice()) << ", output device: " << std::quoted(sOutputDevice()) << LL_ENDL; + LL_INFOS("Voice") << "Input device: " << std::quoted(sInputDevice()) << ", output device: " << std::quoted(sOutputDevice()) + << LL_ENDL; static LLCachedControl sMicLevel(gSavedSettings, "AudioLevelMic"); setMicGain(sMicLevel); llwebrtc::LLWebRTCDeviceInterface::AudioConfig config; + bool audioConfigChanged = false; + static LLCachedControl sEchoCancellation(gSavedSettings, "VoiceEchoCancellation", true); - config.mEchoCancellation = sEchoCancellation; + if (sEchoCancellation != config.mEchoCancellation) + { + config.mEchoCancellation = sEchoCancellation; + audioConfigChanged = true; + } static LLCachedControl sAGC(gSavedSettings, "VoiceAutomaticGainControl", true); - config.mAGC = sAGC; + if (sAGC != config.mAGC) + { + config.mAGC = sAGC; + audioConfigChanged = true; + } - static LLCachedControl sNoiseSuppressionLevel(gSavedSettings, + static LLCachedControl sNoiseSuppressionLevel( + gSavedSettings, "VoiceNoiseSuppressionLevel", llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel::NOISE_SUPPRESSION_LEVEL_VERY_HIGH); - config.mNoiseSuppressionLevel = 
(llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel)(U32)sNoiseSuppressionLevel; - - mWebRTCDeviceInterface->setAudioConfig(config); + auto noiseSuppressionLevel = + (llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel)(U32)sNoiseSuppressionLevel; + if (noiseSuppressionLevel != config.mNoiseSuppressionLevel) + { + config.mNoiseSuppressionLevel = noiseSuppressionLevel; + audioConfigChanged = true; + } + if (audioConfigChanged) + { + mWebRTCDeviceInterface->setAudioConfig(config); + } } } @@ -664,7 +691,10 @@ LLVoiceDeviceList& LLWebRTCVoiceClient::getCaptureDevices() void LLWebRTCVoiceClient::setCaptureDevice(const std::string& name) { - mWebRTCDeviceInterface->setCaptureDevice(name); + if (mWebRTCDeviceInterface) + { + mWebRTCDeviceInterface->setCaptureDevice(name); + } } void LLWebRTCVoiceClient::setDevicesListUpdated(bool state) { @@ -695,21 +725,38 @@ void LLWebRTCVoiceClient::OnDevicesChangedImpl(const llwebrtc::LLWebRTCVoiceDevi std::string outputDevice = gSavedSettings.getString("VoiceOutputAudioDevice"); LL_DEBUGS("Voice") << "Setting devices to-input: '" << inputDevice << "' output: '" << outputDevice << "'" << LL_ENDL; - clearRenderDevices(); - for (auto &device : render_devices) + + // only set the render device if the device list has changed. + if (mRenderDevices.size() != render_devices.size() || !std::equal(mRenderDevices.begin(), + mRenderDevices.end(), + render_devices.begin(), + [](const LLVoiceDevice& a, const llwebrtc::LLWebRTCVoiceDevice& b) { + return a.display_name == b.mDisplayName && a.full_name == b.mID; })) { - addRenderDevice(LLVoiceDevice(device.mDisplayName, device.mID)); + clearRenderDevices(); + for (auto& device : render_devices) + { + addRenderDevice(LLVoiceDevice(device.mDisplayName, device.mID)); + } + setRenderDevice(outputDevice); } - setRenderDevice(outputDevice); - clearCaptureDevices(); - for (auto &device : capture_devices) + // only set the capture device if the device list has changed. 
+ if (mCaptureDevices.size() != capture_devices.size() ||!std::equal(mCaptureDevices.begin(), + mCaptureDevices.end(), + capture_devices.begin(), + [](const LLVoiceDevice& a, const llwebrtc::LLWebRTCVoiceDevice& b) + { return a.display_name == b.mDisplayName && a.full_name == b.mID; })) { - LL_DEBUGS("Voice") << "Checking capture device:'" << device.mID << "'" << LL_ENDL; + clearCaptureDevices(); + for (auto& device : capture_devices) + { + LL_DEBUGS("Voice") << "Checking capture device:'" << device.mID << "'" << LL_ENDL; - addCaptureDevice(LLVoiceDevice(device.mDisplayName, device.mID)); + addCaptureDevice(LLVoiceDevice(device.mDisplayName, device.mID)); + } + setCaptureDevice(inputDevice); } - setCaptureDevice(inputDevice); setDevicesListUpdated(true); } @@ -734,7 +781,10 @@ LLVoiceDeviceList& LLWebRTCVoiceClient::getRenderDevices() void LLWebRTCVoiceClient::setRenderDevice(const std::string& name) { - mWebRTCDeviceInterface->setRenderDevice(name); + if (mWebRTCDeviceInterface) + { + mWebRTCDeviceInterface->setRenderDevice(name); + } } void LLWebRTCVoiceClient::tuningStart() @@ -762,7 +812,14 @@ bool LLWebRTCVoiceClient::inTuningMode() void LLWebRTCVoiceClient::tuningSetMicVolume(float volume) { - mTuningMicGain = volume; + if (volume != mTuningMicGain) + { + mTuningMicGain = volume; + if (mWebRTCDeviceInterface) + { + mWebRTCDeviceInterface->setTuningMicGain(volume); + } + } } void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume) @@ -774,21 +831,10 @@ void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume) } } -float LLWebRTCVoiceClient::getAudioLevel() -{ - if (mIsInTuningMode) - { - return (1.0f - mWebRTCDeviceInterface->getTuningAudioLevel() * LEVEL_SCALE_WEBRTC) * mTuningMicGain / 2.1f; - } - else - { - return (1.0f - mWebRTCDeviceInterface->getPeerConnectionAudioLevel() * LEVEL_SCALE_WEBRTC) * mMicGain / 2.1f; - } -} - float LLWebRTCVoiceClient::tuningGetEnergy(void) { - return getAudioLevel(); + float rms = 
mWebRTCDeviceInterface->getTuningAudioLevel(); + return TUNING_LEVEL_START_POINT - TUNING_LEVEL_SCALE * rms; } bool LLWebRTCVoiceClient::deviceSettingsAvailable() @@ -824,6 +870,11 @@ void LLWebRTCVoiceClient::setHidden(bool hidden) if (inSpatialChannel()) { + if (mWebRTCDeviceInterface) + { + mWebRTCDeviceInterface->setMute(mHidden || mMuteMic, + mHidden ? 0 : SET_HIDDEN_RESTORE_DELAY_MS); // delay 200ms so as to not pile up mutes/unmutes. + } if (mHidden) { // get out of the channel entirely @@ -990,7 +1041,6 @@ void LLWebRTCVoiceClient::updatePosition(void) { if (participant->mRegion != region->getRegionID()) { participant->mRegion = region->getRegionID(); - setMuteMic(mMuteMic); } } } @@ -1115,13 +1165,14 @@ void LLWebRTCVoiceClient::sendPositionUpdate(bool force) // Update our own volume on our participant, so it'll show up // in the UI. This is done on all sessions, so switching // sessions retains consistent volume levels. -void LLWebRTCVoiceClient::updateOwnVolume() { - F32 audio_level = 0.0; - if (!mMuteMic && !mTuningMode) +void LLWebRTCVoiceClient::updateOwnVolume() +{ + F32 audio_level = 0.0f; + if (!mMuteMic) { - audio_level = getAudioLevel(); + float rms = mWebRTCDeviceInterface->getPeerConnectionAudioLevel(); + audio_level = LEVEL_START_POINT - LEVEL_SCALE * rms; } - sessionState::for_each(boost::bind(predUpdateOwnVolume, _1, audio_level)); } @@ -1518,6 +1569,17 @@ void LLWebRTCVoiceClient::setMuteMic(bool muted) } mMuteMic = muted; + + if (mIsInTuningMode) + { + return; + } + + if (mWebRTCDeviceInterface) + { + mWebRTCDeviceInterface->setMute(muted, muted ? MUTE_FADE_DELAY_MS : 0); // delay MUTE_FADE_DELAY_MS (500 ms) on mute so the fade and buffer flush complete + } + // when you're hidden, your mic is always muted. 
if (!mHidden) { @@ -1556,7 +1618,10 @@ void LLWebRTCVoiceClient::setMicGain(F32 gain) if (gain != mMicGain) { mMicGain = gain; - mWebRTCDeviceInterface->setPeerConnectionGain(gain); + if (mWebRTCDeviceInterface) + { + mWebRTCDeviceInterface->setMicGain(gain); + } } } diff --git a/indra/newview/llvoicewebrtc.h b/indra/newview/llvoicewebrtc.h index 71347f206a5..722d81fdc2b 100644 --- a/indra/newview/llvoicewebrtc.h +++ b/indra/newview/llvoicewebrtc.h @@ -444,10 +444,6 @@ class LLWebRTCVoiceClient : public LLSingleton, private: - // helper function to retrieve the audio level - // Used in multiple places. - float getAudioLevel(); - // Coroutine support methods //--- void voiceConnectionCoro(); @@ -458,7 +454,6 @@ class LLWebRTCVoiceClient : public LLSingleton, LL::WorkQueue::weak_t mMainQueue; - bool mTuningMode; F32 mTuningMicGain; int mTuningSpeakerVolume; bool mDevicesListUpdated; // set to true when the device list has been updated diff --git a/indra/newview/skins/default/xui/en/floater_marketplace.xml b/indra/newview/skins/default/xui/en/floater_marketplace.xml index 2299e02c63e..99fb3a1ad82 100644 --- a/indra/newview/skins/default/xui/en/floater_marketplace.xml +++ b/indra/newview/skins/default/xui/en/floater_marketplace.xml @@ -1,26 +1,201 @@ - + legacy_header_height="18" + can_minimize="true" + can_close="true" + can_resize="true" + height="775" + layout="topleft" + min_height="500" + min_width="600" + name="Marketplace" + save_rect="true" + single_instance="true" + save_visibility="true" + title="MARKETPLACE" + tab_stop="true" + width="780"> + + + + + + + + + + + + + + + + + + + + diff --git a/indra/newview/skins/default/xui/en/floater_search.xml b/indra/newview/skins/default/xui/en/floater_search.xml index 76a486e211f..43c4aa1b9de 100644 --- a/indra/newview/skins/default/xui/en/floater_search.xml +++ b/indra/newview/skins/default/xui/en/floater_search.xml @@ -1,26 +1,202 @@ - + legacy_header_height="18" + can_minimize="true" + can_close="true" + 
can_resize="true" + height="775" + layout="topleft" + min_height="500" + min_width="600" + name="Search" + save_rect="true" + single_instance="true" + save_visibility="true" + title="SEARCH" + tab_stop="true" + width="780"> + + + + + + + + + + + + + + + + + + + + diff --git a/indra/newview/skins/default/xui/en/menu_gallery_inventory.xml b/indra/newview/skins/default/xui/en/menu_gallery_inventory.xml index d8090070bda..3cfe2e0e6ff 100644 --- a/indra/newview/skins/default/xui/en/menu_gallery_inventory.xml +++ b/indra/newview/skins/default/xui/en/menu_gallery_inventory.xml @@ -686,51 +686,6 @@ function="Inventory.DoToSelected" parameter="remove_from_favorites" /> - - - - - - - - - - - - - - - - - - - - - - - + + - - - - + + - - - - + + - - - - + + - - + + - - - - + + - - - + + - - - + + - - - + + - - + + fail + +Model upload is not yet available on Apple Silicon, but will be supported in an upcoming release. + +Workaround: Right-click the Second Life app in Finder, select +"Get Info", then check "Open using Rosetta" + fail + + + +Physics library is not present, some of the model uploader's functionality might not work or might not work correctly. + fail + +