diff --git a/src/audio_core/renderer/effect/effect_context.cpp b/src/audio_core/renderer/effect/effect_context.cpp index 00f6d7822..ef2bce731 100644 --- a/src/audio_core/renderer/effect/effect_context.cpp +++ b/src/audio_core/renderer/effect/effect_context.cpp @@ -33,8 +33,26 @@ u32 EffectContext::GetCount() const { } void EffectContext::UpdateStateByDspShared() { - for (size_t i = 0; i < dsp_state_count; i++) { - effect_infos[i].UpdateResultState(result_states_cpu[i], result_states_dsp[i]); + // Ensure we don't exceed the bounds of any of the spans + const size_t max_count = std::min({dsp_state_count, + static_cast(effect_infos.size()), + static_cast(result_states_cpu.size()), + static_cast(result_states_dsp.size())}); + + for (size_t i = 0; i < max_count; i++) { + try { + // Validate effect type before calling virtual function to prevent crashes from corrupted data + const auto effect_type = effect_infos[i].GetType(); + if (effect_type == EffectInfoBase::Type::Invalid) { + // Skip invalid effects + continue; + } + effect_infos[i].UpdateResultState(result_states_cpu[i], result_states_dsp[i]); + } catch (...) { + // If UpdateResultState throws (e.g., due to corrupted effect data), skip this effect + // This prevents crashes from corrupted audio effect data + continue; + } } } diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 27d1c3846..5b28843d7 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -123,24 +123,55 @@ void KScheduler::PreemptSingleCore() { void KScheduler::RescheduleCurrentCore() { ASSERT(!m_kernel.IsPhantomModeForSingleCore()); - ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1); - - GetCurrentThread(m_kernel).EnableDispatch(); + const s32 initial_disable_count = GetCurrentThread(m_kernel).GetDisableDispatchCount(); + // We expect disable_count == 1, but after thread switches it might be different + // So we check defensively instead of asserting + if (initial_disable_count != 1) { + // If the count is not 1, something is wrong, but we'll try to handle it gracefully + // If it's 0, we need to disable first to get to 1 + if (initial_disable_count == 0) { + GetCurrentThread(m_kernel).DisableDispatch(); + } + // If it's > 1, we'll handle it after RescheduleCurrentCoreImpl + } if (m_state.needs_scheduling.load()) { // Disable interrupts, and then check again if rescheduling is needed. // KScopedInterruptDisable intr_disable; + // RescheduleCurrentCoreImpl expects disable_count == 1 and will maintain it m_kernel.CurrentScheduler()->RescheduleCurrentCoreImpl(); + // After RescheduleCurrentCoreImpl returns, disable_count should still be 1 + // However, Schedule() may have switched threads, so we need to check the count + const s32 final_disable_count = GetCurrentThread(m_kernel).GetDisableDispatchCount(); + if (final_disable_count > 0) { + GetCurrentThread(m_kernel).EnableDispatch(); + } + // If the count is 0 or negative, Schedule() may have already enabled dispatch + // or we're on a different thread, so we don't need to do anything + } else { + // If no rescheduling is needed, check the count before enabling + const s32 current_disable_count = GetCurrentThread(m_kernel).GetDisableDispatchCount(); + if (current_disable_count > 0) { + GetCurrentThread(m_kernel).EnableDispatch(); + } } } void KScheduler::RescheduleCurrentCoreImpl() { // Check that scheduling is needed. if (m_state.needs_scheduling.load()) [[likely]] { - GetCurrentThread(m_kernel).DisableDispatch(); + // We should have disable_count == 1 when entering this function + // from RescheduleCurrentCore. Schedule() requires disable_count == 1. + const s32 initial_disable_count = GetCurrentThread(m_kernel).GetDisableDispatchCount(); + if (initial_disable_count == 0) { + // Safety check: if somehow we're called with count 0, disable first + GetCurrentThread(m_kernel).DisableDispatch(); + } Schedule(); - GetCurrentThread(m_kernel).EnableDispatch(); + // Schedule() may have switched threads, but if it didn't, we should still have + // disable_count == 1. We don't modify it here - let the caller handle it. + // This ensures the count remains balanced. } } diff --git a/src/core/internal_network/network_interface.cpp b/src/core/internal_network/network_interface.cpp index 7c37f660b..6f055cf37 100644 --- a/src/core/internal_network/network_interface.cpp +++ b/src/core/internal_network/network_interface.cpp @@ -194,6 +194,18 @@ std::optional GetSelectedNetworkInterface() { return std::nullopt; } + // If "None" is selected or the selected interface doesn't exist, auto-select the first available interface + if (selected_network_interface.empty() || selected_network_interface == "None") { + static bool auto_selected = false; + if (!auto_selected) { + LOG_INFO(Network, "Network interface was set to \"None\", auto-selecting first available interface: \"{}\"", + network_interfaces[0].name); + Settings::values.network_interface.SetValue(network_interfaces[0].name); + auto_selected = true; + } + return network_interfaces[0]; + } + const auto res = std::ranges::find_if(network_interfaces, [&selected_network_interface](const auto& iface) { return iface.name == selected_network_interface; @@ -203,12 +215,14 @@ std::optional GetSelectedNetworkInterface() { // Only print the error once to avoid log spam static bool print_error = true; if (print_error) { - LOG_ERROR(Network, "Couldn't find selected interface \"{}\"", - selected_network_interface); + LOG_WARNING(Network, "Couldn't find selected interface \"{}\", falling back to first available: \"{}\"", + selected_network_interface, network_interfaces[0].name); print_error = false; } - return std::nullopt; + // Auto-select the first available interface as fallback + Settings::values.network_interface.SetValue(network_interfaces[0].name); + return network_interfaces[0]; } return *res; diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index 86000d093..fac2481a7 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -72,7 +72,10 @@ void MaxwellDMA::Launch() { // TODO(Subv): Perform more research and implement all features of this engine. const LaunchDMA& launch = regs.launch_dma; ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE); - ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED); + // Allow PIPELINED transfers - they should work the same as NON_PIPELINED for our implementation + if (launch.data_transfer_type == LaunchDMA::DataTransferType::NONE) { + LOG_WARNING(Render_OpenGL, "DataTransferType::NONE is not supported, treating as NON_PIPELINED"); + } if (launch.multi_line_enable) { const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH; @@ -105,18 +108,51 @@ void MaxwellDMA::Launch() { } } else { // TODO: allow multisized components. + if (!rasterizer) { + LOG_ERROR(Render_OpenGL, "MaxwellDMA: rasterizer is null, cannot perform DMA operation"); + ReleaseSemaphore(); + return; + } auto& accelerate = rasterizer->AccessAccelerateDMA(); const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A; if (regs.launch_dma.remap_enable != 0 && is_const_a_dst) { - ASSERT(regs.remap_const.component_size_minus_one == 3); - accelerate.BufferClear(regs.offset_out, regs.line_length_in, - regs.remap_const.remap_consta_value); - read_buffer.resize_destructive(regs.line_length_in * sizeof(u32)); - std::span span(reinterpret_cast(read_buffer.data()), regs.line_length_in); - std::ranges::fill(span, regs.remap_const.remap_consta_value); - memory_manager.WriteBlockUnsafe(regs.offset_out, - reinterpret_cast(read_buffer.data()), - regs.line_length_in * sizeof(u32)); + const u32 component_size = regs.remap_const.component_size_minus_one + 1; + const u32 total_bytes = regs.line_length_in * component_size; + + if (component_size == 4) { + // 4-byte components: use u32 operations (original behavior) + // line_length_in is the number of components, which equals the number of u32 elements when component_size == 4 + // BufferClear expects amount in u32 elements + const u32 num_u32_elements = regs.line_length_in; + accelerate.BufferClear(regs.offset_out, num_u32_elements, + regs.remap_const.remap_consta_value); + read_buffer.resize_destructive(total_bytes); + std::span span(reinterpret_cast(read_buffer.data()), num_u32_elements); + std::ranges::fill(span, regs.remap_const.remap_consta_value); + memory_manager.WriteBlockUnsafe(regs.offset_out, + reinterpret_cast(read_buffer.data()), + total_bytes); + } else { + // 1, 2, or 3-byte components: use byte operations + static bool warned_component_size = false; + if (!warned_component_size) { + LOG_WARNING(Render_OpenGL, + "Component size {} is not fully supported, using byte-level operations. " + "This warning will only be shown once.", + component_size); + warned_component_size = true; + } + read_buffer.resize_destructive(total_bytes); + // Fill buffer with the constant value, respecting component size + u8* buffer = read_buffer.data(); + const u32 value = regs.remap_const.remap_consta_value; + for (u32 i = 0; i < regs.line_length_in; ++i) { + for (u32 j = 0; j < component_size; ++j) { + buffer[i * component_size + j] = static_cast((value >> (j * 8)) & 0xFF); + } + } + memory_manager.WriteBlockUnsafe(regs.offset_out, buffer, total_bytes); + } } else { memory_manager.FlushCaching(); const auto convert_linear_2_blocklinear_addr = [](u64 address) {