[All]: Add defensive checks for stability improvements

liberodark 2025-12-06 20:13:02 +01:00
parent 3e50d69d29
commit 3abe3b2d32
4 changed files with 119 additions and 20 deletions

View file

@@ -33,8 +33,26 @@ u32 EffectContext::GetCount() const {
 }
 
 void EffectContext::UpdateStateByDspShared() {
-    for (size_t i = 0; i < dsp_state_count; i++) {
-        effect_infos[i].UpdateResultState(result_states_cpu[i], result_states_dsp[i]);
+    // Ensure we don't exceed the bounds of any of the spans
+    const size_t max_count = std::min({dsp_state_count,
+                                       static_cast<size_t>(effect_infos.size()),
+                                       static_cast<size_t>(result_states_cpu.size()),
+                                       static_cast<size_t>(result_states_dsp.size())});
+    for (size_t i = 0; i < max_count; i++) {
+        try {
+            // Validate effect type before calling virtual function to prevent crashes from corrupted data
+            const auto effect_type = effect_infos[i].GetType();
+            if (effect_type == EffectInfoBase::Type::Invalid) {
+                // Skip invalid effects
+                continue;
+            }
+            effect_infos[i].UpdateResultState(result_states_cpu[i], result_states_dsp[i]);
+        } catch (...) {
+            // If UpdateResultState throws (e.g., due to corrupted effect data), skip this effect
+            // This prevents crashes from corrupted audio effect data
+            continue;
+        }
     }
 }
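
The clamped loop bound is the core of this fix: the DSP-reported count can go stale, so the iteration limit is taken from the shortest of the parallel containers. A minimal standalone sketch of that pattern, with hypothetical vectors standing in for the effect and result-state spans:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> infos{1, 2, 3, 4};
        std::vector<int> cpu_states{10, 20, 30};   // deliberately shorter
        std::vector<int> dsp_states{100, 200, 300, 400};
        const std::size_t reported_count = 4;      // e.g., a stale count from shared memory

        // Clamp to the smallest container so every index below is valid for all of them.
        const std::size_t max_count = std::min({reported_count, infos.size(),
                                                cpu_states.size(), dsp_states.size()});
        for (std::size_t i = 0; i < max_count; ++i) {
            std::printf("%d %d %d\n", infos[i], cpu_states[i], dsp_states[i]);
        }
    }

Note that catch (...) only helps if the project is built with exceptions enabled and UpdateResultState actually throws; an out-of-bounds access is undefined behavior rather than an exception, so the bounds clamp is doing the real protective work here.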

View file

@@ -123,24 +123,55 @@ void KScheduler::PreemptSingleCore() {
 void KScheduler::RescheduleCurrentCore() {
     ASSERT(!m_kernel.IsPhantomModeForSingleCore());
-    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
-
-    GetCurrentThread(m_kernel).EnableDispatch();
+    const s32 initial_disable_count = GetCurrentThread(m_kernel).GetDisableDispatchCount();
+    // We expect disable_count == 1, but after thread switches it might be different
+    // So we check defensively instead of asserting
+    if (initial_disable_count != 1) {
+        // If the count is not 1, something is wrong, but we'll try to handle it gracefully
+        // If it's 0, we need to disable first to get to 1
+        if (initial_disable_count == 0) {
+            GetCurrentThread(m_kernel).DisableDispatch();
+        }
+        // If it's > 1, we'll handle it after RescheduleCurrentCoreImpl
+    }
 
     if (m_state.needs_scheduling.load()) {
         // Disable interrupts, and then check again if rescheduling is needed.
         // KScopedInterruptDisable intr_disable;
 
+        // RescheduleCurrentCoreImpl expects disable_count == 1 and will maintain it
         m_kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
+
+        // After RescheduleCurrentCoreImpl returns, disable_count should still be 1
+        // However, Schedule() may have switched threads, so we need to check the count
+        const s32 final_disable_count = GetCurrentThread(m_kernel).GetDisableDispatchCount();
+        if (final_disable_count > 0) {
+            GetCurrentThread(m_kernel).EnableDispatch();
+        }
+        // If the count is 0 or negative, Schedule() may have already enabled dispatch
+        // or we're on a different thread, so we don't need to do anything
+    } else {
+        // If no rescheduling is needed, check the count before enabling
+        const s32 current_disable_count = GetCurrentThread(m_kernel).GetDisableDispatchCount();
+        if (current_disable_count > 0) {
+            GetCurrentThread(m_kernel).EnableDispatch();
+        }
     }
 }
 
 void KScheduler::RescheduleCurrentCoreImpl() {
     // Check that scheduling is needed.
     if (m_state.needs_scheduling.load()) [[likely]] {
-        GetCurrentThread(m_kernel).DisableDispatch();
+        // We should have disable_count == 1 when entering this function
+        // from RescheduleCurrentCore. Schedule() requires disable_count == 1.
+        const s32 initial_disable_count = GetCurrentThread(m_kernel).GetDisableDispatchCount();
+        if (initial_disable_count == 0) {
+            // Safety check: if somehow we're called with count 0, disable first
+            GetCurrentThread(m_kernel).DisableDispatch();
+        }
         Schedule();
-        GetCurrentThread(m_kernel).EnableDispatch();
+        // Schedule() may have switched threads, but if it didn't, we should still have
+        // disable_count == 1. We don't modify it here - let the caller handle it.
+        // This ensures the count remains balanced.
     }
 }
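
This change replaces a hard invariant (ASSERT on disable_count == 1) with defensive reads of the counter. The sketch below models the nesting-counter contract with a toy Thread type (a stand-in, not the kernel's KThread) to show how each path rebalances the count:

    #include <cassert>

    struct Thread {
        int disable_count = 0;
        void DisableDispatch() { ++disable_count; }
        void EnableDispatch() { --disable_count; }
    };

    void RescheduleLike(Thread& t) {
        // Defensive form: read the count instead of asserting it is exactly 1.
        if (t.disable_count == 0) {
            t.DisableDispatch(); // restore the expected nesting level of 1
        }
        // ... scheduling work would happen here ...
        if (t.disable_count > 0) {
            t.EnableDispatch(); // only decrement if there is something to balance
        }
    }

    int main() {
        Thread t;
        t.DisableDispatch(); // caller holds one disable level, as the kernel expects
        RescheduleLike(t);
        assert(t.disable_count == 0); // every path left the counter balanced
    }

The trade-off is that an assert enforces the invariant at the exact point of violation, while the defensive form keeps running but can let the original imbalance go unnoticed; the comments tracking the expected count at each step are what keep this auditable.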

View file

@@ -194,6 +194,18 @@ std::optional<NetworkInterface> GetSelectedNetworkInterface() {
         return std::nullopt;
     }
 
+    // If "None" is selected or the selected interface doesn't exist, auto-select the first available interface
+    if (selected_network_interface.empty() || selected_network_interface == "None") {
+        static bool auto_selected = false;
+        if (!auto_selected) {
+            LOG_INFO(Network, "Network interface was set to \"None\", auto-selecting first available interface: \"{}\"",
+                     network_interfaces[0].name);
+            Settings::values.network_interface.SetValue(network_interfaces[0].name);
+            auto_selected = true;
+        }
+        return network_interfaces[0];
+    }
+
     const auto res =
         std::ranges::find_if(network_interfaces, [&selected_network_interface](const auto& iface) {
             return iface.name == selected_network_interface;
@@ -203,12 +215,14 @@ std::optional<NetworkInterface> GetSelectedNetworkInterface() {
         // Only print the error once to avoid log spam
         static bool print_error = true;
         if (print_error) {
-            LOG_ERROR(Network, "Couldn't find selected interface \"{}\"",
-                      selected_network_interface);
+            LOG_WARNING(Network, "Couldn't find selected interface \"{}\", falling back to first available: \"{}\"",
+                        selected_network_interface, network_interfaces[0].name);
             print_error = false;
         }
 
-        return std::nullopt;
+        // Auto-select the first available interface as fallback
+        Settings::values.network_interface.SetValue(network_interfaces[0].name);
+        return network_interfaces[0];
     }
 
     return *res;
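
Both changed paths now converge on the same behavior: fall back to the first enumerated interface and persist that choice, rather than returning std::nullopt. A condensed sketch of the selection logic, with simplified stand-in types for the settings and interface structures:

    #include <algorithm>
    #include <optional>
    #include <string>
    #include <vector>

    struct Iface {
        std::string name;
    };

    std::optional<Iface> SelectIface(std::string configured, std::vector<Iface> available) {
        if (available.empty()) {
            return std::nullopt; // nothing to fall back to
        }
        if (configured.empty() || configured == "None") {
            return available.front(); // auto-select instead of returning nullopt
        }
        const auto it = std::ranges::find_if(
            available, [&](const Iface& i) { return i.name == configured; });
        if (it == available.end()) {
            return available.front(); // configured interface vanished: fall back
        }
        return *it;
    }

    int main() {
        const auto chosen = SelectIface("None", {{"eth0"}, {"wlan0"}});
        // "None" falls back to the first available interface
        return chosen && chosen->name == "eth0" ? 0 : 1;
    }

One side effect worth noting: persisting the fallback from inside a getter mutates user configuration, and the static guards mean the auto-selection is only logged once per process even if it happens repeatedly.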

View file

@@ -72,7 +72,10 @@ void MaxwellDMA::Launch() {
     // TODO(Subv): Perform more research and implement all features of this engine.
     const LaunchDMA& launch = regs.launch_dma;
     ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
-    ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
+    // Allow PIPELINED transfers - they should work the same as NON_PIPELINED for our implementation
+    if (launch.data_transfer_type == LaunchDMA::DataTransferType::NONE) {
+        LOG_WARNING(Render_OpenGL, "DataTransferType::NONE is not supported, treating as NON_PIPELINED");
+    }
 
     if (launch.multi_line_enable) {
         const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
@@ -105,18 +108,51 @@ void MaxwellDMA::Launch() {
         }
     } else {
         // TODO: allow multisized components.
+        if (!rasterizer) {
+            LOG_ERROR(Render_OpenGL, "MaxwellDMA: rasterizer is null, cannot perform DMA operation");
+            ReleaseSemaphore();
+            return;
+        }
         auto& accelerate = rasterizer->AccessAccelerateDMA();
         const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
         if (regs.launch_dma.remap_enable != 0 && is_const_a_dst) {
-            ASSERT(regs.remap_const.component_size_minus_one == 3);
-            accelerate.BufferClear(regs.offset_out, regs.line_length_in,
-                                   regs.remap_const.remap_consta_value);
-            read_buffer.resize_destructive(regs.line_length_in * sizeof(u32));
-            std::span<u32> span(reinterpret_cast<u32*>(read_buffer.data()), regs.line_length_in);
-            std::ranges::fill(span, regs.remap_const.remap_consta_value);
-            memory_manager.WriteBlockUnsafe(regs.offset_out,
-                                            reinterpret_cast<u8*>(read_buffer.data()),
-                                            regs.line_length_in * sizeof(u32));
+            const u32 component_size = regs.remap_const.component_size_minus_one + 1;
+            const u32 total_bytes = regs.line_length_in * component_size;
+
+            if (component_size == 4) {
+                // 4-byte components: use u32 operations (original behavior)
+                // line_length_in is the number of components, which equals the number of
+                // u32 elements when component_size == 4
+                // BufferClear expects amount in u32 elements
+                const u32 num_u32_elements = regs.line_length_in;
+                accelerate.BufferClear(regs.offset_out, num_u32_elements,
+                                       regs.remap_const.remap_consta_value);
+                read_buffer.resize_destructive(total_bytes);
+                std::span<u32> span(reinterpret_cast<u32*>(read_buffer.data()), num_u32_elements);
+                std::ranges::fill(span, regs.remap_const.remap_consta_value);
+                memory_manager.WriteBlockUnsafe(regs.offset_out,
+                                                reinterpret_cast<u8*>(read_buffer.data()),
+                                                total_bytes);
+            } else {
+                // 1, 2, or 3-byte components: use byte operations
+                static bool warned_component_size = false;
+                if (!warned_component_size) {
+                    LOG_WARNING(Render_OpenGL,
+                                "Component size {} is not fully supported, using byte-level operations. "
+                                "This warning will only be shown once.",
+                                component_size);
+                    warned_component_size = true;
+                }
+                read_buffer.resize_destructive(total_bytes);
+                // Fill buffer with the constant value, respecting component size
+                u8* buffer = read_buffer.data();
+                const u32 value = regs.remap_const.remap_consta_value;
+                for (u32 i = 0; i < regs.line_length_in; ++i) {
+                    for (u32 j = 0; j < component_size; ++j) {
+                        buffer[i * component_size + j] = static_cast<u8>((value >> (j * 8)) & 0xFF);
+                    }
+                }
+                memory_manager.WriteBlockUnsafe(regs.offset_out, buffer, total_bytes);
+            }
         } else {
             memory_manager.FlushCaching();
             const auto convert_linear_2_blocklinear_addr = [](u64 address) {