diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SettingsItem.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SettingsItem.kt
index 1f090424b..e198b18a0 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SettingsItem.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SettingsItem.kt
@@ -82,8 +82,8 @@ abstract class SettingsItem(
IntSetting.CPU_BACKEND,
R.string.cpu_backend,
0,
- R.array.cpuBackendNames,
- R.array.cpuBackendValues
+ R.array.cpuBackendArm64Names,
+ R.array.cpuBackendArm64Values
)
)
put(
diff --git a/src/android/app/src/main/res/values/arrays.xml b/src/android/app/src/main/res/values/arrays.xml
index 2756e5cc9..ab435dce9 100644
--- a/src/android/app/src/main/res/values/arrays.xml
+++ b/src/android/app/src/main/res/values/arrays.xml
@@ -175,16 +175,24 @@
<item>2</item>
</integer-array>

- <string-array name="cpuBackendNames">
+ <string-array name="cpuBackendArm64Names">
<item>@string/cpu_backend_dynarmic</item>
<item>@string/cpu_backend_nce</item>
</string-array>

- <integer-array name="cpuBackendValues">
+ <integer-array name="cpuBackendArm64Values">
<item>0</item>
<item>1</item>
</integer-array>

+ <string-array name="cpuBackendNames">
+ <item>@string/cpu_backend_dynarmic</item>
+ </string-array>
+
+ <integer-array name="cpuBackendValues">
+ <item>0</item>
+ </integer-array>
+
<string-array name="cpuAccuracyNames">
<item>@string/auto</item>
<item>@string/cpu_accuracy_accurate</item>
diff --git a/src/android/app/src/main/res/values/strings.xml b/src/android/app/src/main/res/values/strings.xml
index f07121f6a..95b90fd6d 100644
--- a/src/android/app/src/main/res/values/strings.xml
+++ b/src/android/app/src/main/res/values/strings.xml
@@ -185,7 +185,7 @@
<string name="frame_limit_enable_description">Limits emulation speed to a specified percentage of normal speed.</string>
<string name="frame_limit_slider">Limit speed percent</string>
<string name="frame_limit_slider_description">Specifies the percentage to limit emulation speed. 100% is the normal speed. Values higher or lower will increase or decrease the speed limit.</string>
- <string name="cpu_backend">CPU Backend</string>
+ <string name="cpu_backend">CPU backend</string>
<string name="cpu_accuracy">CPU accuracy</string>
<string name="value_with_units">%1$s%2$s</string>
diff --git a/src/core/arm/nce/patch.cpp b/src/core/arm/nce/patch.cpp
index 30c3c6cdd..a08859d0b 100644
--- a/src/core/arm/nce/patch.cpp
+++ b/src/core/arm/nce/patch.cpp
@@ -90,6 +90,10 @@ void Patcher::PatchText(const Kernel::PhysicalMemory& program_image,
WriteMsrHandler(AddRelocations(), oaknut::XReg{static_cast<int>(msr.GetRt())});
continue;
}
+
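+ // Remember where each exclusive load/store lives so RelocateAndCopy can
+ // later rewrite it as its ordered (acquire/release) form.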
+ if (auto exclusive = Exclusive{inst}; exclusive.Verify()) {
+ m_exclusives.push_back(i);
+ }
}
// Determine patching mode for the final relocation step
@@ -163,11 +167,9 @@ void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
// Cortex-A57 seems to treat all exclusives as ordered, but newer processors do not.
// Convert to ordered to preserve this assumption.
- for (u32 i = ModuleCodeIndex; i < static_cast<u32>(text_words.size()); i++) {
- const u32 inst = text_words[i];
- if (auto exclusive = Exclusive{inst}; exclusive.Verify()) {
- text_words[i] = exclusive.AsOrdered();
- }
+ for (const ModuleTextAddress i : m_exclusives) {
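+ // Setting the ordered bit turns e.g. LDXR/STXR into LDAXR/STLXR.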
+ auto exclusive = Exclusive{text_words[i]};
+ text_words[i] = exclusive.AsOrdered();
}
// Copy to program image
diff --git a/src/core/arm/nce/patch.h b/src/core/arm/nce/patch.h
index dcce1bfc6..112f839a4 100644
--- a/src/core/arm/nce/patch.h
+++ b/src/core/arm/nce/patch.h
@@ -93,6 +93,7 @@ private:
std::vector<Relocation> m_branch_to_patch_relocations{};
std::vector<Relocation> m_branch_to_module_relocations{};
std::vector<Relocation> m_write_module_pc_relocations{};
+ std::vector<ModuleTextAddress> m_exclusives{};
oaknut::Label m_save_context{};
oaknut::Label m_load_context{};
PatchMode mode{PatchMode::None};
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 2b5e77ccf..6691586ed 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -5678,15 +5678,8 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
case OperationType::ChangePermissions:
case OperationType::ChangePermissionsAndRefresh:
case OperationType::ChangePermissionsAndRefreshAndFlush: {
- const bool read = True(properties.perm & Kernel::KMemoryPermission::UserRead);
- const bool write = True(properties.perm & Kernel::KMemoryPermission::UserWrite);
- // todo: this doesn't really belong here and should go into m_memory to handle rasterizer
- // access todo: ignore exec on non-direct-mapped case
- const bool exec = True(properties.perm & Kernel::KMemoryPermission::UserExecute);
- if (Settings::IsFastmemEnabled()) {
- m_system.DeviceMemory().buffer.Protect(GetInteger(virt_addr), num_pages * PageSize,
- read, write, exec);
- }
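+ // Delegate to Memory::ProtectRegion, which owns the fastmem buffer and
+ // knows to leave rasterizer-cached pages alone.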
+ m_memory->ProtectRegion(*m_impl, virt_addr, num_pages * PageSize,
+ ConvertToMemoryPermission(properties.perm));
R_SUCCEED();
}
default:
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index e5ca78ef4..5b376b202 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -78,6 +78,51 @@ struct Memory::Impl {
}
}
+ void ProtectRegion(Common::PageTable& page_table, VAddr vaddr, u64 size,
+ Common::MemoryPermission perms) {
+ ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+ ASSERT_MSG((vaddr & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", vaddr);
+
+ if (!Settings::IsFastmemEnabled()) {
+ return;
+ }
+
+ const bool is_r = True(perms & Common::MemoryPermission::Read);
+ const bool is_w = True(perms & Common::MemoryPermission::Write);
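+ // Execute only matters for NCE, which runs guest code natively on the host.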
+ const bool is_x =
+ True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
+
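+ // No guest page table is active yet, so there are no rasterizer-cached
+ // pages to skip; protect the whole range in one call.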
+ if (!current_page_table) {
+ system.DeviceMemory().buffer.Protect(vaddr, size, is_r, is_w, is_x);
+ return;
+ }
+
+ u64 protect_bytes{};
+ u64 protect_begin{};
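+ // Walk the range page by page, coalescing consecutive non-rasterizer pages
+ // into a single host Protect call; rasterizer-cached pages keep the
+ // protection the rasterizer gave them.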
+ for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
+ const Common::PageType page_type{
+ current_page_table->pointers[addr >> YUZU_PAGEBITS].Type()};
+ switch (page_type) {
+ case Common::PageType::RasterizerCachedMemory:
+ if (protect_bytes > 0) {
+ system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w,
+ is_x);
+ protect_bytes = 0;
+ }
+ break;
+ default:
+ if (protect_bytes == 0) {
+ protect_begin = addr;
+ }
+ protect_bytes += YUZU_PAGESIZE;
+ }
+ }
+
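+ // Protect any final run that reaches the end of the region.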
+ if (protect_bytes > 0) {
+ system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x);
+ }
+ }
+
[[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(u64 vaddr) const {
const Common::PhysicalAddress paddr{
current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
@@ -839,6 +884,11 @@ void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress b
impl->UnmapRegion(page_table, base, size);
}
+void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
+ Common::MemoryPermission perms) {
+ impl->ProtectRegion(page_table, GetInteger(vaddr), size, perms);
+}
+
bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
const Kernel::KProcess& process = *system.ApplicationProcess();
const auto& page_table = process.GetPageTable().GetImpl();
diff --git a/src/core/memory.h b/src/core/memory.h
index e5fbc0025..ed8ebb5eb 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -97,6 +97,17 @@ public:
*/
void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
+ /**
+ * Protects a region of the emulated process address space with the new permissions.
+ *
+ * @param page_table The page table of the emulated process.
+ * @param base The start address to re-protect. Must be page-aligned.
+ * @param size The amount of bytes to protect. Must be page-aligned.
* @param perms The new permissions for the address range.
+ */
+ void ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ Common::MemoryPermission perms);
+
/**
* Checks whether or not the supplied address is a valid virtual
* address for the current process.