mirror of https://git.citron-emu.org/citron/emulator (synced 2026-01-05 17:43:46 +00:00)
chore: update project branding to CITRON
Signed-off-by: Zephyron <zephyron@citron-emu.org>
@@ -1233,11 +1233,11 @@ else()
             hle/service/ssl/ssl_backend_none.cpp)
 endif()
 
-if (YUZU_USE_PRECOMPILED_HEADERS)
+if (CITRON_USE_PRECOMPILED_HEADERS)
     target_precompile_headers(core PRIVATE precompiled_headers.h)
 endif()
 
-if (YUZU_ENABLE_LTO)
+if (CITRON_ENABLE_LTO)
    set_property(TARGET core PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE)
 endif()
@@ -47,8 +47,8 @@ enum class Architecture {
 /// Generic ARMv8 CPU interface
 class ArmInterface {
 public:
-    YUZU_NON_COPYABLE(ArmInterface);
-    YUZU_NON_MOVEABLE(ArmInterface);
+    CITRON_NON_COPYABLE(ArmInterface);
+    CITRON_NON_MOVEABLE(ArmInterface);
 
     explicit ArmInterface(bool uses_wall_clock) : m_uses_wall_clock{uses_wall_clock} {}
     virtual ~ArmInterface() = default;
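Note: the diff only renames the call sites of these helpers. As a point of reference, the old YUZU_NON_COPYABLE/YUZU_NON_MOVEABLE macros deleted the copy and move members, and the CITRON_* versions presumably keep the same expansion — an assumption, since the macro definitions themselves are not shown in this diff:

// Hypothetical sketch of the renamed macros, mirroring the old YUZU_* ones:
#define CITRON_NON_COPYABLE(cls)                                                                   \
    cls(const cls&) = delete;                                                                      \
    cls& operator=(const cls&) = delete
#define CITRON_NON_MOVEABLE(cls)                                                                   \
    cls(cls&&) = delete;                                                                           \
    cls& operator=(cls&&) = delete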
@@ -160,8 +160,8 @@ bool ArmNce::HandleGuestAccessFault(GuestContext* guest_ctx, void* raw_info, voi
     // Try to handle an invalid access.
     // TODO: handle accesses which split a page?
     const Common::ProcessAddress addr =
-        (reinterpret_cast<u64>(info->si_addr) & ~Memory::YUZU_PAGEMASK);
-    if (guest_ctx->system->ApplicationMemory().InvalidateNCE(addr, Memory::YUZU_PAGESIZE)) {
+        (reinterpret_cast<u64>(info->si_addr) & ~Memory::CITRON_PAGEMASK);
+    if (guest_ctx->system->ApplicationMemory().InvalidateNCE(addr, Memory::CITRON_PAGESIZE)) {
         // We handled the access successfully and are returning to guest code.
         return true;
     }
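For reference, the mask arithmetic in the fault handler above rounds the faulting address down to its page base. A minimal self-contained sketch using the constants defined in core/memory.h at the end of this diff (CITRON_PAGEBITS = 12):

#include <cstdint>

constexpr std::uint64_t kPageSize = 1ULL << 12;    // Memory::CITRON_PAGESIZE
constexpr std::uint64_t kPageMask = kPageSize - 1; // Memory::CITRON_PAGEMASK

// addr & ~mask clears the low 12 bits, yielding the page-aligned base.
static_assert((0x12345678ULL & ~kPageMask) == 0x12345000ULL);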
@@ -213,7 +213,7 @@ bool Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
 }
 
 size_t Patcher::GetSectionSize() const noexcept {
-    return Common::AlignUp(m_patch_instructions.size() * sizeof(u32), Core::Memory::YUZU_PAGESIZE);
+    return Common::AlignUp(m_patch_instructions.size() * sizeof(u32), Core::Memory::CITRON_PAGESIZE);
 }
 
 void Patcher::WriteLoadContext() {

@@ -132,7 +132,7 @@ struct EmptyAllocator {
 template <typename DTraits>
 struct DeviceMemoryManagerAllocator {
     static constexpr size_t device_virtual_bits = DTraits::device_virtual_bits;
-    static constexpr DAddr first_address = 1ULL << Memory::YUZU_PAGEBITS;
+    static constexpr DAddr first_address = 1ULL << Memory::CITRON_PAGEBITS;
     static constexpr DAddr max_device_area = 1ULL << device_virtual_bits;
 
     DeviceMemoryManagerAllocator() : main_allocator(first_address) {}

@@ -162,18 +162,18 @@ struct DeviceMemoryManagerAllocator {
 template <typename Traits>
 DeviceMemoryManager<Traits>::DeviceMemoryManager(const DeviceMemory& device_memory_)
     : physical_base{reinterpret_cast<const uintptr_t>(device_memory_.buffer.BackingBasePointer())},
-      device_inter{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS),
+      device_inter{nullptr}, compressed_physical_ptr(device_as_size >> Memory::CITRON_PAGEBITS),
       compressed_device_addr(1ULL << ((Settings::values.memory_layout_mode.GetValue() ==
                                                Settings::MemoryLayout::Memory_4Gb
                                            ? physical_min_bits
                                            : physical_max_bits) -
-                                      Memory::YUZU_PAGEBITS)),
-      continuity_tracker(device_as_size >> Memory::YUZU_PAGEBITS),
-      cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) {
+                                      Memory::CITRON_PAGEBITS)),
+      continuity_tracker(device_as_size >> Memory::CITRON_PAGEBITS),
+      cpu_backing_address(device_as_size >> Memory::CITRON_PAGEBITS) {
     impl = std::make_unique<DeviceMemoryManagerAllocator<Traits>>();
     cached_pages = std::make_unique<CachedPages>();
 
-    const size_t total_virtual = device_as_size >> Memory::YUZU_PAGEBITS;
+    const size_t total_virtual = device_as_size >> Memory::CITRON_PAGEBITS;
     for (size_t i = 0; i < total_virtual; i++) {
         compressed_physical_ptr[i] = 0;
         continuity_tracker[i] = 1;

@@ -183,7 +183,7 @@ DeviceMemoryManager<Traits>::DeviceMemoryManager(const DeviceMemory& device_memo
                                               Settings::MemoryLayout::Memory_4Gb
                                           ? physical_min_bits
                                           : physical_max_bits) -
-                                     Memory::YUZU_PAGEBITS);
+                                     Memory::CITRON_PAGEBITS);
     for (size_t i = 0; i < total_phys; i++) {
         compressed_device_addr[i] = 0;
     }

@@ -216,17 +216,17 @@ template <typename Traits>
 void DeviceMemoryManager<Traits>::Map(DAddr address, VAddr virtual_address, size_t size, Asid asid,
                                       bool track) {
     Core::Memory::Memory* process_memory = registered_processes[asid.id];
-    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
-    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
+    size_t start_page_d = address >> Memory::CITRON_PAGEBITS;
+    size_t num_pages = Common::AlignUp(size, Memory::CITRON_PAGESIZE) >> Memory::CITRON_PAGEBITS;
     std::scoped_lock lk(mapping_guard);
     for (size_t i = 0; i < num_pages; i++) {
-        const VAddr new_vaddress = virtual_address + i * Memory::YUZU_PAGESIZE;
+        const VAddr new_vaddress = virtual_address + i * Memory::CITRON_PAGESIZE;
         auto* ptr = process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress));
         if (ptr == nullptr) [[unlikely]] {
             compressed_physical_ptr[start_page_d + i] = 0;
             continue;
         }
-        auto phys_addr = static_cast<u32>(GetRawPhysicalAddr(ptr) >> Memory::YUZU_PAGEBITS) + 1U;
+        auto phys_addr = static_cast<u32>(GetRawPhysicalAddr(ptr) >> Memory::CITRON_PAGEBITS) + 1U;
         compressed_physical_ptr[start_page_d + i] = phys_addr;
         InsertCPUBacking(start_page_d + i, new_vaddress, asid);
         const u32 base_dev = compressed_device_addr[phys_addr - 1U];

@@ -249,8 +249,8 @@ void DeviceMemoryManager<Traits>::Map(DAddr address, VAddr virtual_address, size
 
 template <typename Traits>
 void DeviceMemoryManager<Traits>::Unmap(DAddr address, size_t size) {
-    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
-    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
+    size_t start_page_d = address >> Memory::CITRON_PAGEBITS;
+    size_t num_pages = Common::AlignUp(size, Memory::CITRON_PAGESIZE) >> Memory::CITRON_PAGEBITS;
     device_inter->InvalidateRegion(address, size);
     std::scoped_lock lk(mapping_guard);
     for (size_t i = 0; i < num_pages; i++) {

@@ -278,13 +278,13 @@ template <typename Traits>
 void DeviceMemoryManager<Traits>::TrackContinuityImpl(DAddr address, VAddr virtual_address,
                                                       size_t size, Asid asid) {
     Core::Memory::Memory* process_memory = registered_processes[asid.id];
-    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
-    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
+    size_t start_page_d = address >> Memory::CITRON_PAGEBITS;
+    size_t num_pages = Common::AlignUp(size, Memory::CITRON_PAGESIZE) >> Memory::CITRON_PAGEBITS;
     uintptr_t last_ptr = 0;
     size_t page_count = 1;
     for (size_t i = num_pages; i > 0; i--) {
         size_t index = i - 1;
-        const VAddr new_vaddress = virtual_address + index * Memory::YUZU_PAGESIZE;
+        const VAddr new_vaddress = virtual_address + index * Memory::CITRON_PAGESIZE;
         const uintptr_t new_ptr = reinterpret_cast<uintptr_t>(
             process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress)));
         if (new_ptr + page_size == last_ptr) {
@@ -333,26 +333,26 @@ void DeviceMemoryManager<Traits>::InnerGatherDeviceAddresses(Common::ScratchBuff
 template <typename Traits>
 template <typename T>
 T* DeviceMemoryManager<Traits>::GetPointer(DAddr address) {
-    const size_t index = address >> Memory::YUZU_PAGEBITS;
-    const size_t offset = address & Memory::YUZU_PAGEMASK;
+    const size_t index = address >> Memory::CITRON_PAGEBITS;
+    const size_t offset = address & Memory::CITRON_PAGEMASK;
     auto phys_addr = compressed_physical_ptr[index];
     if (phys_addr == 0) [[unlikely]] {
         return nullptr;
     }
-    return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) +
+    return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::CITRON_PAGEBITS) +
                                 offset);
 }
 
 template <typename Traits>
 template <typename T>
 const T* DeviceMemoryManager<Traits>::GetPointer(DAddr address) const {
-    const size_t index = address >> Memory::YUZU_PAGEBITS;
-    const size_t offset = address & Memory::YUZU_PAGEMASK;
+    const size_t index = address >> Memory::CITRON_PAGEBITS;
+    const size_t offset = address & Memory::CITRON_PAGEMASK;
     auto phys_addr = compressed_physical_ptr[index];
     if (phys_addr == 0) [[unlikely]] {
         return nullptr;
     }
-    return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) +
+    return GetPointerFromRaw<T>((static_cast<PAddr>(phys_addr - 1) << Memory::CITRON_PAGEBITS) +
                                 offset);
 }
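As the lookup above shows, compressed_physical_ptr entries store the physical page number plus one, so zero can double as the "unmapped" sentinel. A small illustrative sketch of the decode step (names here are hypothetical, not project code):

#include <cstdint>

constexpr std::uint64_t kPageBits = 12; // Memory::CITRON_PAGEBITS

// Decode a page-table entry back to a physical page base; 0 means unmapped.
constexpr std::uint64_t DecodePageBase(std::uint32_t entry) {
    return entry == 0 ? 0 : (static_cast<std::uint64_t>(entry - 1) << kPageBits);
}

static_assert(DecodePageBase(1) == 0x0000); // physical page 0
static_assert(DecodePageBase(2) == 0x1000); // physical page 1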
@@ -382,15 +382,15 @@ template <typename Traits>
 void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto on_unmapped,
                                             auto on_memory, auto increment) {
     std::size_t remaining_size = size;
-    std::size_t page_index = addr >> Memory::YUZU_PAGEBITS;
-    std::size_t page_offset = addr & Memory::YUZU_PAGEMASK;
+    std::size_t page_index = addr >> Memory::CITRON_PAGEBITS;
+    std::size_t page_offset = addr & Memory::CITRON_PAGEMASK;
 
     while (remaining_size) {
         const size_t next_pages = static_cast<std::size_t>(continuity_tracker[page_index]);
         const std::size_t copy_amount =
-            std::min((next_pages << Memory::YUZU_PAGEBITS) - page_offset, remaining_size);
+            std::min((next_pages << Memory::CITRON_PAGEBITS) - page_offset, remaining_size);
         const auto current_vaddr =
-            static_cast<u64>((page_index << Memory::YUZU_PAGEBITS) + page_offset);
+            static_cast<u64>((page_index << Memory::CITRON_PAGEBITS) + page_offset);
         SCOPE_EXIT{
             page_index += next_pages;
             page_offset = 0;

@@ -404,7 +404,7 @@ void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto o
             continue;
         }
         auto* mem_ptr = GetPointerFromRaw<u8>(
-            (static_cast<PAddr>(phys_addr - 1) << Memory::YUZU_PAGEBITS) + page_offset);
+            (static_cast<PAddr>(phys_addr - 1) << Memory::CITRON_PAGEBITS) + page_offset);
         on_memory(copy_amount, mem_ptr);
     }
 }

@@ -516,31 +516,31 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
     const auto MarkRegionCaching = &DeviceMemoryManager<Traits>::DeviceMethods::MarkRegionCaching;
 
     std::atomic_thread_fence(std::memory_order_acquire);
-    const size_t page_end = Common::DivCeil(addr + size, Memory::YUZU_PAGESIZE);
-    size_t page = addr >> Memory::YUZU_PAGEBITS;
+    const size_t page_end = Common::DivCeil(addr + size, Memory::CITRON_PAGESIZE);
+    size_t page = addr >> Memory::CITRON_PAGEBITS;
     auto [asid, base_vaddress] = ExtractCPUBacking(page);
     auto* memory_device_inter = registered_processes[asid.id];
     const auto release_pending = [&] {
         if (uncache_bytes > 0) {
             if (memory_device_inter != nullptr) {
-                MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS,
+                MarkRegionCaching(memory_device_inter, uncache_begin << Memory::CITRON_PAGEBITS,
                                   uncache_bytes, false);
             }
             uncache_bytes = 0;
         }
         if (cache_bytes > 0) {
             if (memory_device_inter != nullptr) {
-                MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS,
+                MarkRegionCaching(memory_device_inter, cache_begin << Memory::CITRON_PAGEBITS,
                                   cache_bytes, true);
             }
             cache_bytes = 0;
         }
     };
-    size_t old_vpage = (base_vaddress >> Memory::YUZU_PAGEBITS) - 1;
+    size_t old_vpage = (base_vaddress >> Memory::CITRON_PAGEBITS) - 1;
     for (; page != page_end; ++page) {
         CounterAtomicType& count = cached_pages->at(page >> subentries_shift).Count(page);
         auto [asid_2, vpage] = ExtractCPUBacking(page);
-        vpage >>= Memory::YUZU_PAGEBITS;
+        vpage >>= Memory::CITRON_PAGEBITS;
 
         if (vpage == 0) [[unlikely]] {
             release_pending();

@@ -566,9 +566,9 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
             if (uncache_bytes == 0) {
                 uncache_begin = vpage;
             }
-            uncache_bytes += Memory::YUZU_PAGESIZE;
+            uncache_bytes += Memory::CITRON_PAGESIZE;
         } else if (uncache_bytes > 0) {
-            MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS,
+            MarkRegionCaching(memory_device_inter, uncache_begin << Memory::CITRON_PAGEBITS,
                               uncache_bytes, false);
             uncache_bytes = 0;
         }

@@ -576,9 +576,9 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
             if (cache_bytes == 0) {
                 cache_begin = vpage;
             }
-            cache_bytes += Memory::YUZU_PAGESIZE;
+            cache_bytes += Memory::CITRON_PAGESIZE;
         } else if (cache_bytes > 0) {
-            MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS,
+            MarkRegionCaching(memory_device_inter, cache_begin << Memory::CITRON_PAGEBITS,
                               cache_bytes, true);
             cache_bytes = 0;
         }
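Common::DivCeil above computes the exclusive end page, so a region that spills even one byte into the next page still covers that page. A standalone equivalent of the helper (a sketch; the project's own implementation lives in its common library):

#include <cstddef>

constexpr std::size_t DivCeil(std::size_t n, std::size_t d) {
    return (n + d - 1) / d;
}

static_assert(DivCeil(0x1000, 0x1000) == 1); // exactly one page
static_assert(DivCeil(0x1001, 0x1000) == 2); // one byte of spill adds a page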
@@ -15,8 +15,8 @@ namespace FileSys {
 class DirectoryPathParser;
 
 class Path {
-    YUZU_NON_COPYABLE(Path);
-    YUZU_NON_MOVEABLE(Path);
+    CITRON_NON_COPYABLE(Path);
+    CITRON_NON_MOVEABLE(Path);
 
 private:
     static constexpr const char* EmptyPath = "";

@@ -27,7 +27,7 @@ private:
 
 public:
     class WriteBuffer {
-        YUZU_NON_COPYABLE(WriteBuffer);
+        CITRON_NON_COPYABLE(WriteBuffer);
 
     private:
         char* m_buffer;

@@ -14,8 +14,8 @@ namespace FileSys {
 using namespace Common::Literals;
 
 class AesCtrCounterExtendedStorage : public IReadOnlyStorage {
-    YUZU_NON_COPYABLE(AesCtrCounterExtendedStorage);
-    YUZU_NON_MOVEABLE(AesCtrCounterExtendedStorage);
+    CITRON_NON_COPYABLE(AesCtrCounterExtendedStorage);
+    CITRON_NON_MOVEABLE(AesCtrCounterExtendedStorage);
 
 public:
     static constexpr size_t BlockSize = 0x10;

@@ -14,8 +14,8 @@
 namespace FileSys {
 
 class AesCtrStorage : public IStorage {
-    YUZU_NON_COPYABLE(AesCtrStorage);
-    YUZU_NON_MOVEABLE(AesCtrStorage);
+    CITRON_NON_COPYABLE(AesCtrStorage);
+    CITRON_NON_MOVEABLE(AesCtrStorage);
 
 public:
     static constexpr size_t BlockSize = 0x10;

@@ -13,8 +13,8 @@
 namespace FileSys {
 
 class AesXtsStorage : public IReadOnlyStorage {
-    YUZU_NON_COPYABLE(AesXtsStorage);
-    YUZU_NON_MOVEABLE(AesXtsStorage);
+    CITRON_NON_COPYABLE(AesXtsStorage);
+    CITRON_NON_MOVEABLE(AesXtsStorage);
 
 public:
     static constexpr size_t AesBlockSize = 0x10;

@@ -13,8 +13,8 @@ namespace FileSys {
 
 template <size_t DataAlign_, size_t BufferAlign_>
 class AlignmentMatchingStorage : public IStorage {
-    YUZU_NON_COPYABLE(AlignmentMatchingStorage);
-    YUZU_NON_MOVEABLE(AlignmentMatchingStorage);
+    CITRON_NON_COPYABLE(AlignmentMatchingStorage);
+    CITRON_NON_MOVEABLE(AlignmentMatchingStorage);
 
 public:
     static constexpr size_t DataAlign = DataAlign_;

@@ -77,8 +77,8 @@ public:
 
 template <size_t BufferAlign_>
 class AlignmentMatchingStoragePooledBuffer : public IStorage {
-    YUZU_NON_COPYABLE(AlignmentMatchingStoragePooledBuffer);
-    YUZU_NON_MOVEABLE(AlignmentMatchingStoragePooledBuffer);
+    CITRON_NON_COPYABLE(AlignmentMatchingStoragePooledBuffer);
+    CITRON_NON_MOVEABLE(AlignmentMatchingStoragePooledBuffer);
 
 public:
     static constexpr size_t BufferAlign = BufferAlign_;

@@ -18,8 +18,8 @@ namespace FileSys {
 using namespace Common::Literals;
 
 class BucketTree {
-    YUZU_NON_COPYABLE(BucketTree);
-    YUZU_NON_MOVEABLE(BucketTree);
+    CITRON_NON_COPYABLE(BucketTree);
+    CITRON_NON_MOVEABLE(BucketTree);
 
 public:
     static constexpr u32 Magic = Common::MakeMagic('B', 'K', 'T', 'R');

@@ -123,7 +123,7 @@ public:
 
 private:
     class NodeBuffer {
-        YUZU_NON_COPYABLE(NodeBuffer);
+        CITRON_NON_COPYABLE(NodeBuffer);
 
     public:
         NodeBuffer() : m_header() {}

@@ -330,8 +330,8 @@ private:
 };
 
 class BucketTree::Visitor {
-    YUZU_NON_COPYABLE(Visitor);
-    YUZU_NON_MOVEABLE(Visitor);
+    CITRON_NON_COPYABLE(Visitor);
+    CITRON_NON_MOVEABLE(Visitor);
 
 public:
     constexpr Visitor()

@@ -17,8 +17,8 @@ namespace FileSys {
 using namespace Common::Literals;
 
 class CompressedStorage : public IReadOnlyStorage {
-    YUZU_NON_COPYABLE(CompressedStorage);
-    YUZU_NON_MOVEABLE(CompressedStorage);
+    CITRON_NON_COPYABLE(CompressedStorage);
+    CITRON_NON_MOVEABLE(CompressedStorage);
 
 public:
     static constexpr size_t NodeSize = 16_KiB;

@@ -47,8 +47,8 @@ public:
 
 private:
     class CompressedStorageCore {
-        YUZU_NON_COPYABLE(CompressedStorageCore);
-        YUZU_NON_MOVEABLE(CompressedStorageCore);
+        CITRON_NON_COPYABLE(CompressedStorageCore);
+        CITRON_NON_MOVEABLE(CompressedStorageCore);
 
     public:
         CompressedStorageCore() : m_table(), m_data_storage() {}

@@ -674,8 +674,8 @@ private:
     };
 
     class CacheManager {
-        YUZU_NON_COPYABLE(CacheManager);
-        YUZU_NON_MOVEABLE(CacheManager);
+        CITRON_NON_COPYABLE(CacheManager);
+        CITRON_NON_MOVEABLE(CacheManager);
 
    private:
        struct AccessRange {
@@ -57,8 +57,8 @@ struct HierarchicalIntegrityVerificationSizeSet {
 static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationSizeSet>);
 
 class HierarchicalIntegrityVerificationStorage : public IReadOnlyStorage {
-    YUZU_NON_COPYABLE(HierarchicalIntegrityVerificationStorage);
-    YUZU_NON_MOVEABLE(HierarchicalIntegrityVerificationStorage);
+    CITRON_NON_COPYABLE(HierarchicalIntegrityVerificationStorage);
+    CITRON_NON_MOVEABLE(HierarchicalIntegrityVerificationStorage);
 
 public:
     using GenerateRandomFunction = void (*)(void* dst, size_t size);

@@ -12,8 +12,8 @@
 namespace FileSys {
 
 class HierarchicalSha256Storage : public IReadOnlyStorage {
-    YUZU_NON_COPYABLE(HierarchicalSha256Storage);
-    YUZU_NON_MOVEABLE(HierarchicalSha256Storage);
+    CITRON_NON_COPYABLE(HierarchicalSha256Storage);
+    CITRON_NON_MOVEABLE(HierarchicalSha256Storage);
 
 public:
     static constexpr s32 LayerCount = 3;

@@ -13,8 +13,8 @@
 namespace FileSys {
 
 class IndirectStorage : public IReadOnlyStorage {
-    YUZU_NON_COPYABLE(IndirectStorage);
-    YUZU_NON_MOVEABLE(IndirectStorage);
+    CITRON_NON_COPYABLE(IndirectStorage);
+    CITRON_NON_MOVEABLE(IndirectStorage);
 
 public:
     static constexpr s32 StorageCount = 2;

@@ -11,8 +11,8 @@
 namespace FileSys {
 
 class IntegrityVerificationStorage : public IReadOnlyStorage {
-    YUZU_NON_COPYABLE(IntegrityVerificationStorage);
-    YUZU_NON_MOVEABLE(IntegrityVerificationStorage);
+    CITRON_NON_COPYABLE(IntegrityVerificationStorage);
+    CITRON_NON_MOVEABLE(IntegrityVerificationStorage);
 
 public:
     static constexpr s64 HashSize = 256 / 8;

@@ -8,8 +8,8 @@
 namespace FileSys {
 
 class MemoryResourceBufferHoldStorage : public IStorage {
-    YUZU_NON_COPYABLE(MemoryResourceBufferHoldStorage);
-    YUZU_NON_MOVEABLE(MemoryResourceBufferHoldStorage);
+    CITRON_NON_COPYABLE(MemoryResourceBufferHoldStorage);
+    CITRON_NON_MOVEABLE(MemoryResourceBufferHoldStorage);
 
 public:
     MemoryResourceBufferHoldStorage(VirtualFile storage, size_t buffer_size)

@@ -28,8 +28,8 @@ constexpr inline s32 IntegrityDataCacheCountForMeta = 16;
 constexpr inline s32 IntegrityHashCacheCountForMeta = 2;
 
 class SharedNcaBodyStorage : public IReadOnlyStorage {
-    YUZU_NON_COPYABLE(SharedNcaBodyStorage);
-    YUZU_NON_MOVEABLE(SharedNcaBodyStorage);
+    CITRON_NON_COPYABLE(SharedNcaBodyStorage);
+    CITRON_NON_MOVEABLE(SharedNcaBodyStorage);
 
 private:
     VirtualFile m_storage;

@@ -88,8 +88,8 @@ constexpr inline s32 GetKeyTypeValue(u8 key_index, u8 key_generation) {
 }
 
 class NcaReader {
-    YUZU_NON_COPYABLE(NcaReader);
-    YUZU_NON_MOVEABLE(NcaReader);
+    CITRON_NON_COPYABLE(NcaReader);
+    CITRON_NON_MOVEABLE(NcaReader);
 
 public:
     NcaReader();

@@ -154,8 +154,8 @@ private:
 };
 
 class NcaFsHeaderReader {
-    YUZU_NON_COPYABLE(NcaFsHeaderReader);
-    YUZU_NON_MOVEABLE(NcaFsHeaderReader);
+    CITRON_NON_COPYABLE(NcaFsHeaderReader);
+    CITRON_NON_MOVEABLE(NcaFsHeaderReader);
 
 public:
     NcaFsHeaderReader() : m_fs_index(-1) {

@@ -207,8 +207,8 @@ private:
 };
 
 class NcaFileSystemDriver {
-    YUZU_NON_COPYABLE(NcaFileSystemDriver);
-    YUZU_NON_MOVEABLE(NcaFileSystemDriver);
+    CITRON_NON_COPYABLE(NcaFileSystemDriver);
+    CITRON_NON_MOVEABLE(NcaFileSystemDriver);
 
 public:
     struct StorageContext {

@@ -16,7 +16,7 @@ constexpr inline size_t BufferPoolAlignment = 4_KiB;
 constexpr inline size_t BufferPoolWorkSize = 320;
 
 class PooledBuffer {
-    YUZU_NON_COPYABLE(PooledBuffer);
+    CITRON_NON_COPYABLE(PooledBuffer);
 
 public:
     // Constructor/Destructor.

@@ -8,8 +8,8 @@
 namespace FileSys {
 
 class SparseStorage : public IndirectStorage {
-    YUZU_NON_COPYABLE(SparseStorage);
-    YUZU_NON_MOVEABLE(SparseStorage);
+    CITRON_NON_COPYABLE(SparseStorage);
+    CITRON_NON_MOVEABLE(SparseStorage);
 
 private:
     class ZeroStorage : public IReadOnlyStorage {

@@ -8,8 +8,8 @@
 namespace FileSys {
 
 class RegionSwitchStorage : public IReadOnlyStorage {
-    YUZU_NON_COPYABLE(RegionSwitchStorage);
-    YUZU_NON_MOVEABLE(RegionSwitchStorage);
+    CITRON_NON_COPYABLE(RegionSwitchStorage);
+    CITRON_NON_MOVEABLE(RegionSwitchStorage);
 
 public:
     struct Region {

@@ -30,8 +30,8 @@ enum class VfsEntryType {
 // functionality, they will need to override.
 class VfsFilesystem {
 public:
-    YUZU_NON_COPYABLE(VfsFilesystem);
-    YUZU_NON_MOVEABLE(VfsFilesystem);
+    CITRON_NON_COPYABLE(VfsFilesystem);
+    CITRON_NON_MOVEABLE(VfsFilesystem);
 
     explicit VfsFilesystem(VirtualDir root);
     virtual ~VfsFilesystem();

@@ -81,8 +81,8 @@ protected:
 // A class representing a file in an abstract filesystem.
 class VfsFile {
 public:
-    YUZU_NON_COPYABLE(VfsFile);
-    YUZU_NON_MOVEABLE(VfsFile);
+    CITRON_NON_COPYABLE(VfsFile);
+    CITRON_NON_MOVEABLE(VfsFile);
 
     VfsFile() = default;
     virtual ~VfsFile();

@@ -184,8 +184,8 @@ public:
 // A class representing a directory in an abstract filesystem.
 class VfsDirectory {
 public:
-    YUZU_NON_COPYABLE(VfsDirectory);
-    YUZU_NON_MOVEABLE(VfsDirectory);
+    CITRON_NON_COPYABLE(VfsDirectory);
+    CITRON_NON_MOVEABLE(VfsDirectory);
 
     VfsDirectory() = default;
     virtual ~VfsDirectory();
@@ -27,8 +27,8 @@ private:
     static constexpr inline ClassTokenType ClassToken() { return ::Kernel::ClassToken<CLASS>; } \
                                                                                                 \
 public:                                                                                         \
-    YUZU_NON_COPYABLE(CLASS);                                                                   \
-    YUZU_NON_MOVEABLE(CLASS);                                                                   \
+    CITRON_NON_COPYABLE(CLASS);                                                                 \
+    CITRON_NON_MOVEABLE(CLASS);                                                                 \
                                                                                                 \
     using BaseClass = BASE_CLASS;                                                               \
     static constexpr TypeObj GetStaticTypeObj() {                                               \

@@ -211,7 +211,7 @@ private:
 template <typename T>
 class KScopedAutoObject {
 public:
-    YUZU_NON_COPYABLE(KScopedAutoObject);
+    CITRON_NON_COPYABLE(KScopedAutoObject);
 
     constexpr KScopedAutoObject() = default;
 

@@ -16,8 +16,8 @@ class KProcess;
 
 class KAutoObjectWithListContainer {
 public:
-    YUZU_NON_COPYABLE(KAutoObjectWithListContainer);
-    YUZU_NON_MOVEABLE(KAutoObjectWithListContainer);
+    CITRON_NON_COPYABLE(KAutoObjectWithListContainer);
+    CITRON_NON_MOVEABLE(KAutoObjectWithListContainer);
 
     using ListType = boost::intrusive::rbtree<KAutoObjectWithList>;
 

@@ -12,8 +12,8 @@ namespace Kernel {
 
 template <typename T, bool ClearNode = false>
 class KDynamicResourceManager {
-    YUZU_NON_COPYABLE(KDynamicResourceManager);
-    YUZU_NON_MOVEABLE(KDynamicResourceManager);
+    CITRON_NON_COPYABLE(KDynamicResourceManager);
+    CITRON_NON_MOVEABLE(KDynamicResourceManager);
 
 public:
     using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;

@@ -13,8 +13,8 @@ namespace Kernel {
 
 template <typename T, bool ClearNode = false>
 class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
-    YUZU_NON_COPYABLE(KDynamicSlabHeap);
-    YUZU_NON_MOVEABLE(KDynamicSlabHeap);
+    CITRON_NON_COPYABLE(KDynamicSlabHeap);
+    CITRON_NON_MOVEABLE(KDynamicSlabHeap);
 
 public:
     constexpr KDynamicSlabHeap() = default;

@@ -21,8 +21,8 @@ namespace Kernel {
 class KernelCore;
 
 class KHandleTable {
-    YUZU_NON_COPYABLE(KHandleTable);
-    YUZU_NON_MOVEABLE(KHandleTable);
+    CITRON_NON_COPYABLE(KHandleTable);
+    CITRON_NON_MOVEABLE(KHandleTable);
 
 public:
     static constexpr size_t MaxTableSize = 1024;

@@ -17,8 +17,8 @@ class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode<KMemory
     friend class KMemoryRegionTree;
 
 public:
-    YUZU_NON_COPYABLE(KMemoryRegion);
-    YUZU_NON_MOVEABLE(KMemoryRegion);
+    CITRON_NON_COPYABLE(KMemoryRegion);
+    CITRON_NON_MOVEABLE(KMemoryRegion);
 
     constexpr KMemoryRegion() = default;
     constexpr KMemoryRegion(u64 address, u64 last_address)

@@ -123,8 +123,8 @@ private:
         Common::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
 
 public:
-    YUZU_NON_COPYABLE(KMemoryRegionTree);
-    YUZU_NON_MOVEABLE(KMemoryRegionTree);
+    CITRON_NON_COPYABLE(KMemoryRegionTree);
+    CITRON_NON_MOVEABLE(KMemoryRegionTree);
 
     using value_type = TreeType::value_type;
     using size_type = TreeType::size_type;

@@ -327,8 +327,8 @@ private:
 
 class KMemoryRegionAllocator final {
 public:
-    YUZU_NON_COPYABLE(KMemoryRegionAllocator);
-    YUZU_NON_MOVEABLE(KMemoryRegionAllocator);
+    CITRON_NON_COPYABLE(KMemoryRegionAllocator);
+    CITRON_NON_MOVEABLE(KMemoryRegionAllocator);
 
     static constexpr size_t MaxMemoryRegions = 200;
 

@@ -14,8 +14,8 @@ namespace Kernel {
 namespace {
 
 class KScopedLightLockPair {
-    YUZU_NON_COPYABLE(KScopedLightLockPair);
-    YUZU_NON_MOVEABLE(KScopedLightLockPair);
+    CITRON_NON_COPYABLE(KScopedLightLockPair);
+    CITRON_NON_MOVEABLE(KScopedLightLockPair);
 
 private:
     KLightLock* m_lower;

@@ -49,8 +49,8 @@ class KResourceLimit;
 class KSystemResource;
 
 class KPageTableBase {
-    YUZU_NON_COPYABLE(KPageTableBase);
-    YUZU_NON_MOVEABLE(KPageTableBase);
+    CITRON_NON_COPYABLE(KPageTableBase);
+    CITRON_NON_MOVEABLE(KPageTableBase);
 
 public:
     using TraversalEntry = Common::PageTable::TraversalEntry;

@@ -33,8 +33,8 @@ class KScopedSchedulerLockAndSleep;
 
 class KScheduler final {
 public:
-    YUZU_NON_COPYABLE(KScheduler);
-    YUZU_NON_MOVEABLE(KScheduler);
+    CITRON_NON_COPYABLE(KScheduler);
+    CITRON_NON_MOVEABLE(KScheduler);
 
     using LockType = KAbstractSchedulerLock<KScheduler>;
 

@@ -18,8 +18,8 @@ class KernelCore;
 namespace impl {
 
 class KSlabHeapImpl {
-    YUZU_NON_COPYABLE(KSlabHeapImpl);
-    YUZU_NON_MOVEABLE(KSlabHeapImpl);
+    CITRON_NON_COPYABLE(KSlabHeapImpl);
+    CITRON_NON_MOVEABLE(KSlabHeapImpl);
 
 public:
     struct Node {

@@ -72,8 +72,8 @@ private:
 
 template <bool SupportDynamicExpansion>
 class KSlabHeapBase : protected impl::KSlabHeapImpl {
-    YUZU_NON_COPYABLE(KSlabHeapBase);
-    YUZU_NON_MOVEABLE(KSlabHeapBase);
+    CITRON_NON_COPYABLE(KSlabHeapBase);
+    CITRON_NON_MOVEABLE(KSlabHeapBase);
 
 private:
     size_t m_obj_size{};

@@ -14,8 +14,8 @@ class KSpinLock {
 public:
     explicit KSpinLock() = default;
 
-    YUZU_NON_COPYABLE(KSpinLock);
-    YUZU_NON_MOVEABLE(KSpinLock);
+    CITRON_NON_COPYABLE(KSpinLock);
+    CITRON_NON_MOVEABLE(KSpinLock);
 
     void Lock();
     void Unlock();

@@ -26,8 +26,8 @@ public:
     PhysicalCore(KernelCore& kernel, std::size_t core_index);
     ~PhysicalCore();
 
-    YUZU_NON_COPYABLE(PhysicalCore);
-    YUZU_NON_MOVEABLE(PhysicalCore);
+    CITRON_NON_COPYABLE(PhysicalCore);
+    CITRON_NON_MOVEABLE(PhysicalCore);
 
     // Execute guest code running on the given thread.
     void RunThread(KThread* thread);

@@ -298,8 +298,8 @@ private:
 namespace ResultImpl {
 template <auto EvaluateResult, class F>
 class ScopedResultGuard {
-    YUZU_NON_COPYABLE(ScopedResultGuard);
-    YUZU_NON_MOVEABLE(ScopedResultGuard);
+    CITRON_NON_COPYABLE(ScopedResultGuard);
+    CITRON_NON_MOVEABLE(ScopedResultGuard);
 
 private:
     Result& m_ref;

@@ -11,7 +11,7 @@ namespace Service::JIT {
 
 class CodeMemory {
 public:
-    YUZU_NON_COPYABLE(CodeMemory);
+    CITRON_NON_COPYABLE(CodeMemory);
 
     explicit CodeMemory() = default;
 
@@ -13,8 +13,8 @@
 #include "core/memory.h"
 #include "video_core/host1x/host1x.h"
 
-using Core::Memory::YUZU_PAGESIZE;
-constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16;
+using Core::Memory::CITRON_PAGESIZE;
+constexpr size_t BIG_PAGE_SIZE = CITRON_PAGESIZE * 16;
 
 namespace Service::Nvidia::NvCore {
 NvMap::Handle::Handle(u64 size_, Id id_)
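With CITRON_PAGESIZE = 0x1000 (defined in core/memory.h at the end of this diff), the derived constant above works out to 16 small pages:

#include <cstddef>

constexpr std::size_t kSmallPage = 0x1000;        // CITRON_PAGESIZE
constexpr std::size_t kBigPage = kSmallPage * 16; // BIG_PAGE_SIZE above
static_assert(kBigPage == 0x10000);               // 64 KiB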
@@ -32,7 +32,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
 
     flags = pFlags;
     kind = pKind;
-    align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
+    align = pAlign < CITRON_PAGESIZE ? CITRON_PAGESIZE : pAlign;
     session_id = pSessionId;
 
     // This flag is only applicable for handles with an address passed

@@ -43,7 +43,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
                  "Mapping nvmap handles without a CPU side address is unimplemented!");
     }
 
-    size = Common::AlignUp(size, YUZU_PAGESIZE);
+    size = Common::AlignUp(size, CITRON_PAGESIZE);
     aligned_size = Common::AlignUp(size, align);
     address = pAddress;
     allocated = true;

@@ -152,7 +152,7 @@ NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
         return NvResult::BadValue;
     }
 
-    if (params.page_size != VM::YUZU_PAGESIZE && params.page_size != vm.big_page_size) {
+    if (params.page_size != VM::CITRON_PAGESIZE && params.page_size != vm.big_page_size) {
         return NvResult::BadValue;
     }
 

@@ -162,10 +162,10 @@ NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
         return NvResult::NotImplemented;
     }
 
-    const u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS
+    const u32 page_size_bits{params.page_size == VM::CITRON_PAGESIZE ? VM::PAGE_SIZE_BITS
                                                                    : vm.big_page_size_bits};
 
-    auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator
+    auto& allocator{params.page_size == VM::CITRON_PAGESIZE ? *vm.small_page_allocator
                                                           : *vm.big_page_allocator};
 
     if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {

@@ -189,7 +189,7 @@ NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
         .mappings{},
         .page_size = params.page_size,
         .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
-        .big_pages = params.page_size != VM::YUZU_PAGESIZE,
+        .big_pages = params.page_size != VM::CITRON_PAGESIZE,
     };
 
     return NvResult::Success;

@@ -201,7 +201,7 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
     if (!mapping->fixed) {
         auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
         u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
-        u32 page_size{mapping->big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
+        u32 page_size{mapping->big_page ? vm.big_page_size : VM::CITRON_PAGESIZE};
         u64 aligned_size{Common::AlignUp(mapping->size, page_size)};
 
         allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),

@@ -248,9 +248,9 @@ NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) {
             gmmu->Unmap(params.offset, allocation.size);
         }
 
-        auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator
+        auto& allocator{params.page_size == VM::CITRON_PAGESIZE ? *vm.small_page_allocator
                                                               : *vm.big_page_allocator};
-        u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS
+        u32 page_size_bits{params.page_size == VM::CITRON_PAGESIZE ? VM::PAGE_SIZE_BITS
                                                                  : vm.big_page_size_bits};
 
         allocator.Free(static_cast<u32>(params.offset >> page_size_bits),

@@ -360,7 +360,7 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
     bool big_page{[&]() {
         if (Common::IsAligned(handle->align, vm.big_page_size)) {
             return true;
-        } else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) {
+        } else if (Common::IsAligned(handle->align, VM::CITRON_PAGESIZE)) {
             return false;
         } else {
             ASSERT(false);

@@ -387,7 +387,7 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
         mapping_map[params.offset] = mapping;
     } else {
         auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
-        u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
+        u32 page_size{big_page ? vm.big_page_size : VM::CITRON_PAGESIZE};
         u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
 
         params.offset = static_cast<u64>(allocator.Allocate(

@@ -461,7 +461,7 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
     params.regions = std::array<VaRegion, 2>{
         VaRegion{
             .offset = vm.small_page_allocator->GetVAStart() << VM::PAGE_SIZE_BITS,
-            .page_size = VM::YUZU_PAGESIZE,
+            .page_size = VM::CITRON_PAGESIZE,
             ._pad0_{},
             .pages = vm.small_page_allocator->GetVALimit() - vm.small_page_allocator->GetVAStart(),
         },
@@ -190,8 +190,8 @@ private:
     std::mutex mutex; //!< Locks all AS operations
 
     struct VM {
-        static constexpr u32 YUZU_PAGESIZE{0x1000};
-        static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(YUZU_PAGESIZE)};
+        static constexpr u32 CITRON_PAGESIZE{0x1000};
+        static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(CITRON_PAGESIZE)};
 
         static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000};
        static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000};
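std::countr_zero (C++20, <bit>) counts trailing zero bits, so PAGE_SIZE_BITS above is derived from the page size rather than hard-coded; changing one constant keeps the other consistent:

#include <bit>

static_assert(std::countr_zero(0x1000u) == 12);  // PAGE_SIZE_BITS for a 4 KiB page
static_assert(std::countr_zero(0x20000u) == 17); // bit width of DEFAULT_BIG_PAGE_SIZE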
@@ -17,7 +17,7 @@
 #include "core/hle/service/nvdrv/devices/nvmap.h"
 #include "core/memory.h"
 
-using Core::Memory::YUZU_PAGESIZE;
+using Core::Memory::CITRON_PAGESIZE;
 
 namespace Service::Nvidia::Devices {
 

@@ -82,7 +82,7 @@ NvResult nvmap::IocCreate(IocCreateParams& params) {
 
     std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
     auto result =
-        file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description);
+        file.CreateHandle(Common::AlignUp(params.size, CITRON_PAGESIZE), handle_description);
     if (result != NvResult::Success) {
         LOG_CRITICAL(Service_NVDRV, "Failed to create Object");
         return result;

@@ -108,8 +108,8 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
     }
 
     // Force page size alignment at a minimum
-    if (params.align < YUZU_PAGESIZE) {
-        params.align = YUZU_PAGESIZE;
+    if (params.align < CITRON_PAGESIZE) {
+        params.align = CITRON_PAGESIZE;
     }
 
     auto handle_description{file.GetHandle(params.handle)};

@@ -228,10 +228,10 @@ struct ProcessContext {
         R_UNLESS(bss_size == expected_bss_size, RO::ResultInvalidNro);
 
         // Validate all sizes are aligned.
-        R_UNLESS(Common::IsAligned(text_size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidNro);
-        R_UNLESS(Common::IsAligned(ro_size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidNro);
-        R_UNLESS(Common::IsAligned(rw_size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidNro);
-        R_UNLESS(Common::IsAligned(bss_size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidNro);
+        R_UNLESS(Common::IsAligned(text_size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidNro);
+        R_UNLESS(Common::IsAligned(ro_size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidNro);
+        R_UNLESS(Common::IsAligned(rw_size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidNro);
+        R_UNLESS(Common::IsAligned(bss_size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidNro);
 
         // Validate sections are in order.
         R_UNLESS(text_ofs <= ro_ofs, RO::ResultInvalidNro);

@@ -286,16 +286,16 @@ private:
 };
 
 Result ValidateAddressAndNonZeroSize(u64 address, u64 size) {
-    R_UNLESS(Common::IsAligned(address, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidAddress);
+    R_UNLESS(Common::IsAligned(address, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidAddress);
     R_UNLESS(size != 0, RO::ResultInvalidSize);
-    R_UNLESS(Common::IsAligned(size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidSize);
+    R_UNLESS(Common::IsAligned(size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidSize);
     R_UNLESS(address < address + size, RO::ResultInvalidSize);
     R_SUCCEED();
 }
 
 Result ValidateAddressAndSize(u64 address, u64 size) {
-    R_UNLESS(Common::IsAligned(address, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidAddress);
-    R_UNLESS(Common::IsAligned(size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidSize);
+    R_UNLESS(Common::IsAligned(address, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidAddress);
+    R_UNLESS(Common::IsAligned(size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidSize);
     R_UNLESS(size == 0 || address < address + size, RO::ResultInvalidSize);
     R_SUCCEED();
 }

@@ -369,7 +369,7 @@ public:
         ASSERT(context != nullptr);
 
         // Validate address.
-        R_UNLESS(Common::IsAligned(nrr_address, Core::Memory::YUZU_PAGESIZE),
+        R_UNLESS(Common::IsAligned(nrr_address, Core::Memory::CITRON_PAGESIZE),
                  RO::ResultInvalidAddress);
 
         // Check the NRR is loaded.

@@ -436,7 +436,7 @@ public:
         ASSERT(context != nullptr);
 
         // Validate address.
-        R_UNLESS(Common::IsAligned(nro_address, Core::Memory::YUZU_PAGESIZE),
+        R_UNLESS(Common::IsAligned(nro_address, Core::Memory::CITRON_PAGESIZE),
                  RO::ResultInvalidAddress);
 
         // Check the NRO is loaded.

@@ -932,7 +932,7 @@ Result ISystemSettingsServer::SetPrimaryAlbumStorage(PrimaryAlbumStorage primary
 Result ISystemSettingsServer::GetBatteryLot(Out<BatteryLot> out_battery_lot) {
     LOG_INFO(Service_SET, "called");
 
-    *out_battery_lot = {"YUZU0EMULATOR14022024"};
+    *out_battery_lot = {"CITRON0EMULATOR14022024"};
     R_SUCCEED();
 }

@@ -23,7 +23,7 @@ template <typename T>
 struct CFReleaser {
     T ptr;
 
-    YUZU_NON_COPYABLE(CFReleaser);
+    CITRON_NON_COPYABLE(CFReleaser);
     constexpr CFReleaser() : ptr(nullptr) {}
     constexpr CFReleaser(T ptr) : ptr(ptr) {}
     constexpr operator T() {

@@ -23,7 +23,7 @@ namespace {
 
 Result AllocateSharedBufferMemory(std::unique_ptr<Kernel::KPageGroup>* out_page_group,
                                   Core::System& system, u32 size) {
-    using Core::Memory::YUZU_PAGESIZE;
+    using Core::Memory::CITRON_PAGESIZE;
 
     // Allocate memory for the system shared buffer.
     auto& kernel = system.Kernel();

@@ -34,7 +34,7 @@ Result AllocateSharedBufferMemory(std::unique_ptr<Kernel::KPageGroup>* out_page_
 
     // Allocate memory from secure pool.
     R_TRY(kernel.MemoryManager().AllocateAndOpen(
-        pg.get(), size / YUZU_PAGESIZE,
+        pg.get(), size / CITRON_PAGESIZE,
        Kernel::KMemoryManager::EncodeOption(Kernel::KMemoryManager::Pool::Secure,
                                             Kernel::KMemoryManager::Direction::FromBack)));
 

@@ -58,13 +58,13 @@ Result AllocateSharedBufferMemory(std::unique_ptr<Kernel::KPageGroup>* out_page_
 Result MapSharedBufferIntoProcessAddressSpace(Common::ProcessAddress* out_map_address,
                                               std::unique_ptr<Kernel::KPageGroup>& pg,
                                               Kernel::KProcess* process, Core::System& system) {
-    using Core::Memory::YUZU_PAGESIZE;
+    using Core::Memory::CITRON_PAGESIZE;
 
     auto& page_table = process->GetPageTable();
 
     // Get bounds of where mapping is possible.
     const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart());
-    const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE;
+    const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / CITRON_PAGESIZE;
     const auto state = Kernel::KMemoryState::IoMemory;
     const auto perm = Kernel::KMemoryPermission::UserReadWrite;
     std::mt19937_64 rng{process->GetRandomEntropy(0)};

@@ -73,7 +73,7 @@ Result MapSharedBufferIntoProcessAddressSpace(Common::ProcessAddress* out_map_ad
     Result res = ResultSuccess;
     int i;
     for (i = 0; i < 64; i++) {
-        *out_map_address = alias_code_begin + ((rng() % alias_code_size) * YUZU_PAGESIZE);
+        *out_map_address = alias_code_begin + ((rng() % alias_code_size) * CITRON_PAGESIZE);
         res = page_table.MapPageGroup(*out_map_address, *pg, state, perm);
         if (R_SUCCEEDED(res)) {
             break;
@@ -12,7 +12,7 @@
 #ifdef _WIN32
 #include <winsock2.h>
 #include <ws2tcpip.h>
-#elif YUZU_UNIX
+#elif CITRON_UNIX
 #include <arpa/inet.h>
 #include <errno.h>
 #include <fcntl.h>
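CITRON_UNIX (like the YUZU_UNIX it replaces) is presumably a compile definition supplied by the build system on non-Windows hosts rather than a header-defined macro — an assumption; this diff only shows its usage sites. The selection pattern is the usual one:

// Hypothetical sketch: platform-specific socket headers keyed on the define.
#ifdef _WIN32
#include <winsock2.h>   // Windows socket API
#elif CITRON_UNIX
#include <sys/socket.h> // BSD socket API (define assumed set by the build system)
#endif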
@@ -77,7 +77,7 @@ SOCKET GetInterruptSocket() {
 sockaddr TranslateFromSockAddrIn(SockAddrIn input) {
     sockaddr_in result;
 
-#if YUZU_UNIX
+#if CITRON_UNIX
     result.sin_len = sizeof(result);
 #endif
 

@@ -162,7 +162,7 @@ Errno TranslateNativeError(int e, CallType call_type = CallType::Other) {
     }
 }
 
-#elif YUZU_UNIX // ^ _WIN32 v YUZU_UNIX
+#elif CITRON_UNIX // ^ _WIN32 v CITRON_UNIX
 
 using SOCKET = int;
 using WSAPOLLFD = pollfd;

@@ -835,7 +835,7 @@ std::pair<s32, Errno> Socket::Send(std::span<const u8> message, int flags) {
     ASSERT(flags == 0);
 
     int native_flags = 0;
-#if YUZU_UNIX
+#if CITRON_UNIX
     native_flags |= MSG_NOSIGNAL; // do not send us SIGPIPE
 #endif
     const auto result = send(fd, reinterpret_cast<const char*>(message.data()),

@@ -13,7 +13,7 @@
 
 #ifdef _WIN32
 #include <winsock2.h>
-#elif YUZU_UNIX
+#elif CITRON_UNIX
 #include <netinet/in.h>
 #endif
 

@@ -104,7 +104,7 @@ constexpr IPv4Address TranslateIPv4(in_addr addr) {
     auto& bytes = addr.S_un.S_un_b;
     return IPv4Address{bytes.s_b1, bytes.s_b2, bytes.s_b3, bytes.s_b4};
 }
-#elif YUZU_UNIX
+#elif CITRON_UNIX
 constexpr IPv4Address TranslateIPv4(in_addr addr) {
     const u32 bytes = addr.s_addr;
     return IPv4Address{static_cast<u8>(bytes), static_cast<u8>(bytes >> 8),

@@ -12,7 +12,7 @@
 #include "core/internal_network/socket_proxy.h"
 #include "network/network.h"
 
-#if YUZU_UNIX
+#if CITRON_UNIX
 #include <sys/socket.h>
 #endif
 

@@ -9,7 +9,7 @@
 #include <utility>
 
 #if defined(_WIN32)
-#elif !YUZU_UNIX
+#elif !CITRON_UNIX
 #error "Platform not implemented"
 #endif
 

@@ -24,7 +24,7 @@ struct ProxyPacket;
 
 class SocketBase {
 public:
-#ifdef YUZU_UNIX
+#ifdef CITRON_UNIX
     using SOCKET = int;
     static constexpr SOCKET INVALID_SOCKET = -1;
     static constexpr SOCKET SOCKET_ERROR = -1;

@@ -39,8 +39,8 @@ public:
     explicit SocketBase(SOCKET fd_) : fd{fd_} {}
     virtual ~SocketBase() = default;
 
-    YUZU_NON_COPYABLE(SocketBase);
-    YUZU_NON_MOVEABLE(SocketBase);
+    CITRON_NON_COPYABLE(SocketBase);
+    CITRON_NON_MOVEABLE(SocketBase);
 
     virtual Errno Initialize(Domain domain, Type type, Protocol protocol) = 0;
 
@@ -14,7 +14,7 @@ namespace Loader {
 
 namespace {
 constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
+    return static_cast<u32>((size + Core::Memory::CITRON_PAGEMASK) & ~Core::Memory::CITRON_PAGEMASK);
 }
 } // Anonymous namespace
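PageAlignSize above (and its duplicates in the NRO and NSO loaders below) uses the standard mask round-up identity: adding PAGEMASK and then clearing the low bits rounds any size up to the next page multiple, while already-aligned sizes pass through unchanged. A self-contained check:

#include <cstdint>

constexpr std::uint32_t kPageMask = 0xFFF; // Core::Memory::CITRON_PAGEMASK

constexpr std::uint32_t PageAlignSize(std::uint32_t size) {
    return (size + kPageMask) & ~kPageMask;
}

static_assert(PageAlignSize(0x1000) == 0x1000); // already aligned: unchanged
static_assert(PageAlignSize(0x1001) == 0x2000); // rounded up to the next page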
@@ -143,8 +143,8 @@ std::ostream& operator<<(std::ostream& os, ResultStatus status);
 /// Interface for loading an application
 class AppLoader {
 public:
-    YUZU_NON_COPYABLE(AppLoader);
-    YUZU_NON_MOVEABLE(AppLoader);
+    CITRON_NON_COPYABLE(AppLoader);
+    CITRON_NON_MOVEABLE(AppLoader);
 
     struct LoadParameters {
         s32 main_thread_priority;

@@ -140,7 +140,7 @@ bool AppLoader_NRO::IsHomebrew() {
 }
 
 static constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
+    return static_cast<u32>((size + Core::Memory::CITRON_PAGEMASK) & ~Core::Memory::CITRON_PAGEMASK);
 }
 
 static bool LoadNroImpl(Core::System& system, Kernel::KProcess& process,

@@ -49,7 +49,7 @@ std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
 }
 
 constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
+    return static_cast<u32>((size + Core::Memory::CITRON_PAGEMASK) & ~Core::Memory::CITRON_PAGEMASK);
 }
 } // Anonymous namespace

@@ -66,11 +66,11 @@ struct Memory::Impl {
     void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
                          Common::PhysicalAddress target, Common::MemoryPermission perms,
                          bool separate_heap) {
-        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
+        ASSERT_MSG((size & CITRON_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & CITRON_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
                    GetInteger(target));
-        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
+        MapPages(page_table, base / CITRON_PAGESIZE, size / CITRON_PAGESIZE, target,
                  Common::PageType::Memory);
 
         if (current_page_table->fastmem_arena) {

@@ -81,9 +81,9 @@ struct Memory::Impl {
 
     void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
                      bool separate_heap) {
-        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
-        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
+        ASSERT_MSG((size & CITRON_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & CITRON_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
+        MapPages(page_table, base / CITRON_PAGESIZE, size / CITRON_PAGESIZE, 0,
                  Common::PageType::Unmapped);
 
         if (current_page_table->fastmem_arena) {

@@ -93,8 +93,8 @@ struct Memory::Impl {
 
     void ProtectRegion(Common::PageTable& page_table, VAddr vaddr, u64 size,
                        Common::MemoryPermission perms) {
-        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((vaddr & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", vaddr);
+        ASSERT_MSG((size & CITRON_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((vaddr & CITRON_PAGEMASK) == 0, "non-page aligned base: {:016X}", vaddr);
 
         if (!current_page_table->fastmem_arena) {
             return;

@@ -102,9 +102,9 @@ struct Memory::Impl {
 
         u64 protect_bytes{};
         u64 protect_begin{};
-        for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
+        for (u64 addr = vaddr; addr < vaddr + size; addr += CITRON_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[addr >> YUZU_PAGEBITS].Type()};
+                current_page_table->pointers[addr >> CITRON_PAGEBITS].Type()};
             switch (page_type) {
             case Common::PageType::RasterizerCachedMemory:
                 if (protect_bytes > 0) {

@@ -116,7 +116,7 @@ struct Memory::Impl {
                 if (protect_bytes == 0) {
                     protect_begin = addr;
                 }
-                protect_bytes += YUZU_PAGESIZE;
+                protect_bytes += CITRON_PAGESIZE;
             }
         }
 

@@ -127,7 +127,7 @@ struct Memory::Impl {
 
     [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(u64 vaddr) const {
         const Common::PhysicalAddress paddr{
-            current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
+            current_page_table->backing_addr[vaddr >> CITRON_PAGEBITS]};
 
         if (!paddr) {
             return {};

@@ -138,7 +138,7 @@ struct Memory::Impl {
 
     [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const {
         const Common::PhysicalAddress paddr{
-            current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
+            current_page_table->backing_addr[vaddr >> CITRON_PAGEBITS]};
 
         if (paddr == 0) {
             return {};

@@ -247,8 +247,8 @@ struct Memory::Impl {
                    auto on_memory, auto on_rasterizer, auto increment) {
         const auto& page_table = *current_page_table;
         std::size_t remaining_size = size;
-        std::size_t page_index = addr >> YUZU_PAGEBITS;
-        std::size_t page_offset = addr & YUZU_PAGEMASK;
+        std::size_t page_index = addr >> CITRON_PAGEBITS;
+        std::size_t page_offset = addr & CITRON_PAGEMASK;
         bool user_accessible = true;
 
         if (!AddressSpaceContains(page_table, addr, size)) [[unlikely]] {

@@ -258,9 +258,9 @@ struct Memory::Impl {
 
         while (remaining_size) {
             const std::size_t copy_amount =
-                std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
+                std::min(static_cast<std::size_t>(CITRON_PAGESIZE) - page_offset, remaining_size);
             const auto current_vaddr =
-                static_cast<u64>((page_index << YUZU_PAGEBITS) + page_offset);
+                static_cast<u64>((page_index << CITRON_PAGEBITS) + page_offset);
 
             const auto [pointer, type] = page_table.pointers[page_index].PointerType();
             switch (type) {

@@ -271,7 +271,7 @@ struct Memory::Impl {
             }
             case Common::PageType::Memory: {
                 u8* mem_ptr =
-                    reinterpret_cast<u8*>(pointer + page_offset + (page_index << YUZU_PAGEBITS));
+                    reinterpret_cast<u8*>(pointer + page_offset + (page_index << CITRON_PAGEBITS));
                 on_memory(copy_amount, mem_ptr);
                 break;
             }
@@ -336,16 +336,16 @@ struct Memory::Impl {
     }
 
     const u8* GetSpan(const VAddr src_addr, const std::size_t size) const {
-        if (current_page_table->blocks[src_addr >> YUZU_PAGEBITS] ==
-            current_page_table->blocks[(src_addr + size) >> YUZU_PAGEBITS]) {
+        if (current_page_table->blocks[src_addr >> CITRON_PAGEBITS] ==
+            current_page_table->blocks[(src_addr + size) >> CITRON_PAGEBITS]) {
             return GetPointerSilent(src_addr);
         }
         return nullptr;
     }
 
     u8* GetSpan(const VAddr src_addr, const std::size_t size) {
-        if (current_page_table->blocks[src_addr >> YUZU_PAGEBITS] ==
-            current_page_table->blocks[(src_addr + size) >> YUZU_PAGEBITS]) {
+        if (current_page_table->blocks[src_addr >> CITRON_PAGEBITS] ==
+            current_page_table->blocks[(src_addr + size) >> CITRON_PAGEBITS]) {
             return GetPointerSilent(src_addr);
         }
         return nullptr;

@@ -499,10 +499,10 @@ struct Memory::Impl {
         // Iterate over a contiguous CPU address space, marking/unmarking the region.
         // The region is at a granularity of CPU pages.
 
-        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> CITRON_PAGEBITS) - (vaddr >> CITRON_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += CITRON_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
+                current_page_table->pointers[vaddr >> CITRON_PAGEBITS].Type()};
             if (debug) {
                 // Switch page type to debug if now debug
                 switch (page_type) {

@@ -514,7 +514,7 @@ struct Memory::Impl {
                     // Page is already marked.
                     break;
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                    current_page_table->pointers[vaddr >> CITRON_PAGEBITS].Store(
                         0, Common::PageType::DebugMemory);
                     break;
                 default:

@@ -531,9 +531,9 @@ struct Memory::Impl {
                     // Don't mess with already non-debug or rasterizer memory.
                     break;
                 case Common::PageType::DebugMemory: {
-                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
-                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                        reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK),
+                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~CITRON_PAGEMASK)};
+                    current_page_table->pointers[vaddr >> CITRON_PAGEBITS].Store(
+                        reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~CITRON_PAGEMASK),
                         Common::PageType::Memory);
                     break;
                 }

@@ -565,10 +565,10 @@ struct Memory::Impl {
         // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
         // is different). This assumes the specified GPU address region is contiguous as well.
 
-        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> CITRON_PAGEBITS) - (vaddr >> CITRON_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += CITRON_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
+                current_page_table->pointers[vaddr >> CITRON_PAGEBITS].Type()};
             if (cached) {
                 // Switch page type to cached if now cached
                 switch (page_type) {

@@ -578,7 +578,7 @@ struct Memory::Impl {
                     break;
                 case Common::PageType::DebugMemory:
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                    current_page_table->pointers[vaddr >> CITRON_PAGEBITS].Store(
                         0, Common::PageType::RasterizerCachedMemory);
                     break;
                 case Common::PageType::RasterizerCachedMemory:

@@ -601,16 +601,16 @@ struct Memory::Impl {
                     // that this area is already unmarked as cached.
                     break;
                 case Common::PageType::RasterizerCachedMemory: {
-                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)};
+                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~CITRON_PAGEMASK)};
                     if (pointer == nullptr) {
                         // It's possible that this function has been called while updating the
                         // pagetable after unmapping a VMA. In that case the underlying VMA will no
                         // longer exist, and we should just leave the pagetable entry blank.
-                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                        current_page_table->pointers[vaddr >> CITRON_PAGEBITS].Store(
                             0, Common::PageType::Unmapped);
                     } else {
-                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                            reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK),
+                        current_page_table->pointers[vaddr >> CITRON_PAGEBITS].Store(
+                            reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~CITRON_PAGEMASK),
                             Common::PageType::Memory);
                     }
                     break;
@@ -636,7 +636,7 @@ struct Memory::Impl {
         auto base = GetInteger(base_address);
 
         LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target),
-                  base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE);
+                  base * CITRON_PAGESIZE, (base + size) * CITRON_PAGESIZE);
 
         const auto end = base + size;
         ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",

@@ -644,7 +644,7 @@ struct Memory::Impl {
 
         if (!target) {
             ASSERT_MSG(type != Common::PageType::Memory,
-                       "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);
+                       "Mapping memory page without a pointer @ {:016x}", base * CITRON_PAGESIZE);
 
             while (base != end) {
                 page_table.pointers[base].Store(0, type);

@@ -657,17 +657,17 @@ struct Memory::Impl {
             while (base != end) {
                 auto host_ptr =
                     reinterpret_cast<uintptr_t>(system.DeviceMemory().GetPointer<u8>(target)) -
-                    (base << YUZU_PAGEBITS);
-                auto backing = GetInteger(target) - (base << YUZU_PAGEBITS);
+                    (base << CITRON_PAGEBITS);
+                auto backing = GetInteger(target) - (base << CITRON_PAGEBITS);
                 page_table.pointers[base].Store(host_ptr, type);
                 page_table.backing_addr[base] = backing;
-                page_table.blocks[base] = orig_base << YUZU_PAGEBITS;
+                page_table.blocks[base] = orig_base << CITRON_PAGEBITS;
 
                 ASSERT_MSG(page_table.pointers[base].Pointer(),
                            "memory mapping base yield a nullptr within the table");
 
                 base += 1;
-                target += YUZU_PAGESIZE;
+                target += CITRON_PAGESIZE;
             }
         }
     }

@@ -682,7 +682,7 @@ struct Memory::Impl {
         }
 
         // Avoid adding any extra logic to this fast-path block
-        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> CITRON_PAGEBITS].Raw();
         if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
             return reinterpret_cast<u8*>(pointer + vaddr);
         }

@@ -837,7 +837,7 @@ struct Memory::Impl {
         };
         gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) {
             auto& current_area = rasterizer_write_areas[core];
-            PAddr subaddress = address >> YUZU_PAGEBITS;
+            PAddr subaddress = address >> CITRON_PAGEBITS;
             bool do_collection = current_area.last_address == subaddress;
             if (!do_collection) [[unlikely]] {
                 do_collection = system.GPU().OnCPUWrite(address, size);

@@ -925,7 +925,7 @@ void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress
 
 bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
     const auto& page_table = *impl->current_page_table;
-    const size_t page = vaddr >> YUZU_PAGEBITS;
+    const size_t page = vaddr >> CITRON_PAGEBITS;
     if (page >= page_table.pointers.size()) {
        return false;
    }

@@ -936,9 +936,9 @@ bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
 
 bool Memory::IsValidVirtualAddressRange(Common::ProcessAddress base, u64 size) const {
     Common::ProcessAddress end = base + size;
-    Common::ProcessAddress page = Common::AlignDown(GetInteger(base), YUZU_PAGESIZE);
+    Common::ProcessAddress page = Common::AlignDown(GetInteger(base), CITRON_PAGESIZE);
 
-    for (; page < end; page += YUZU_PAGESIZE) {
+    for (; page < end; page += CITRON_PAGESIZE) {
         if (!IsValidVirtualAddress(page)) {
             return false;
         }

@@ -39,9 +39,9 @@ namespace Core::Memory {
 * Page size used by the ARM architecture. This is the smallest granularity with which memory can
 * be mapped.
 */
-constexpr std::size_t YUZU_PAGEBITS = 12;
-constexpr u64 YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
-constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;
+constexpr std::size_t CITRON_PAGEBITS = 12;
+constexpr u64 CITRON_PAGESIZE = 1ULL << CITRON_PAGEBITS;
+constexpr u64 CITRON_PAGEMASK = CITRON_PAGESIZE - 1;
 
 /// Virtual user-space memory regions
 enum : u64 {