Merge pull request #6028 from bunnei/raster-cache
video_core: rasterizer_accelerated: Use a flat array instead of interval_map for cached pages.
commit 4735d18bb9
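For orientation before the diff, here is a minimal sketch of the approach the PR adopts, using hypothetical names rather than the PR's own code: every guest page gets a fixed slot holding an 8-bit reference count in a flat array, so marking a region cached or uncached becomes a direct indexed increment/decrement instead of a boost::icl::interval_map update guarded by a mutex. The 4 KiB page size and the table size below are assumptions chosen for illustration.

// Sketch only; hypothetical names, not the code from this PR.
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>

constexpr std::uint64_t PageBits = 12;          // assumed 4 KiB guest pages
constexpr std::size_t TrackedPages = 1 << 20;   // illustrative table size

// One 8-bit reference count per page replaces the interval_map.
std::array<std::atomic_uint8_t, TrackedPages> page_counts{};

// Adjusts one page's count by +1/-1 and reports whether its cached state
// flipped, i.e. whether the memory system would need to be notified.
bool AdjustPageCount(std::uint64_t page, int delta) {
    auto& count = page_counts[page % TrackedPages];
    // operator+= on an integral std::atomic is an atomic fetch-add that
    // returns the new value; casting -1 to uint8_t wraps to a subtraction.
    const std::uint8_t now = (count += static_cast<std::uint8_t>(delta));
    return (delta > 0 && now == 1) || (delta < 0 && now == 0);
}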
src/video_core/rasterizer_accelerated.cpp
@@ -2,63 +2,43 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#include <mutex>
-
-#include <boost/icl/interval_map.hpp>
-#include <boost/range/iterator_range.hpp>
-
 #include "common/assert.h"
 #include "common/common_types.h"
+#include "common/div_ceil.h"
 #include "core/memory.h"
 #include "video_core/rasterizer_accelerated.h"
 
 namespace VideoCore {
 
-namespace {
-
-template <typename Map, typename Interval>
-constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
-    return boost::make_iterator_range(map.equal_range(interval));
-}
-
-} // Anonymous namespace
-
 RasterizerAccelerated::RasterizerAccelerated(Core::Memory::Memory& cpu_memory_)
     : cpu_memory{cpu_memory_} {}
 
 RasterizerAccelerated::~RasterizerAccelerated() = default;
 
 void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
-    std::lock_guard lock{pages_mutex};
-
-    const u64 page_start{addr >> Core::Memory::PAGE_BITS};
-    const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS};
-
-    // Interval maps will erase segments if count reaches 0, so if delta is negative we have to
-    // subtract after iterating
-    const auto pages_interval = CachedPageMap::interval_type::right_open(page_start, page_end);
-    if (delta > 0) {
-        cached_pages.add({pages_interval, delta});
-    }
-
-    for (const auto& pair : RangeFromInterval(cached_pages, pages_interval)) {
-        const auto interval = pair.first & pages_interval;
-        const int count = pair.second;
-
-        const VAddr interval_start_addr = boost::icl::first(interval) << Core::Memory::PAGE_BITS;
-        const VAddr interval_end_addr = boost::icl::last_next(interval) << Core::Memory::PAGE_BITS;
-        const u64 interval_size = interval_end_addr - interval_start_addr;
-
-        if (delta > 0 && count == delta) {
-            cpu_memory.RasterizerMarkRegionCached(interval_start_addr, interval_size, true);
-        } else if (delta < 0 && count == -delta) {
-            cpu_memory.RasterizerMarkRegionCached(interval_start_addr, interval_size, false);
-        } else {
-            ASSERT(count >= 0);
-        }
-    }
-
-    if (delta < 0) {
-        cached_pages.add({pages_interval, delta});
+    const auto page_end = Common::DivCeil(addr + size, Core::Memory::PAGE_SIZE);
+    for (auto page = addr >> Core::Memory::PAGE_BITS; page != page_end; ++page) {
+        auto& count = cached_pages.at(page >> 3).Count(page);
+
+        if (delta > 0) {
+            ASSERT_MSG(count < UINT8_MAX, "Count may overflow!");
+        } else if (delta < 0) {
+            ASSERT_MSG(count > 0, "Count may underflow!");
+        } else {
+            ASSERT_MSG(true, "Delta must be non-zero!");
+        }
+
+        // Adds or subtracts 1, as count is a unsigned 8-bit value
+        count += static_cast<u8>(delta);
+
+        // Assume delta is either -1 or 1
+        if (count == 0) {
+            cpu_memory.RasterizerMarkRegionCached(page << Core::Memory::PAGE_BITS,
+                                                  Core::Memory::PAGE_SIZE, false);
+        } else if (count == 1 && delta > 0) {
+            cpu_memory.RasterizerMarkRegionCached(page << Core::Memory::PAGE_BITS,
+                                                  Core::Memory::PAGE_SIZE, true);
+        }
     }
 }
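To make the new indexing concrete: the loop walks page indices from addr >> PAGE_BITS up to Common::DivCeil(addr + size, PAGE_SIZE), and each page's counter lives at cached_pages.at(page >> 3).Count(page), so eight consecutive pages share a single 8-byte CacheEntry. A small self-contained example of that arithmetic, assuming the 4 KiB page size (PAGE_BITS == 12) used by Core::Memory; DivCeil here stands in for Common::DivCeil:

// Worked example of the index math only; not part of the PR.
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t PageBits = 12;             // assumed 4 KiB pages
constexpr std::uint64_t PageSize = 1ULL << PageBits;

constexpr std::uint64_t DivCeil(std::uint64_t n, std::uint64_t d) {
    return (n + d - 1) / d;
}

int main() {
    const std::uint64_t addr = 0x1234;
    const std::uint64_t size = 0x3000;

    // Pages 1 through 4 are touched: page_end == DivCeil(0x4234, 0x1000) == 5.
    const std::uint64_t page_end = DivCeil(addr + size, PageSize);
    for (std::uint64_t page = addr >> PageBits; page != page_end; ++page) {
        // page >> 3 selects the CacheEntry, page & 7 the counter inside it.
        std::printf("page %llu -> cached_pages[%llu], counter %llu\n",
                    static_cast<unsigned long long>(page),
                    static_cast<unsigned long long>(page >> 3),
                    static_cast<unsigned long long>(page & 7));
    }
    return 0;
}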
src/video_core/rasterizer_accelerated.h
@@ -4,9 +4,8 @@
 
 #pragma once
 
-#include <mutex>
-
-#include <boost/icl/interval_map.hpp>
+#include <array>
+#include <atomic>
 
 #include "common/common_types.h"
 #include "video_core/rasterizer_interface.h"
@@ -26,10 +25,24 @@ public:
     void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override;
 
 private:
-    using CachedPageMap = boost::icl::interval_map<u64, int>;
-    CachedPageMap cached_pages;
-    std::mutex pages_mutex;
+    class CacheEntry final {
+    public:
+        CacheEntry() = default;
+
+        std::atomic_uint8_t& Count(std::size_t page) {
+            return values[page & 7];
+        }
+
+        const std::atomic_uint8_t& Count(std::size_t page) const {
+            return values[page & 7];
+        }
+
+    private:
+        std::array<std::atomic_uint8_t, 8> values{};
+    };
+    static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!");
 
+    std::array<CacheEntry, 0x800000> cached_pages;
     Core::Memory::Memory& cpu_memory;
 };
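A quick sanity check on the sizes pinned down by the header, assuming the 4 KiB guest page size used by Core::Memory: 0x800000 CacheEntry slots times 8 counters each track 0x4000000 pages, which spans a 256 GiB address range, while the table itself stays at 64 MiB because the static_assert keeps each CacheEntry at 8 bytes.

// Compile-time arithmetic only; PageBits == 12 is an assumption, the rest
// comes from the header change above.
#include <cstdint>

constexpr std::uint64_t PageBits = 12;
constexpr std::uint64_t EntryCount = 0x800000;   // std::array<CacheEntry, 0x800000>
constexpr std::uint64_t PagesPerEntry = 8;       // counters per CacheEntry
constexpr std::uint64_t EntryBytes = 8;          // enforced by the static_assert

static_assert(EntryCount * PagesPerEntry == 0x4000000, "64M pages are tracked");
static_assert((EntryCount * PagesPerEntry) << PageBits == (1ULL << 38),
              "which covers a 256 GiB address range");
static_assert(EntryCount * EntryBytes == 64ULL << 20,
              "while the table itself occupies 64 MiB");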