Files
imhex/lib/libimhex/source/providers/cached_provider.cpp
iTrooz 17c2dfcbd0 chore: apply more light lints (#2575)
<!--
Please provide as much information as possible about what your PR aims
to do.
PRs with no description will most likely be closed until more
information is provided.
If you're planning on changing fundamental behaviour or adding big new
features, please open a GitHub Issue first before starting to work on
it.
If it's not something big and you still want to contact us about it,
feel free to do so!
-->

### Problem description
<!-- Describe the bug that you fixed/feature request that you
implemented, or link to an existing issue describing it -->

### Implementation description
<!-- Explain what you did to correct the problem -->

### Screenshots
<!-- If your change is visual, take a screenshot showing it. Ideally,
make before/after screenshots -->

### Additional things
<!-- Anything else you would like to say -->
2025-12-20 15:59:48 +01:00

131 lines
3.8 KiB
C++

#include "hex/providers/cached_provider.hpp"
#include <algorithm>
#include <optional>
namespace hex::prv {
// Construct a provider that caches source I/O in fixed-size blocks.
// `cacheBlockSize` is the byte granularity of each cached block and
// `maxBlocks` the number of direct-mapped slots; m_cache is pre-sized to
// maxBlocks so a block with index i always occupies slot i % maxBlocks.
CachedProvider::CachedProvider(size_t cacheBlockSize, size_t maxBlocks)
: m_cacheBlockSize(cacheBlockSize), m_maxBlocks(maxBlocks), m_cache(maxBlocks) {}
// Drop all cached blocks on destruction. Writes are passed through to the
// source immediately (see writeRaw), so discarding the cache loses no data.
CachedProvider::~CachedProvider() {
    clearCache();
}
// Open the provider. The cache is cleared first so no stale blocks from a
// previous session can be served; returns a default-constructed (success)
// OpenResult.
Provider::OpenResult CachedProvider::open() {
    clearCache();
    return {};
}
// Close the provider, releasing every cached block and resetting the
// memoized size.
void CachedProvider::close() {
    clearCache();
}
// Read `size` bytes at `offset` into `buffer`, split on cache-block
// boundaries. Each block-sized chunk is served from the direct-mapped cache
// on a hit; on a miss the whole block is fetched from the source and
// installed into its slot (blockIndex % m_maxBlocks) before copying out.
void CachedProvider::readRaw(u64 offset, void* buffer, size_t size) {
    if (!isAvailable() || !isReadable())
        return;
    auto out = static_cast<u8 *>(buffer);
    while (size > 0) {
        const auto blockIndex = calcBlockIndex(offset);
        const auto blockOffset = calcBlockOffset(offset);
        // Clamp the chunk to whatever remains of both the current block and
        // the overall request.
        const auto toRead = std::min(m_cacheBlockSize - blockOffset, size);
        const auto cacheSlot = blockIndex % m_maxBlocks;
        {
            // Fast path: serve a hit under a shared (reader) lock only.
            std::shared_lock lock(m_cacheMutex);
            const auto &slot = m_cache[cacheSlot];
            if (slot && slot->index == blockIndex) {
                std::copy_n(slot->data.begin() + blockOffset, toRead, out);
                out += toRead;
                offset += toRead;
                size -= toRead;
                continue;
            }
        }
        // Miss: read the full block from the source outside any lock to keep
        // the critical section short.
        // NOTE(review): between releasing the shared lock above and taking the
        // unique lock below, a writer may have populated this slot; we then
        // overwrite it with source data, relying on writeRaw's write-through
        // to have made the source current — confirm that ordering is adequate
        // for concurrent readers/writers of the same range.
        std::vector<uint8_t> blockData(m_cacheBlockSize);
        readFromSource(blockIndex * m_cacheBlockSize, blockData.data(), m_cacheBlockSize);
        {
            std::unique_lock lock(m_cacheMutex);
            m_cache[cacheSlot] = Block{.index=blockIndex, .data=std::move(blockData), .dirty=false};
            std::copy_n(m_cache[cacheSlot]->data.begin() + blockOffset, toRead, out);
        }
        out += toRead;
        offset += toRead;
        size -= toRead;
    }
}
// Write `size` bytes from `buffer` at `offset`, split on cache-block
// boundaries. Write-allocate: a missing block is first filled from the
// source so the cached copy stays complete; the chunk is then patched into
// the cached block and immediately written through to the source.
void CachedProvider::writeRaw(u64 offset, const void* buffer, size_t size) {
    if (!isAvailable() || !isWritable())
        return;
    auto in = static_cast<const u8 *>(buffer);
    while (size > 0) {
        const auto blockIndex = calcBlockIndex(offset);
        const auto blockOffset = calcBlockOffset(offset);
        // Clamp the chunk to the remainder of the current block and request.
        const auto toWrite = std::min(m_cacheBlockSize - blockOffset, size);
        const auto cacheSlot = blockIndex % m_maxBlocks;
        {
            std::unique_lock lock(m_cacheMutex);
            auto& slot = m_cache[cacheSlot];
            if (!slot || slot->index != blockIndex) {
                // Write-allocate: populate the slot from the source before
                // patching. NOTE(review): this source read runs while holding
                // the unique lock — verify readFromSource latency is
                // acceptable under contention.
                std::vector<uint8_t> blockData(m_cacheBlockSize);
                readFromSource(blockIndex * m_cacheBlockSize, blockData.data(), m_cacheBlockSize);
                slot = Block { .index=blockIndex, .data=std::move(blockData), .dirty=false };
            }
            std::copy_n(in, toWrite, slot->data.begin() + blockOffset);
            slot->dirty = true;
        }
        // Write-through outside the lock; nothing in this file flushes based
        // on the dirty flag set above.
        writeToSource(offset, in, toWrite);
        in += toWrite;
        offset += toWrite;
        size -= toWrite;
    }
}
// Resize the underlying source to `newSize`. The cache (including the
// memoized size used by getActualSize) is invalidated first so subsequent
// reads reflect the new extent.
void CachedProvider::resizeRaw(u64 newSize) {
    clearCache();
    resizeSource(newSize);
}
// Return the size of the underlying source, memoized in m_cachedSize.
// Zero doubles as the "not yet computed" marker, so a genuinely empty
// source is simply re-queried on each call; clearCache() resets the memo
// by storing 0.
u64 CachedProvider::getActualSize() const {
    if (!isAvailable())
        return 0;
    // Fast path: the memoized value is still valid.
    if (m_cachedSize != 0)
        return m_cachedSize;
    std::unique_lock lock(m_cacheMutex);
    m_cachedSize = getSourceSize();
    return m_cachedSize;
}
// Discard every cached block and reset the memoized source size, forcing
// the next getActualSize() call to query the source again.
void CachedProvider::clearCache() {
    std::unique_lock lock(m_cacheMutex);
    std::for_each(m_cache.begin(), m_cache.end(), [](auto &entry) { entry.reset(); });
    m_cachedSize = 0;
}
void CachedProvider::evictIfNeeded() {
if (m_cache.size() < m_maxBlocks)
return;
m_cache.erase(m_cache.begin());
}
}