Revert "[scudo] Fix the logic of MaxAllowedFragmentedPages" (#108130)
Reverts llvm/llvm-project#107927. We are supposed to check the MaxAllowedFragmentedPages instead.
parent 5773adb0bf
commit 76151c4490
@@ -72,16 +72,13 @@ namespace {
 struct CachedBlock {
   static constexpr u16 CacheIndexMax = UINT16_MAX;
   static constexpr u16 InvalidEntry = CacheIndexMax;
-  // We allow a certain amount of fragmentation and part of the fragmented bytes
-  // will be released by `releaseAndZeroPagesToOS()`. This increases the chance
-  // of cache hit rate and reduces the overhead to the RSS at the same time. See
-  // more details in the `MapAllocatorCache::retrieve()` section.
-  //
-  // We arrived at this default value after noticing that mapping in larger
-  // memory regions performs better than releasing memory and forcing a cache
-  // hit. According to the data, it suggests that beyond 4 pages, the release
-  // execution time is longer than the map execution time. In this way,
-  // the default is dependent on the platform.
+  // * MaxReleasedCachePages default is currently 4
+  //   - We arrived at this value after noticing that mapping
+  //     in larger memory regions performs better than releasing
+  //     memory and forcing a cache hit. According to the data,
+  //     it suggests that beyond 4 pages, the release execution time is
+  //     longer than the map execution time. In this way, the default
+  //     is dependent on the platform.
   static constexpr uptr MaxReleasedCachePages = 4U;

   uptr CommitBase = 0;
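
For context on the comments in the hunk above: both versions describe a fragmentation budget for cache retrieval, where a cached mapping somewhat larger than the request can still be reused and the surplus pages released back to the OS. The standalone sketch below illustrates that kind of page-budget check; the function name fitsFragmentationBudget, the PageSize value, and the constant values are illustrative assumptions, not scudo's actual retrieve() implementation.

#include <cstdint>

using uptr = std::uintptr_t;

// Illustrative values only; the real constants are defined in scudo's
// CachedBlock and MapAllocatorCache.
constexpr uptr PageSize = 4096;
constexpr uptr MaxUnreleasedCachePages = 4;
constexpr uptr MaxReleasedCachePages = 4;

// A cached block can serve a request if its unused tail stays within the
// allowed fragmentation budget (counted in pages). Pages beyond the
// "unreleased" allowance would be released and zeroed before reuse.
// E.g. with a budget of MaxUnreleasedCachePages + MaxReleasedCachePages = 8
// pages, a 64 KiB cached mapping can serve a 40 KiB request (6 unused pages).
bool fitsFragmentationBudget(uptr CachedCommitSize, uptr RequiredSize,
                             uptr MaxAllowedFragmentedPages) {
  if (CachedCommitSize < RequiredSize)
    return false; // Too small to be reused at all.
  const uptr UnusedBytes = CachedCommitSize - RequiredSize;
  const uptr UnusedPages = (UnusedBytes + PageSize - 1) / PageSize;
  return UnusedPages <= MaxAllowedFragmentedPages;
}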
@@ -728,14 +725,8 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
   uptr EntryHeaderPos;
   uptr MaxAllowedFragmentedPages = MaxUnreleasedCachePages;

-  if (LIKELY(!useMemoryTagging<Config>(Options))) {
+  if (UNLIKELY(useMemoryTagging<Config>(Options)))
     MaxAllowedFragmentedPages += CachedBlock::MaxReleasedCachePages;
-  } else {
-    // TODO: Enable MaxReleasedCachePages may result in pages for an entry being
-    // partially released and it erases the tag of those pages as well. To
-    // support this feature for MTE, we need to tag those pages again.
-    DCHECK_EQ(CachedBlock::MaxReleasedCachePages, 0U);
-  }

   Entry = Cache.retrieve(MaxAllowedFragmentedPages, Size, Alignment,
                          getHeadersSize(), EntryHeaderPos);
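
To spell out the behavioral difference in the hunk above: the removed branch added the extra MaxReleasedCachePages budget only when memory tagging is off, and DCHECKed the constant to be zero otherwise, while the restored line adds the extra budget when memory tagging is in use. Below is a minimal sketch of the two variants, assuming a boolean stand-in for scudo's useMemoryTagging<Config>(Options) and illustrative constant values.

#include <cstdint>

using uptr = std::uintptr_t;

// Illustrative values; the real constants live in scudo's CachedBlock.
constexpr uptr MaxUnreleasedCachePages = 4;
constexpr uptr MaxReleasedCachePages = 4;

// Hypothetical stand-in for scudo's useMemoryTagging<Config>(Options).
inline bool useMemoryTagging(bool MTEEnabled) { return MTEEnabled; }

// Variant removed by this revert: grant the extra budget only when memory
// tagging is off; the removed else-branch DCHECKed that MaxReleasedCachePages
// is zero, since partially releasing pages also erases their tags.
uptr maxFragmentedPagesRemoved(bool MTE) {
  uptr MaxAllowedFragmentedPages = MaxUnreleasedCachePages;
  if (!useMemoryTagging(MTE))
    MaxAllowedFragmentedPages += MaxReleasedCachePages;
  return MaxAllowedFragmentedPages;
}

// Variant restored by this revert: add the extra budget when memory tagging
// is in use.
uptr maxFragmentedPagesRestored(bool MTE) {
  uptr MaxAllowedFragmentedPages = MaxUnreleasedCachePages;
  if (useMemoryTagging(MTE))
    MaxAllowedFragmentedPages += MaxReleasedCachePages;
  return MaxAllowedFragmentedPages;
}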