[scudo] Make the boundary of memory group aligned with region begin

This alignment guarantee enables a simpler group range check during page
releasing and a potential optimization: all the pointers from the same
group now also live in the same region, which means the complexity in
markFreeBlocks() can be reduced as well.
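
For illustration, here is a minimal, self-contained sketch of the new grouping
scheme (hypothetical constants and standalone helpers, not the code from this
patch): because a compact pointer is an offset from the region begin, masking
it down to its group base and decompacting can only ever yield RegionBeg plus
a multiple of the group size.

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

// Example configuration values; in scudo these come from the primary Config.
constexpr uptr CompactPtrScale = 4;  // pointers compacted by a factor of 16
constexpr uptr GroupSizeLog = 18;    // 256 KiB memory groups
constexpr uptr GroupScale = GroupSizeLog - CompactPtrScale;

// Compact pointers are offsets from the region begin, scaled down.
uptr compactPtr(uptr RegionBeg, uptr Ptr) {
  return (Ptr - RegionBeg) >> CompactPtrScale;
}

// New scheme: keep the group *base* (still in compact units) by masking off
// the low bits, instead of shifting the pointer into an opaque group id.
uptr compactPtrGroupBase(uptr CompactPtr) {
  const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
  return CompactPtr & ~Mask;
}

// Decompacting rescales the base and adds the region begin back, so the
// result is always RegionBeg + k * GroupSize for some k >= 0.
uptr decompactGroupBase(uptr RegionBeg, uptr CompactPtrGroupBase) {
  return RegionBeg + (CompactPtrGroupBase << CompactPtrScale);
}

int main() {
  const uptr GroupSize = static_cast<uptr>(1) << GroupSizeLog;
  const uptr RegionBeg = 0x7000000000;  // hypothetical region begin
  const uptr Ptr = RegionBeg + 3 * GroupSize + 0x1230;
  const uptr GroupBase = decompactGroupBase(
      RegionBeg, compactPtrGroupBase(compactPtr(RegionBeg, Ptr)));
  // The group base never sits before RegionBeg and is GroupSize-aligned
  // relative to it; that is what keeps the page-release range check simple.
  assert(GroupBase == RegionBeg + 3 * GroupSize);
  assert((GroupBase - RegionBeg) % GroupSize == 0);
  return 0;
}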

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D142931
Chia-hung Duan 2023-03-06 19:22:30 +00:00
parent 3df28efaa0
commit a6e3bb9bfb
3 changed files with 96 additions and 92 deletions

compiler-rt/lib/scudo/standalone/local_cache.h

@@ -62,8 +62,8 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
struct BatchGroup {
// `Next` is used by IntrusiveList.
BatchGroup *Next;
// The identifier of each group
uptr GroupId;
// The compact base address of each group
uptr CompactPtrGroupBase;
// Cache value of TransferBatch::getMaxCached()
u16 MaxCachedPerBatch;
// Number of blocks pushed into this group. This is an increment-only

compiler-rt/lib/scudo/standalone/primary32.h

@@ -122,11 +122,14 @@ public:
return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
}
uptr compactPtrGroup(CompactPtrT CompactPtr) {
return CompactPtr >> GroupSizeLog;
uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
return CompactPtr & ~Mask;
}
uptr batchGroupBase(uptr GroupId) { return GroupId << GroupSizeLog; }
uptr decompactGroupBase(uptr CompactPtrGroupBase) {
return CompactPtrGroupBase;
}
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
@@ -172,11 +175,12 @@ public:
// together.
bool SameGroup = true;
for (u32 I = 1; I < Size; ++I) {
if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
SameGroup = false;
CompactPtrT Cur = Array[I];
u32 J = I;
while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
while (J > 0 &&
compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
Array[J] = Array[J - 1];
--J;
}
@@ -363,6 +367,11 @@ private:
const uptr End = Region + MapSize;
if (End != MapEnd)
unmap(reinterpret_cast<void *>(End), MapEnd - End);
DCHECK_EQ(Region % RegionSize, 0U);
static_assert(Config::PrimaryRegionSizeLog == GroupSizeLog,
"Memory group should be the same size as Region");
return Region;
}
@@ -417,7 +426,7 @@ private:
REQUIRES(Sci->Mutex) {
DCHECK_GT(Size, 0U);
auto CreateGroup = [&](uptr GroupId) {
auto CreateGroup = [&](uptr CompactPtrGroupBase) {
BatchGroup *BG = nullptr;
TransferBatch *TB = nullptr;
if (ClassId == SizeClassMap::BatchClassId) {
@@ -476,7 +485,7 @@ private:
TB->clear();
}
BG->GroupId = GroupId;
BG->CompactPtrGroupBase = CompactPtrGroupBase;
// TODO(chiahungduan): Avoid the use of push_back() in `Batches`.
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
@@ -518,7 +527,7 @@ private:
if (ClassId == SizeClassMap::BatchClassId) {
if (Cur == nullptr) {
// Don't need to classify BatchClassId.
Cur = CreateGroup(/*GroupId=*/0);
Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
Sci->FreeList.push_front(Cur);
}
InsertBlocks(Cur, Array, Size);
@@ -529,13 +538,15 @@ private:
// will be pushed next. `Prev` is the element right before `Cur`.
BatchGroup *Prev = nullptr;
while (Cur != nullptr && compactPtrGroup(Array[0]) > Cur->GroupId) {
while (Cur != nullptr &&
compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
Prev = Cur;
Cur = Cur->Next;
}
if (Cur == nullptr || compactPtrGroup(Array[0]) != Cur->GroupId) {
Cur = CreateGroup(compactPtrGroup(Array[0]));
if (Cur == nullptr ||
compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
Cur = CreateGroup(compactPtrGroupBase(Array[0]));
if (Prev == nullptr)
Sci->FreeList.push_front(Cur);
else
@@ -546,7 +557,7 @@ private:
// id.
if (SameGroup) {
for (u32 I = 0; I < Size; ++I)
DCHECK_EQ(compactPtrGroup(Array[I]), Cur->GroupId);
DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);
InsertBlocks(Cur, Array, Size);
return;
@@ -556,17 +567,19 @@ private:
// push them to their group together.
u32 Count = 1;
for (u32 I = 1; I < Size; ++I) {
if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->GroupId);
if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
InsertBlocks(Cur, Array + I - Count, Count);
while (Cur != nullptr && compactPtrGroup(Array[I]) > Cur->GroupId) {
while (Cur != nullptr &&
compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
Prev = Cur;
Cur = Cur->Next;
}
if (Cur == nullptr || compactPtrGroup(Array[I]) != Cur->GroupId) {
Cur = CreateGroup(compactPtrGroup(Array[I]));
if (Cur == nullptr ||
compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
Cur = CreateGroup(compactPtrGroupBase(Array[I]));
DCHECK_NE(Prev, nullptr);
Sci->FreeList.insert(Prev, Cur);
}
@@ -662,14 +675,14 @@ private:
if (ClassId != SizeClassMap::BatchClassId) {
u32 N = 1;
uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
for (u32 I = 1; I < NumberOfBlocks; I++) {
if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) {
if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
shuffle(ShuffleArray + I - N, N, &Sci->RandState);
pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
/*SameGroup=*/true);
N = 1;
CurGroup = compactPtrGroup(ShuffleArray[I]);
CurGroup = compactPtrGroupBase(ShuffleArray[I]);
} else {
++N;
}
@@ -758,8 +771,8 @@ private:
const uptr Base = First * RegionSize;
const uptr NumberOfRegions = Last - First + 1U;
const uptr GroupSize = (1U << GroupSizeLog);
const uptr CurRegionGroupId =
compactPtrGroup(compactPtr(ClassId, Sci->CurrentRegion));
const uptr CurGroupBase =
compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
ReleaseRecorder Recorder(Base);
PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions,
@@ -774,9 +787,10 @@ private:
if (PushedBytesDelta * BlockSize < PageSize)
continue;
uptr AllocatedGroupSize = BG.GroupId == CurRegionGroupId
? Sci->CurrentRegionAllocated
: GroupSize;
uptr AllocatedGroupSize =
decompactGroupBase(BG.CompactPtrGroupBase) == CurGroupBase
? Sci->CurrentRegionAllocated
: GroupSize;
if (AllocatedGroupSize == 0)
continue;
@@ -805,15 +819,16 @@ private:
// `CurrentRegionAllocated` of the current region. This exception excludes
// the chance of doing range marking for the current region.
const bool CanDoRangeMark =
GroupSize == RegionSize && BG.GroupId != CurRegionGroupId;
GroupSize == RegionSize &&
decompactGroupBase(BG.CompactPtrGroupBase) != CurGroupBase;
if (CanDoRangeMark && NumBlocks == MaxContainedBlocks) {
for (const auto &It : BG.Batches)
for (u16 I = 0; I < It.getCount(); ++I)
DCHECK_EQ(compactPtrGroup(It.get(I)), BG.GroupId);
DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
const uptr From = batchGroupBase(BG.GroupId);
const uptr To = batchGroupBase(BG.GroupId) + AllocatedGroupSize;
const uptr From = decompactGroupBase(BG.CompactPtrGroupBase);
const uptr To = From + AllocatedGroupSize;
Context.markRangeAsAllCounted(From, To, Base);
} else {
if (CanDoRangeMark)

compiler-rt/lib/scudo/standalone/primary64.h

@@ -47,6 +47,7 @@ public:
typedef typename Config::PrimaryCompactPtrT CompactPtrT;
static const uptr CompactPtrScale = Config::PrimaryCompactPtrScale;
static const uptr GroupSizeLog = Config::PrimaryGroupSizeLog;
static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
typedef typename Config::SizeClassMap SizeClassMap;
typedef SizeClassAllocator64<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
@@ -336,9 +337,6 @@ public:
static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }
uptr getCompactPtrBaseByClassId(uptr ClassId) {
// If we are not compacting pointers, base everything off of 0.
if (sizeof(CompactPtrT) == sizeof(uptr) && CompactPtrScale == 0)
return 0;
return getRegionInfo(ClassId)->RegionBeg;
}
@@ -475,10 +473,12 @@ private:
}
static uptr compactPtrGroup(CompactPtrT CompactPtr) {
return static_cast<uptr>(CompactPtr) >> (GroupSizeLog - CompactPtrScale);
const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
return static_cast<uptr>(CompactPtr) & ~Mask;
}
static uptr batchGroupBase(uptr Base, uptr GroupId) {
return (GroupId << GroupSizeLog) + Base;
static uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBase) {
DCHECK_EQ(CompactPtrGroupBase % (static_cast<uptr>(1) << (GroupScale)), 0U);
return Base + (CompactPtrGroupBase << CompactPtrScale);
}
// Push the blocks to their batch group. The layout will be like,
@@ -504,7 +504,7 @@ private:
REQUIRES(Region->Mutex) {
DCHECK_GT(Size, 0U);
auto CreateGroup = [&](uptr GroupId) {
auto CreateGroup = [&](uptr CompactPtrGroupBase) {
BatchGroup *BG = nullptr;
TransferBatch *TB = nullptr;
if (ClassId == SizeClassMap::BatchClassId) {
@@ -563,7 +563,7 @@ private:
TB->clear();
}
BG->GroupId = GroupId;
BG->CompactPtrGroupBase = CompactPtrGroupBase;
// TODO(chiahungduan): Avoid the use of push_back() in `Batches`.
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
@@ -605,7 +605,7 @@ private:
if (ClassId == SizeClassMap::BatchClassId) {
if (Cur == nullptr) {
// Don't need to classify BatchClassId.
Cur = CreateGroup(/*GroupId=*/0);
Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
Region->FreeList.push_front(Cur);
}
InsertBlocks(Cur, Array, Size);
@@ -616,12 +616,14 @@ private:
// will be pushed next. `Prev` is the element right before `Cur`.
BatchGroup *Prev = nullptr;
while (Cur != nullptr && compactPtrGroup(Array[0]) > Cur->GroupId) {
while (Cur != nullptr &&
compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
Prev = Cur;
Cur = Cur->Next;
}
if (Cur == nullptr || compactPtrGroup(Array[0]) != Cur->GroupId) {
if (Cur == nullptr ||
compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
Cur = CreateGroup(compactPtrGroup(Array[0]));
if (Prev == nullptr)
Region->FreeList.push_front(Cur);
@@ -633,7 +635,7 @@ private:
// id.
if (SameGroup) {
for (u32 I = 0; I < Size; ++I)
DCHECK_EQ(compactPtrGroup(Array[I]), Cur->GroupId);
DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBase);
InsertBlocks(Cur, Array, Size);
return;
@@ -644,15 +646,17 @@ private:
u32 Count = 1;
for (u32 I = 1; I < Size; ++I) {
if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->GroupId);
DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBase);
InsertBlocks(Cur, Array + I - Count, Count);
while (Cur != nullptr && compactPtrGroup(Array[I]) > Cur->GroupId) {
while (Cur != nullptr &&
compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBase) {
Prev = Cur;
Cur = Cur->Next;
}
if (Cur == nullptr || compactPtrGroup(Array[I]) != Cur->GroupId) {
if (Cur == nullptr ||
compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
Cur = CreateGroup(compactPtrGroup(Array[I]));
DCHECK_NE(Prev, nullptr);
Region->FreeList.insert(Prev, Cur);
@@ -863,40 +867,27 @@ private:
continue;
}
// Group boundary does not necessarily have the same alignment as Region.
// It may sit across a Region boundary. Which means that we may have the
// following two cases,
// Group boundary is always GroupSize-aligned from CompactPtr base. The
// layout of memory groups is like,
//
// 1. Group boundary sits before RegionBeg.
// (CompactPtrBase)
// #1 CompactPtrGroupBase #2 CompactPtrGroupBase ...
// | | |
// v v v
// +-----------------------+-----------------------+
// \ / \ /
// --- GroupSize --- --- GroupSize ---
//
// (BatchGroupBase)
// batchGroupBase RegionBeg BatchGroupEnd
// | | |
// v v v
// +------------+----------------+
// \ /
// ------ GroupSize ------
//
// 2. Group boundary sits after RegionBeg.
//
// (BatchGroupBase)
// RegionBeg batchGroupBase BatchGroupEnd
// | | |
// v v v
// +-----------+-----------------------------+
// \ /
// ------ GroupSize ------
//
// Note that in the first case, the group range before RegionBeg is never
// used. Therefore, while calculating the used group size, we should
// exclude that part to get the correct size.
// After decompacting the CompactPtrGroupBase, we expect the alignment
// property is held as well.
const uptr BatchGroupBase =
Max(batchGroupBase(CompactPtrBase, BG->GroupId), Region->RegionBeg);
decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBase);
DCHECK_LE(Region->RegionBeg, BatchGroupBase);
DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
const uptr BatchGroupEnd =
batchGroupBase(CompactPtrBase, BG->GroupId) + GroupSize;
DCHECK_EQ((Region->RegionBeg - BatchGroupBase) % GroupSize, 0U);
const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
? BatchGroupEnd - BatchGroupBase
? GroupSize
: AllocatedUserEnd - BatchGroupBase;
if (AllocatedGroupSize == 0) {
Prev = BG;
@@ -980,11 +971,11 @@ private:
if (GroupToRelease.empty())
return 0;
const uptr ReleaseBase =
Max(batchGroupBase(CompactPtrBase, GroupToRelease.front()->GroupId),
Region->RegionBeg);
const uptr ReleaseBase = decompactGroupBase(
CompactPtrBase, GroupToRelease.front()->CompactPtrGroupBase);
const uptr LastGroupEnd =
Min(batchGroupBase(CompactPtrBase, GroupToRelease.back()->GroupId) +
Min(decompactGroupBase(CompactPtrBase,
GroupToRelease.back()->CompactPtrGroupBase) +
GroupSize,
AllocatedUserEnd);
// The last block may straddle the group boundary. Rounding up to BlockSize
@@ -1003,15 +994,11 @@ private:
for (BatchGroup &BG : GroupToRelease) {
BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
// TODO(chiahungduan): Replace GroupId with BatchGroupBase to simplify the
// calculation here and above (where we determine the set of groups to
// release).
const uptr BatchGroupBase =
Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
const uptr BatchGroupEnd =
batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
? BatchGroupEnd - BatchGroupBase
? GroupSize
: AllocatedUserEnd - BatchGroupBase;
const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
const bool BlockAlignedWithUsedEnd =
@@ -1027,7 +1014,7 @@ private:
if (NumBlocks == MaxContainedBlocks) {
for (const auto &It : BG.Batches)
for (u16 I = 0; I < It.getCount(); ++I)
DCHECK_EQ(compactPtrGroup(It.get(I)), BG.GroupId);
DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBase);
Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
Region->RegionBeg);
@@ -1062,16 +1049,18 @@ private:
break;
}
DCHECK_NE(BG->GroupId, GroupToRelease.front()->GroupId);
DCHECK_NE(BG->CompactPtrGroupBase,
GroupToRelease.front()->CompactPtrGroupBase);
if (BG->GroupId < GroupToRelease.front()->GroupId) {
if (BG->CompactPtrGroupBase <
GroupToRelease.front()->CompactPtrGroupBase) {
Prev = BG;
BG = BG->Next;
continue;
}
// At here, the `BG` is the first BatchGroup with GroupId larger than the
// first element in `GroupToRelease`. We need to insert
// At here, the `BG` is the first BatchGroup with CompactPtrGroupBase
// larger than the first element in `GroupToRelease`. We need to insert
// `GroupToRelease::front()` (which is `Cur` below) before `BG`.
//
// 1. If `Prev` is nullptr, we simply push `Cur` to the front of
@@ -1097,7 +1086,7 @@ private:
BatchGroup *Prev = Region->FreeList.front();
for (BatchGroup *Cur = Prev->Next; Cur != nullptr;
Prev = Cur, Cur = Cur->Next) {
CHECK_LT(Prev->GroupId, Cur->GroupId);
CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
}
}
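
A worked example of the before/after described by the new layout comment in
the SizeClassAllocator64 diff, using hypothetical numbers (CompactPtrScale = 0
so pointers are not compacted, GroupSizeLog = 18):

  GroupSize = 1 << 18 = 0x40000
  RegionBeg = 0x7FA50000                          (not a multiple of GroupSize)

  Old scheme (the compact-pointer base could be 0, so groups were absolute):
    GroupId   = 0x7FA50000 >> 18 = 0x1FE9
    GroupBase = 0x1FE9 << 18     = 0x7FA40000     (0x10000 before RegionBeg)

  New scheme (compact pointers are always offsets from RegionBeg):
    CompactPtr = 0x7FA50000 - RegionBeg = 0
    GroupBase  = RegionBeg + (0 & ~(GroupSize - 1)) = 0x7FA50000 = RegionBeg

Every group base is now RegionBeg + k * GroupSize, so a group can never
straddle the region begin and the page-release path can bound each group with
a plain [GroupBase, GroupBase + GroupSize) range.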