#include "llvm/ProfileData/MemProf.h"

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"

namespace llvm {
namespace memprof {

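// Serialized layout of an IndexedMemProfRecord (a sketch inferred from the
// writes in serialize() and the reads in deserialize(); the size of each
// PortableMemInfoBlock depends on the schema in use):
//
//   uint64_t NumAllocSites
//   NumAllocSites x {
//     uint64_t NumFrames
//     FrameId  CallStack[NumFrames]
//     PortableMemInfoBlock (fields selected by the schema)
//   }
//   uint64_t NumCallSites
//   NumCallSites x {
//     uint64_t NumFrames
//     FrameId  Frames[NumFrames]
//   }
//
// All integers are emitted in little-endian byte order.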
void IndexedMemProfRecord::serialize(const MemProfSchema &Schema,
                                     raw_ostream &OS) {
  using namespace support;

  endian::Writer LE(OS, little);

  LE.write<uint64_t>(AllocSites.size());
  for (const IndexedAllocationInfo &N : AllocSites) {
    LE.write<uint64_t>(N.CallStack.size());
    for (const FrameId &Id : N.CallStack)
      LE.write<FrameId>(Id);
    N.Info.serialize(Schema, OS);
  }

  // Related contexts.
  LE.write<uint64_t>(CallSites.size());
  for (const auto &Frames : CallSites) {
    LE.write<uint64_t>(Frames.size());
    for (const FrameId &Id : Frames)
      LE.write<FrameId>(Id);
  }
}

IndexedMemProfRecord
IndexedMemProfRecord::deserialize(const MemProfSchema &Schema,
                                  const unsigned char *Ptr) {
  using namespace support;

  IndexedMemProfRecord Record;

  // Read the meminfo nodes.
  const uint64_t NumNodes = endian::readNext<uint64_t, little, unaligned>(Ptr);
  for (uint64_t I = 0; I < NumNodes; I++) {
    IndexedAllocationInfo Node;
    const uint64_t NumFrames =
        endian::readNext<uint64_t, little, unaligned>(Ptr);
    for (uint64_t J = 0; J < NumFrames; J++) {
      const FrameId Id = endian::readNext<FrameId, little, unaligned>(Ptr);
      Node.CallStack.push_back(Id);
    }
    Node.Info.deserialize(Schema, Ptr);
    Ptr += PortableMemInfoBlock::serializedSize();
    Record.AllocSites.push_back(Node);
  }

  // Read the callsite information.
  const uint64_t NumCtxs = endian::readNext<uint64_t, little, unaligned>(Ptr);
  for (uint64_t J = 0; J < NumCtxs; J++) {
    const uint64_t NumFrames =
        endian::readNext<uint64_t, little, unaligned>(Ptr);
    llvm::SmallVector<FrameId> Frames;
    Frames.reserve(NumFrames);
    for (uint64_t K = 0; K < NumFrames; K++) {
      const FrameId Id = endian::readNext<FrameId, little, unaligned>(Ptr);
      Frames.push_back(Id);
    }
    Record.CallSites.push_back(Frames);
  }

  return Record;
}
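
// Round-trip sketch (illustrative only; `Schema` and `Record` are assumed to
// be a valid MemProfSchema and a populated IndexedMemProfRecord):
//
//   std::string Buffer;
//   llvm::raw_string_ostream OS(Buffer);
//   Record.serialize(Schema, OS);
//   OS.flush();
//   IndexedMemProfRecord Copy = IndexedMemProfRecord::deserialize(
//       Schema, reinterpret_cast<const unsigned char *>(Buffer.data()));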

GlobalValue::GUID IndexedMemProfRecord::getGUID(const StringRef FunctionName) {
  // Canonicalize the function name to drop suffixes such as ".llvm.", ".uniq."
  // etc. We can then match functions in the profile use phase prior to the
  // addition of these suffixes. Note that this applies to both instrumented
  // and sampled function names.
  StringRef CanonicalName =
      sampleprof::FunctionSamples::getCanonicalFnName(FunctionName);

  // We use the function GUID, which we expect to be a uint64_t. At this time
  // it is the lower 64 bits of the MD5 of the canonical function name.
  return Function::getGUID(CanonicalName);
}
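
// As an illustrative example, a suffixed name such as "foo.llvm.123" is
// expected to canonicalize to "foo", so it yields the same GUID as the
// unsuffixed name seen during the profile use phase.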

Expected<MemProfSchema> readMemProfSchema(const unsigned char *&Buffer) {
  using namespace support;

  const unsigned char *Ptr = Buffer;
  const uint64_t NumSchemaIds =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  if (NumSchemaIds > static_cast<uint64_t>(Meta::Size)) {
    return make_error<InstrProfError>(instrprof_error::malformed,
                                      "memprof schema invalid");
  }

  MemProfSchema Result;
  for (size_t I = 0; I < NumSchemaIds; I++) {
    const uint64_t Tag = endian::readNext<uint64_t, little, unaligned>(Ptr);
    if (Tag >= static_cast<uint64_t>(Meta::Size)) {
      return make_error<InstrProfError>(instrprof_error::malformed,
                                        "memprof schema invalid");
    }
    Result.push_back(static_cast<Meta>(Tag));
  }

  // Advance the buffer to one past the schema if we succeeded.
  Buffer = Ptr;
  return Result;
}
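
// Example caller (illustrative sketch; `Start` is a hypothetical pointer to a
// serialized schema):
//
//   const unsigned char *Ptr = Start;
//   Expected<MemProfSchema> SchemaOr = readMemProfSchema(Ptr);
//   if (!SchemaOr)
//     return SchemaOr.takeError();
//   // On success, Ptr now points one past the schema and can be used to
//   // deserialize the records that follow against *SchemaOr.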

} // namespace memprof
} // namespace llvm