[flang][cuda] Add asyncId to allocate entry point (#134947)

Valentin Clement (バレンタイン クレメン) authored 2025-04-09 10:52:02 -07:00, committed by GitHub
parent fa273e1158
commit f4d87c42a6
38 changed files with 110 additions and 91 deletions
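
For orientation, the externally visible signature change, assembled from the header hunks below (a summary of the patch, not code added by it):

// Descriptor header (first hunk below): a sentinel plus a new parameter.
/// Value used for asyncId when no specific stream is specified.
static constexpr std::int64_t kNoAsyncId = -1;
RT_API_ATTRS int Allocate(std::int64_t asyncId); // was Allocate()

// Allocatable runtime API header (hunk near the end of this diff):
int RTDECL(AllocatableAllocate)(Descriptor &, std::int64_t asyncId = -1,
    bool hasStat = false, const Descriptor *errMsg = nullptr,
    const char *sourceFile = nullptr, int sourceLine = 0);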

View File

@ -29,6 +29,9 @@
#include <cstdio>
#include <cstring>
/// Value used for asyncId when no specific stream is specified.
static constexpr std::int64_t kNoAsyncId = -1;
namespace Fortran::runtime {
class Terminator;
@ -369,7 +372,7 @@ public:
// before calling. It (re)computes the byte strides after
// allocation. Does not allocate automatic components or
// perform default component initialization.
RT_API_ATTRS int Allocate();
RT_API_ATTRS int Allocate(std::int64_t asyncId);
RT_API_ATTRS void SetByteStrides();
// Deallocates storage; does not call FINAL subroutines or
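
The practical effect on callers is visible throughout the rest of this diff: every former result.Allocate() call now spells out a stream id, and only the CUDA Fortran wrapper has a real one to pass. Two representative call sites, quoted from later hunks:

// Ordinary host-side caller (reduction/character/transformational helpers):
if (int stat{result.Allocate(kNoAsyncId)}) {
  terminator.Crash(
      "%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
}

// CUDA Fortran wrapper: the stream received from lowering is forwarded.
int stat{RTNAME(AllocatableAllocate)(
    desc, stream, hasStat, errMsg, sourceFile, sourceLine)};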

View File

@ -347,7 +347,7 @@ inline RT_API_ATTRS void DoMaxMinNorm2(Descriptor &result, const Descriptor &x,
// as the element size of the source.
result.Establish(x.type(), x.ElementBytes(), nullptr, 0, nullptr,
CFI_attribute_allocatable);
if (int stat{result.Allocate()}) {
if (int stat{result.Allocate(kNoAsyncId)}) {
terminator.Crash(
"%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
}

View File

@ -53,7 +53,7 @@ int RTDEF(CUFAllocatableAllocate)(Descriptor &desc, int64_t stream,
}
// Perform the standard allocation.
int stat{RTNAME(AllocatableAllocate)(
desc, hasStat, errMsg, sourceFile, sourceLine)};
desc, stream, hasStat, errMsg, sourceFile, sourceLine)};
if (pinned) {
// Set pinned according to stat. More infrastructure is needed to set it
// closer to the actual allocation call.

View File

@ -133,15 +133,17 @@ void RTDEF(AllocatableApplyMold)(
}
}
int RTDEF(AllocatableAllocate)(Descriptor &descriptor, bool hasStat,
const Descriptor *errMsg, const char *sourceFile, int sourceLine) {
int RTDEF(AllocatableAllocate)(Descriptor &descriptor, std::int64_t asyncId,
bool hasStat, const Descriptor *errMsg, const char *sourceFile,
int sourceLine) {
Terminator terminator{sourceFile, sourceLine};
if (!descriptor.IsAllocatable()) {
return ReturnError(terminator, StatInvalidDescriptor, errMsg, hasStat);
} else if (descriptor.IsAllocated()) {
return ReturnError(terminator, StatBaseNotNull, errMsg, hasStat);
} else {
int stat{ReturnError(terminator, descriptor.Allocate(), errMsg, hasStat)};
int stat{
ReturnError(terminator, descriptor.Allocate(asyncId), errMsg, hasStat)};
if (stat == StatOk) {
if (const DescriptorAddendum * addendum{descriptor.Addendum()}) {
if (const auto *derived{addendum->derivedType()}) {
@ -160,7 +162,7 @@ int RTDEF(AllocatableAllocateSource)(Descriptor &alloc,
const Descriptor &source, bool hasStat, const Descriptor *errMsg,
const char *sourceFile, int sourceLine) {
int stat{RTNAME(AllocatableAllocate)(
alloc, hasStat, errMsg, sourceFile, sourceLine)};
alloc, /*asyncId=*/-1, hasStat, errMsg, sourceFile, sourceLine)};
if (stat == StatOk) {
Terminator terminator{sourceFile, sourceLine};
DoFromSourceAssign(alloc, source, terminator);
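
Two spellings of "no stream" appear in the rest of the diff: the kNoAsyncId constant introduced in the first hunk, and a literal /*asyncId=*/-1 as used here in AllocatableAllocateSource and in the temporary/vector helpers. They denote the same sentinel value; for instance (both lines verbatim from hunks in this diff):

// Spelling used where the kNoAsyncId constant is used directly:
stat = ReturnError(
    terminator, allocDesc.Allocate(kNoAsyncId), errMsg, hasStat);

// Spelling used where the literal is passed to the runtime entry point:
int stat{RTNAME(AllocatableAllocate)(
    alloc, /*asyncId=*/-1, hasStat, errMsg, sourceFile, sourceLine)};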

View File

@ -50,8 +50,8 @@ static RT_API_ATTRS void AllocateOrReallocateVectorIfNeeded(
initialAllocationSize(fromElements, to.ElementBytes())};
to.GetDimension(0).SetBounds(1, allocationSize);
RTNAME(AllocatableAllocate)
(to, /*hasStat=*/false, /*errMsg=*/nullptr, vector.sourceFile,
vector.sourceLine);
(to, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr,
vector.sourceFile, vector.sourceLine);
to.GetDimension(0).SetBounds(1, fromElements);
vector.actualAllocationSize = allocationSize;
} else {
@ -59,8 +59,8 @@ static RT_API_ATTRS void AllocateOrReallocateVectorIfNeeded(
// first value: there should be no reallocation.
RUNTIME_CHECK(terminator, previousToElements >= fromElements);
RTNAME(AllocatableAllocate)
(to, /*hasStat=*/false, /*errMsg=*/nullptr, vector.sourceFile,
vector.sourceLine);
(to, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr,
vector.sourceFile, vector.sourceLine);
vector.actualAllocationSize = previousToElements;
}
} else {

View File

@ -99,7 +99,7 @@ static RT_API_ATTRS int AllocateAssignmentLHS(
toDim.SetByteStride(stride);
stride *= toDim.Extent();
}
int result{ReturnError(terminator, to.Allocate())};
int result{ReturnError(terminator, to.Allocate(kNoAsyncId))};
if (result == StatOk && derived && !derived->noInitializationNeeded()) {
result = ReturnError(terminator, Initialize(to, *derived, terminator));
}
@ -277,7 +277,7 @@ RT_API_ATTRS void Assign(Descriptor &to, const Descriptor &from,
// entity, otherwise, the Deallocate() below will not
// free the descriptor memory.
newFrom.raw().attribute = CFI_attribute_allocatable;
auto stat{ReturnError(terminator, newFrom.Allocate())};
auto stat{ReturnError(terminator, newFrom.Allocate(kNoAsyncId))};
if (stat == StatOk) {
if (HasDynamicComponent(from)) {
// If 'from' has allocatable/automatic component, we cannot

View File

@ -117,7 +117,7 @@ static RT_API_ATTRS void Compare(Descriptor &result, const Descriptor &x,
for (int j{0}; j < rank; ++j) {
result.GetDimension(j).SetBounds(1, ub[j]);
}
if (result.Allocate() != CFI_SUCCESS) {
if (result.Allocate(kNoAsyncId) != CFI_SUCCESS) {
terminator.Crash("Compare: could not allocate storage for result");
}
std::size_t xChars{x.ElementBytes() >> shift<CHAR>};
@ -172,7 +172,7 @@ static RT_API_ATTRS void AdjustLRHelper(Descriptor &result,
for (int j{0}; j < rank; ++j) {
result.GetDimension(j).SetBounds(1, ub[j]);
}
if (result.Allocate() != CFI_SUCCESS) {
if (result.Allocate(kNoAsyncId) != CFI_SUCCESS) {
terminator.Crash("ADJUSTL/R: could not allocate storage for result");
}
for (SubscriptValue resultAt{0}; elements-- > 0;
@ -226,7 +226,7 @@ static RT_API_ATTRS void LenTrim(Descriptor &result, const Descriptor &string,
for (int j{0}; j < rank; ++j) {
result.GetDimension(j).SetBounds(1, ub[j]);
}
if (result.Allocate() != CFI_SUCCESS) {
if (result.Allocate(kNoAsyncId) != CFI_SUCCESS) {
terminator.Crash("LEN_TRIM: could not allocate storage for result");
}
std::size_t stringElementChars{string.ElementBytes() >> shift<CHAR>};
@ -408,7 +408,7 @@ static RT_API_ATTRS void GeneralCharFunc(Descriptor &result,
for (int j{0}; j < rank; ++j) {
result.GetDimension(j).SetBounds(1, ub[j]);
}
if (result.Allocate() != CFI_SUCCESS) {
if (result.Allocate(kNoAsyncId) != CFI_SUCCESS) {
terminator.Crash("SCAN/VERIFY: could not allocate storage for result");
}
std::size_t stringElementChars{string.ElementBytes() >> shift<CHAR>};
@ -511,7 +511,7 @@ static RT_API_ATTRS void MaxMinHelper(Descriptor &accumulator,
for (int j{0}; j < rank; ++j) {
accumulator.GetDimension(j).SetBounds(1, ub[j]);
}
RUNTIME_CHECK(terminator, accumulator.Allocate() == CFI_SUCCESS);
RUNTIME_CHECK(terminator, accumulator.Allocate(kNoAsyncId) == CFI_SUCCESS);
}
for (CHAR *result{accumulator.OffsetElement<CHAR>()}; elements-- > 0;
accumData += accumChars, result += chars, x.IncrementSubscripts(xAt)) {
@ -587,7 +587,7 @@ void RTDEF(CharacterConcatenate)(Descriptor &accumulator,
for (int j{0}; j < rank; ++j) {
accumulator.GetDimension(j).SetBounds(1, ub[j]);
}
if (accumulator.Allocate() != CFI_SUCCESS) {
if (accumulator.Allocate(kNoAsyncId) != CFI_SUCCESS) {
terminator.Crash(
"CharacterConcatenate: could not allocate storage for result");
}
@ -610,7 +610,7 @@ void RTDEF(CharacterConcatenateScalar1)(
accumulator.set_base_addr(nullptr);
std::size_t oldLen{accumulator.ElementBytes()};
accumulator.raw().elem_len += chars;
RUNTIME_CHECK(terminator, accumulator.Allocate() == CFI_SUCCESS);
RUNTIME_CHECK(terminator, accumulator.Allocate(kNoAsyncId) == CFI_SUCCESS);
std::memcpy(accumulator.OffsetElement<char>(oldLen), from, chars);
FreeMemory(old);
}
@ -812,7 +812,7 @@ void RTDEF(Repeat)(Descriptor &result, const Descriptor &string,
std::size_t origBytes{string.ElementBytes()};
result.Establish(string.type(), origBytes * ncopies, nullptr, 0, nullptr,
CFI_attribute_allocatable);
if (result.Allocate() != CFI_SUCCESS) {
if (result.Allocate(kNoAsyncId) != CFI_SUCCESS) {
terminator.Crash("REPEAT could not allocate storage for result");
}
const char *from{string.OffsetElement()};
@ -846,7 +846,7 @@ void RTDEF(Trim)(Descriptor &result, const Descriptor &string,
}
result.Establish(string.type(), resultBytes, nullptr, 0, nullptr,
CFI_attribute_allocatable);
RUNTIME_CHECK(terminator, result.Allocate() == CFI_SUCCESS);
RUNTIME_CHECK(terminator, result.Allocate(kNoAsyncId) == CFI_SUCCESS);
std::memcpy(result.OffsetElement(), string.OffsetElement(), resultBytes);
}

View File

@ -171,7 +171,8 @@ RT_API_ATTRS void CopyElement(const Descriptor &to, const SubscriptValue toAt[],
*reinterpret_cast<Descriptor *>(toPtr + component->offset())};
if (toDesc.raw().base_addr != nullptr) {
toDesc.set_base_addr(nullptr);
RUNTIME_CHECK(terminator, toDesc.Allocate() == CFI_SUCCESS);
RUNTIME_CHECK(
terminator, toDesc.Allocate(/*asyncId=*/-1) == CFI_SUCCESS);
const Descriptor &fromDesc{*reinterpret_cast<const Descriptor *>(
fromPtr + component->offset())};
copyStack.emplace(toDesc, fromDesc);

View File

@ -51,7 +51,8 @@ RT_API_ATTRS int Initialize(const Descriptor &instance,
comp.EstablishDescriptor(allocDesc, instance, terminator);
allocDesc.raw().attribute = CFI_attribute_allocatable;
if (comp.genre() == typeInfo::Component::Genre::Automatic) {
stat = ReturnError(terminator, allocDesc.Allocate(), errMsg, hasStat);
stat = ReturnError(
terminator, allocDesc.Allocate(kNoAsyncId), errMsg, hasStat);
if (stat == StatOk) {
if (const DescriptorAddendum * addendum{allocDesc.Addendum()}) {
if (const auto *derived{addendum->derivedType()}) {
@ -151,7 +152,8 @@ RT_API_ATTRS int InitializeClone(const Descriptor &clone,
*clone.ElementComponent<Descriptor>(at, comp.offset())};
if (origDesc.IsAllocated()) {
cloneDesc.ApplyMold(origDesc, origDesc.rank());
stat = ReturnError(terminator, cloneDesc.Allocate(), errMsg, hasStat);
stat = ReturnError(
terminator, cloneDesc.Allocate(kNoAsyncId), errMsg, hasStat);
if (stat == StatOk) {
if (const DescriptorAddendum * addendum{cloneDesc.Addendum()}) {
if (const typeInfo::DerivedType *
@ -258,7 +260,7 @@ static RT_API_ATTRS void CallFinalSubroutine(const Descriptor &descriptor,
copy.raw().attribute = CFI_attribute_allocatable;
Terminator stubTerminator{"CallFinalProcedure() in Fortran runtime", 0};
RUNTIME_CHECK(terminator ? *terminator : stubTerminator,
copy.Allocate() == CFI_SUCCESS);
copy.Allocate(kNoAsyncId) == CFI_SUCCESS);
ShallowCopyDiscontiguousToContiguous(copy, descriptor);
argDescriptor = &copy;
}

View File

@ -158,7 +158,7 @@ RT_API_ATTRS static inline int MapAllocIdx(const Descriptor &desc) {
#endif
}
RT_API_ATTRS int Descriptor::Allocate() {
RT_API_ATTRS int Descriptor::Allocate(std::int64_t asyncId) {
std::size_t elementBytes{ElementBytes()};
if (static_cast<std::int64_t>(elementBytes) < 0) {
// F'2023 7.4.4.2 p5: "If the character length parameter value evaluates
@ -170,7 +170,7 @@ RT_API_ATTRS int Descriptor::Allocate() {
// Zero size allocation is possible in Fortran and the resulting
// descriptor must be allocated/associated. Since std::malloc(0)
// result is implementation defined, always allocate at least one byte.
void *p{alloc(byteSize ? byteSize : 1, /*asyncId=*/-1)};
void *p{alloc(byteSize ? byteSize : 1, asyncId)};
if (!p) {
return CFI_ERROR_MEM_ALLOCATION;
}
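
This is the end of the chain: the stream id materialized by lowering as an i64 travels CUFAllocatableAllocate -> AllocatableAllocate -> Descriptor::Allocate -> alloc(), whose second parameter already existed and was previously pinned to -1. A condensed sketch of the updated function; the parts this hunk does not show are abbreviated and should not be read as verbatim source:

RT_API_ATTRS int Descriptor::Allocate(std::int64_t asyncId) {
  std::size_t elementBytes{ElementBytes()};
  if (static_cast<std::int64_t>(elementBytes) < 0) {
    // F'2023 7.4.4.2 p5: a negative character length means zero-length
    // entities (handling elided).
  }
  // Total byte size computed from the extents and element size (elided).
  std::size_t byteSize{/* ... */};
  // Zero-size allocation is possible in Fortran and the resulting descriptor
  // must still be allocated/associated; since std::malloc(0) is
  // implementation defined, always allocate at least one byte. The only
  // change in this commit is that asyncId replaces the hard-coded -1.
  void *p{alloc(byteSize ? byteSize : 1, asyncId)};
  if (!p) {
    return CFI_ERROR_MEM_ALLOCATION;
  }
  // ... establish the base address and (re)compute byte strides as before ...
  return CFI_SUCCESS;
}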

View File

@ -152,7 +152,7 @@ inline RT_API_ATTRS void CharacterMaxOrMinLoc(const char *intrinsic,
CFI_attribute_allocatable);
result.GetDimension(0).SetBounds(1, extent[0]);
Terminator terminator{source, line};
if (int stat{result.Allocate()}) {
if (int stat{result.Allocate(kNoAsyncId)}) {
terminator.Crash(
"%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
}
@ -181,7 +181,7 @@ inline RT_API_ATTRS void TotalNumericMaxOrMinLoc(const char *intrinsic,
CFI_attribute_allocatable);
result.GetDimension(0).SetBounds(1, extent[0]);
Terminator terminator{source, line};
if (int stat{result.Allocate()}) {
if (int stat{result.Allocate(kNoAsyncId)}) {
terminator.Crash(
"%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
}

View File

@ -220,7 +220,7 @@ void RTDEF(Findloc)(Descriptor &result, const Descriptor &x,
CFI_attribute_allocatable);
result.GetDimension(0).SetBounds(1, extent[0]);
Terminator terminator{source, line};
if (int stat{result.Allocate()}) {
if (int stat{result.Allocate(kNoAsyncId)}) {
terminator.Crash(
"FINDLOC: could not allocate memory for result; STAT=%d", stat);
}

View File

@ -183,7 +183,7 @@ inline static RT_API_ATTRS void DoMatmulTranspose(
for (int j{0}; j < resRank; ++j) {
result.GetDimension(j).SetBounds(1, extent[j]);
}
if (int stat{result.Allocate()}) {
if (int stat{result.Allocate(kNoAsyncId)}) {
terminator.Crash(
"MATMUL-TRANSPOSE: could not allocate memory for result; STAT=%d",
stat);

View File

@ -255,7 +255,7 @@ static inline RT_API_ATTRS void DoMatmul(
for (int j{0}; j < resRank; ++j) {
result.GetDimension(j).SetBounds(1, extent[j]);
}
if (int stat{result.Allocate()}) {
if (int stat{result.Allocate(kNoAsyncId)}) {
terminator.Crash(
"MATMUL: could not allocate memory for result; STAT=%d", stat);
}

View File

@ -30,7 +30,7 @@ static RT_API_ATTRS void TransferImpl(Descriptor &result,
if (const DescriptorAddendum * addendum{mold.Addendum()}) {
*result.Addendum() = *addendum;
}
if (int stat{result.Allocate()}) {
if (int stat{result.Allocate(kNoAsyncId)}) {
Terminator{sourceFile, line}.Crash(
"TRANSFER: could not allocate memory for result; STAT=%d", stat);
}

View File

@ -148,7 +148,7 @@ void DescriptorStorage<COPY_VALUES>::push(const Descriptor &source) {
if constexpr (COPY_VALUES) {
// copy the data pointed to by the box
box.set_base_addr(nullptr);
box.Allocate();
box.Allocate(kNoAsyncId);
RTNAME(AssignTemporary)
(box, source, terminator_.sourceFileName(), terminator_.sourceLine());
}

View File

@ -261,7 +261,7 @@ RT_API_ATTRS void CreatePartialReductionResult(Descriptor &result,
for (int j{0}; j + 1 < xRank; ++j) {
result.GetDimension(j).SetBounds(1, resultExtent[j]);
}
if (int stat{result.Allocate()}) {
if (int stat{result.Allocate(kNoAsyncId)}) {
terminator.Crash(
"%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
}

View File

@ -132,7 +132,7 @@ static inline RT_API_ATTRS std::size_t AllocateResult(Descriptor &result,
for (int j{0}; j < rank; ++j) {
result.GetDimension(j).SetBounds(1, extent[j]);
}
if (int stat{result.Allocate()}) {
if (int stat{result.Allocate(kNoAsyncId)}) {
terminator.Crash(
"%s: Could not allocate memory for result (stat=%d)", function, stat);
}
@ -157,7 +157,7 @@ static inline RT_API_ATTRS std::size_t AllocateBesselResult(Descriptor &result,
for (int j{0}; j < rank; ++j) {
result.GetDimension(j).SetBounds(1, extent[j]);
}
if (int stat{result.Allocate()}) {
if (int stat{result.Allocate(kNoAsyncId)}) {
terminator.Crash(
"%s: Could not allocate memory for result (stat=%d)", function, stat);
}

View File

@ -26,7 +26,7 @@ int main() {
for (int j{0}; j < 3; ++j) {
source->GetDimension(j).SetBounds(1, sourceExtent[j]);
}
TEST(source->Allocate() == CFI_SUCCESS);
TEST(source->Allocate(kNoAsyncId) == CFI_SUCCESS);
TEST(source->IsAllocated());
MATCH(2, source->GetDimension(0).Extent());
MATCH(3, source->GetDimension(1).Extent());

View File

@ -26,7 +26,7 @@ TEST(AllocatableTest, MoveAlloc) {
auto b{createAllocatable(TypeCategory::Integer, 4)};
// ALLOCATE(a(20))
a->GetDimension(0).SetBounds(1, 20);
a->Allocate();
a->Allocate(kNoAsyncId);
EXPECT_TRUE(a->IsAllocated());
EXPECT_FALSE(b->IsAllocated());
@ -46,7 +46,7 @@ TEST(AllocatableTest, MoveAlloc) {
// move_alloc with errMsg
auto errMsg{Descriptor::Create(
sizeof(char), 64, nullptr, 0, nullptr, CFI_attribute_allocatable)};
errMsg->Allocate();
errMsg->Allocate(kNoAsyncId);
RTNAME(MoveAlloc)(*b, *a, nullptr, false, errMsg.get(), __FILE__, __LINE__);
EXPECT_FALSE(a->IsAllocated());
EXPECT_TRUE(b->IsAllocated());

View File

@ -42,7 +42,7 @@ TEST(AllocatableCUFTest, SimpleDeviceAllocatable) {
CUDA_REPORT_IF_ERROR(cudaMalloc(&device_desc, a->SizeInBytes()));
RTNAME(AllocatableAllocate)
(*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
(*a, kNoAsyncId, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
EXPECT_TRUE(a->IsAllocated());
RTNAME(CUFDescriptorSync)(device_desc, a.get(), __FILE__, __LINE__);
cudaDeviceSynchronize();

View File

@ -35,7 +35,8 @@ TEST(AllocatableCUFTest, SimpleDeviceAllocate) {
EXPECT_FALSE(a->HasAddendum());
RTNAME(AllocatableSetBounds)(*a, 0, 1, 10);
RTNAME(AllocatableAllocate)
(*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
(*a, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__,
__LINE__);
EXPECT_TRUE(a->IsAllocated());
RTNAME(AllocatableDeallocate)
(*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
@ -53,7 +54,8 @@ TEST(AllocatableCUFTest, SimplePinnedAllocate) {
EXPECT_FALSE(a->HasAddendum());
RTNAME(AllocatableSetBounds)(*a, 0, 1, 10);
RTNAME(AllocatableAllocate)
(*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
(*a, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__,
__LINE__);
EXPECT_TRUE(a->IsAllocated());
RTNAME(AllocatableDeallocate)
(*a, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);

View File

@ -50,7 +50,8 @@ TEST(MemoryCUFTest, CUFDataTransferDescDesc) {
EXPECT_EQ((int)kDeviceAllocatorPos, dev->GetAllocIdx());
RTNAME(AllocatableSetBounds)(*dev, 0, 1, 10);
RTNAME(AllocatableAllocate)
(*dev, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);
(*dev, /*asyncId=*/-1, /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__,
__LINE__);
EXPECT_TRUE(dev->IsAllocated());
// Create temp array to transfer to device.

View File

@ -35,7 +35,7 @@ OwningPtr<Descriptor> CreateDescriptor(const std::vector<SubscriptValue> &shape,
for (int j{0}; j < rank; ++j) {
descriptor->GetDimension(j).SetBounds(2, shape[j] + 1);
}
if (descriptor->Allocate() != 0) {
if (descriptor->Allocate(kNoAsyncId) != 0) {
return nullptr;
}

View File

@ -26,7 +26,7 @@ template <std::size_t n = 64>
static OwningPtr<Descriptor> CreateEmptyCharDescriptor() {
OwningPtr<Descriptor> descriptor{Descriptor::Create(
sizeof(char), n, nullptr, 0, nullptr, CFI_attribute_allocatable)};
if (descriptor->Allocate() != 0) {
if (descriptor->Allocate(kNoAsyncId) != 0) {
return nullptr;
}
return descriptor;
@ -36,7 +36,7 @@ static OwningPtr<Descriptor> CharDescriptor(const char *value) {
std::size_t n{std::strlen(value)};
OwningPtr<Descriptor> descriptor{Descriptor::Create(
sizeof(char), n, nullptr, 0, nullptr, CFI_attribute_allocatable)};
if (descriptor->Allocate() != 0) {
if (descriptor->Allocate(kNoAsyncId) != 0) {
return nullptr;
}
std::memcpy(descriptor->OffsetElement(), value, n);
@ -47,7 +47,7 @@ template <int kind = sizeof(std::int64_t)>
static OwningPtr<Descriptor> EmptyIntDescriptor() {
OwningPtr<Descriptor> descriptor{Descriptor::Create(TypeCategory::Integer,
kind, nullptr, 0, nullptr, CFI_attribute_allocatable)};
if (descriptor->Allocate() != 0) {
if (descriptor->Allocate(kNoAsyncId) != 0) {
return nullptr;
}
return descriptor;
@ -57,7 +57,7 @@ template <int kind = sizeof(std::int64_t)>
static OwningPtr<Descriptor> IntDescriptor(const int &value) {
OwningPtr<Descriptor> descriptor{Descriptor::Create(TypeCategory::Integer,
kind, nullptr, 0, nullptr, CFI_attribute_allocatable)};
if (descriptor->Allocate() != 0) {
if (descriptor->Allocate(kNoAsyncId) != 0) {
return nullptr;
}
std::memcpy(descriptor->OffsetElement<int>(), &value, sizeof(int));

View File

@ -59,7 +59,7 @@ TEST(TemporaryStack, ValueStackBasic) {
Descriptor &outputDesc2{testDescriptorStorage[2].descriptor()};
inputDesc.Establish(code, elementBytes, descriptorPtr, rank, extent);
inputDesc.Allocate();
inputDesc.Allocate(kNoAsyncId);
ASSERT_EQ(inputDesc.IsAllocated(), true);
uint32_t *inputData = static_cast<uint32_t *>(inputDesc.raw().base_addr);
for (std::size_t i = 0; i < inputDesc.Elements(); ++i) {
@ -123,7 +123,7 @@ TEST(TemporaryStack, ValueStackMultiSize) {
boxDims.extent = extent[dim];
boxDims.sm = elementBytes;
}
desc->Allocate();
desc->Allocate(kNoAsyncId);
// fill the array with some data to test
for (uint32_t i = 0; i < desc->Elements(); ++i) {

View File

@ -42,7 +42,7 @@ static OwningPtr<Descriptor> MakeArray(const std::vector<int> &shape,
for (int j{0}; j < rank; ++j) {
result->GetDimension(j).SetBounds(1, shape[j]);
}
int stat{result->Allocate()};
int stat{result->Allocate(kNoAsyncId)};
EXPECT_EQ(stat, 0) << stat;
EXPECT_LE(data.size(), result->Elements());
char *p{result->OffsetElement<char>()};

View File

@ -94,9 +94,9 @@ int RTDECL(AllocatableCheckLengthParameter)(Descriptor &,
// Successfully allocated memory is initialized if the allocatable has a
// derived type, and is always initialized by AllocatableAllocateSource().
// Performs all necessary coarray synchronization and validation actions.
int RTDECL(AllocatableAllocate)(Descriptor &, bool hasStat = false,
const Descriptor *errMsg = nullptr, const char *sourceFile = nullptr,
int sourceLine = 0);
int RTDECL(AllocatableAllocate)(Descriptor &, std::int64_t asyncId = -1,
bool hasStat = false, const Descriptor *errMsg = nullptr,
const char *sourceFile = nullptr, int sourceLine = 0);
int RTDECL(AllocatableAllocateSource)(Descriptor &, const Descriptor &source,
bool hasStat = false, const Descriptor *errMsg = nullptr,
const char *sourceFile = nullptr, int sourceLine = 0);
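
Because asyncId defaults to -1, C++ code that includes this header and calls AllocatableAllocate with the old argument list keeps compiling, while the runtime entry point itself now expects the extra i64, so every lowered call site (see the FIR and LLVM test updates below) passes it explicitly. A hedged usage sketch; desc and streamId are placeholder names, not from the patch:

// Equivalent synchronous allocations:
RTNAME(AllocatableAllocate)(desc); // asyncId defaults to -1
RTNAME(AllocatableAllocate)(desc, /*asyncId=*/-1,
    /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);

// Stream-ordered allocation, as the CUDA Fortran wrapper now does:
RTNAME(AllocatableAllocate)(desc, streamId,
    /*hasStat=*/false, /*errMsg=*/nullptr, __FILE__, __LINE__);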

View File

@ -186,9 +186,14 @@ static mlir::Value genRuntimeAllocate(fir::FirOpBuilder &builder,
? fir::runtime::getRuntimeFunc<mkRTKey(PointerAllocate)>(loc, builder)
: fir::runtime::getRuntimeFunc<mkRTKey(AllocatableAllocate)>(loc,
builder);
llvm::SmallVector<mlir::Value> args{
box.getAddr(), errorManager.hasStat, errorManager.errMsgAddr,
errorManager.sourceFile, errorManager.sourceLine};
llvm::SmallVector<mlir::Value> args{box.getAddr()};
if (!box.isPointer())
args.push_back(
builder.createIntegerConstant(loc, builder.getI64Type(), -1));
args.push_back(errorManager.hasStat);
args.push_back(errorManager.errMsgAddr);
args.push_back(errorManager.sourceFile);
args.push_back(errorManager.sourceLine);
llvm::SmallVector<mlir::Value> operands;
for (auto [fst, snd] : llvm::zip(args, callee.getFunctionType().getInputs()))
operands.emplace_back(builder.createConvert(loc, snd, fst));
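
One subtlety in this lowering hunk: the helper is shared between pointer and allocatable allocation, and only AllocatableAllocate gained the asyncId parameter in this patch, so the i64 constant is inserted conditionally. Restating the argument assembly from the hunk above with explanatory comments:

llvm::SmallVector<mlir::Value> args{box.getAddr()};
if (!box.isPointer())
  // AllocatableAllocate now expects an i64 asyncId right after the box;
  // PointerAllocate keeps its old signature, so nothing is added for pointers.
  args.push_back(
      builder.createIntegerConstant(loc, builder.getI64Type(), -1));
args.push_back(errorManager.hasStat);
args.push_back(errorManager.errMsgAddr);
args.push_back(errorManager.sourceFile);
args.push_back(errorManager.sourceLine);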

View File

@ -76,16 +76,19 @@ void fir::runtime::genAllocatableAllocate(fir::FirOpBuilder &builder,
mlir::func::FuncOp func{
fir::runtime::getRuntimeFunc<mkRTKey(AllocatableAllocate)>(loc, builder)};
mlir::FunctionType fTy{func.getFunctionType()};
mlir::Value asyncId =
builder.createIntegerConstant(loc, builder.getI64Type(), -1);
mlir::Value sourceFile{fir::factory::locationToFilename(builder, loc)};
mlir::Value sourceLine{
fir::factory::locationToLineNo(builder, loc, fTy.getInput(4))};
fir::factory::locationToLineNo(builder, loc, fTy.getInput(5))};
if (!hasStat)
hasStat = builder.createBool(loc, false);
if (!errMsg) {
mlir::Type boxNoneTy = fir::BoxType::get(builder.getNoneType());
errMsg = builder.create<fir::AbsentOp>(loc, boxNoneTy).getResult();
}
llvm::SmallVector<mlir::Value> args{fir::runtime::createArguments(
builder, loc, fTy, desc, hasStat, errMsg, sourceFile, sourceLine)};
llvm::SmallVector<mlir::Value> args{
fir::runtime::createArguments(builder, loc, fTy, desc, asyncId, hasStat,
errMsg, sourceFile, sourceLine)};
builder.create<fir::CallOp>(loc, func, args);
}

View File

@ -191,7 +191,7 @@ func.func @test_polymorphic(%arg0: !fir.class<!fir.type<_QMtypesTt>> {fir.bindc_
// CHECK: %[[VAL_35:.*]] = fir.absent !fir.box<none>
// CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_4]] : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>) -> !fir.ref<!fir.box<none>>
// CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_31]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
// CHECK: %[[VAL_38:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_36]], %[[VAL_34]], %[[VAL_35]], %[[VAL_37]], %[[VAL_33]]) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_38:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_36]], %{{.*}}, %[[VAL_34]], %[[VAL_35]], %[[VAL_37]], %[[VAL_33]]) : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_12:.*]] = arith.constant true
// CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_13]]#0 : !fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>
// CHECK: %[[VAL_40:.*]] = arith.constant 1 : index
@ -275,7 +275,7 @@ func.func @test_polymorphic_expr(%arg0: !fir.class<!fir.type<_QMtypesTt>> {fir.b
// CHECK: %[[VAL_36:.*]] = fir.absent !fir.box<none>
// CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_5]] : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>) -> !fir.ref<!fir.box<none>>
// CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_32]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
// CHECK: %[[VAL_39:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_37]], %[[VAL_35]], %[[VAL_36]], %[[VAL_38]], %[[VAL_34]]) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_39:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_37]], %{{.*}}, %[[VAL_35]], %[[VAL_36]], %[[VAL_38]], %[[VAL_34]]) : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_13:.*]] = arith.constant true
// CHECK: %[[VAL_40:.*]] = fir.load %[[VAL_14]]#0 : !fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>
// CHECK: %[[VAL_41:.*]] = arith.constant 1 : index
@ -328,7 +328,7 @@ func.func @test_polymorphic_expr(%arg0: !fir.class<!fir.type<_QMtypesTt>> {fir.b
// CHECK: %[[VAL_85:.*]] = fir.absent !fir.box<none>
// CHECK: %[[VAL_86:.*]] = fir.convert %[[VAL_4]] : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>) -> !fir.ref<!fir.box<none>>
// CHECK: %[[VAL_87:.*]] = fir.convert %[[VAL_81]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
// CHECK: %[[VAL_88:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_86]], %[[VAL_84]], %[[VAL_85]], %[[VAL_87]], %[[VAL_83]]) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_88:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_86]], %{{.*}}, %[[VAL_84]], %[[VAL_85]], %[[VAL_87]], %[[VAL_83]]) : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_62:.*]] = arith.constant true
// CHECK: %[[VAL_89:.*]] = fir.load %[[VAL_63]]#0 : !fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMtypesTt>>>>>
// CHECK: %[[VAL_90:.*]] = arith.constant 1 : index

View File

@ -473,6 +473,6 @@ contains
end module
! CHECK-LABEL: func.func @_QMacc_declare_post_action_statPinit()
! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEx_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEx_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: fir.if
! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEy_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEy_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32

View File

@ -434,6 +434,6 @@ contains
end module
! CHECK-LABEL: func.func @_QMacc_declare_post_action_statPinit()
! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEx_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEx_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: fir.if
! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEy_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEy_acc_declare_update_desc_post_alloc>} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32

View File

@ -267,7 +267,7 @@ contains
! CHECK: %[[C0:.*]] = arith.constant 0 : i32
! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[P_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref<!fir.box<none>>, !fir.ref<none>, i32, i32) -> ()
! CHECK: %[[P_CAST:.*]] = fir.convert %[[P_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[P_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[P_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %[[TYPE_DESC_P1:.*]] = fir.type_desc !fir.type<_QMpolyTp1{a:i32,b:i32}>
! CHECK: %[[C1_CAST:.*]] = fir.convert %[[C1_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>) -> !fir.ref<!fir.box<none>>
@ -276,7 +276,7 @@ contains
! CHECK: %[[C0:.*]] = arith.constant 0 : i32
! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C1_CAST]], %[[TYPE_DESC_P1_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref<!fir.box<none>>, !fir.ref<none>, i32, i32) -> ()
! CHECK: %[[C1_CAST:.*]] = fir.convert %[[C1_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C1_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C1_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %[[TYPE_DESC_P2:.*]] = fir.type_desc !fir.type<_QMpolyTp2{p1:!fir.type<_QMpolyTp1{a:i32,b:i32}>,c:i32}>
! CHECK: %[[C2_CAST:.*]] = fir.convert %[[C2_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>) -> !fir.ref<!fir.box<none>>
@ -285,7 +285,7 @@ contains
! CHECK: %[[C0:.*]] = arith.constant 0 : i32
! CHECK: fir.call @_FortranAAllocatableInitDerivedForAllocate(%[[C2_CAST]], %[[TYPE_DESC_P2_CAST]], %[[RANK]], %[[C0]]) {{.*}}: (!fir.ref<!fir.box<none>>, !fir.ref<none>, i32, i32) -> ()
! CHECK: %[[C2_CAST:.*]] = fir.convert %[[C2_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C2_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C2_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %[[TYPE_DESC_P1:.*]] = fir.type_desc !fir.type<_QMpolyTp1{a:i32,b:i32}>
! CHECK: %[[C3_CAST:.*]] = fir.convert %[[C3_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>>) -> !fir.ref<!fir.box<none>>
@ -300,7 +300,7 @@ contains
! CHECK: %[[C10_I64:.*]] = fir.convert %[[C10]] : (i32) -> i64
! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[C3_CAST]], %[[C0]], %[[C1_I64]], %[[C10_I64]]) {{.*}}: (!fir.ref<!fir.box<none>>, i32, i64, i64) -> ()
! CHECK: %[[C3_CAST:.*]] = fir.convert %[[C3_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C3_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C3_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %[[TYPE_DESC_P2:.*]] = fir.type_desc !fir.type<_QMpolyTp2{p1:!fir.type<_QMpolyTp1{a:i32,b:i32}>,c:i32}>
! CHECK: %[[C4_CAST:.*]] = fir.convert %[[C4_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>>) -> !fir.ref<!fir.box<none>>
@ -316,7 +316,7 @@ contains
! CHECK: %[[C20_I64:.*]] = fir.convert %[[C20]] : (i32) -> i64
! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[C4_CAST]], %[[C0]], %[[C1_I64]], %[[C20_I64]]) {{.*}}: (!fir.ref<!fir.box<none>>, i32, i64, i64) -> ()
! CHECK: %[[C4_CAST:.*]] = fir.convert %[[C4_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C4_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[C4_CAST]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %[[C1_LOAD1:.*]] = fir.load %[[C1_DECL]]#0 : !fir.ref<!fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>>
! CHECK: fir.dispatch "proc1"(%[[C1_LOAD1]] : !fir.class<!fir.heap<!fir.type<_QMpolyTp1{a:i32,b:i32}>>>)
@ -390,7 +390,7 @@ contains
! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32
! CHECK: fir.call @_FortranAAllocatableInitIntrinsicForAllocate(%[[BOX_NONE]], %[[CAT]], %[[KIND]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref<!fir.box<none>>, i32, i32, i32, i32) -> ()
! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[P_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<none>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[PTR_DECL]]#0 : (!fir.ref<!fir.class<!fir.ptr<none>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %[[CAT:.*]] = arith.constant 2 : i32
@ -573,7 +573,7 @@ contains
! CHECK: %[[CORANK:.*]] = arith.constant 0 : i32
! CHECK: fir.call @_FortranAAllocatableInitCharacterForAllocate(%[[A_NONE]], %[[LEN]], %[[KIND]], %[[RANK]], %[[CORANK]]) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i32, i32, i32) -> ()
! CHECK: %[[A_NONE:.*]] = fir.convert %[[A_DECL]]#0 : (!fir.ref<!fir.class<!fir.heap<none>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
end module
@ -592,17 +592,17 @@ end
! LLVM-LABEL: define void @_QMpolyPtest_allocatable()
! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 0, i32 0)
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i64 {{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 0, i32 0)
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i64 {{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp2, i32 0, i32 0)
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i64 {{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp1, i32 1, i32 0)
! LLVM: call void @_FortranAAllocatableSetBounds(ptr %{{.*}}, i32 0, i64 1, i64 10)
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i64 {{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %{{.*}}, ptr @_QMpolyEXdtXp2, i32 1, i32 0)
! LLVM: call void @_FortranAAllocatableSetBounds(ptr %{{.*}}, i32 0, i64 1, i64 20)
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %{{.*}}, i64 {{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM-COUNT-2: call void %{{[0-9]*}}()
! LLVM: call void @llvm.memcpy.p0.p0.i32
@ -683,5 +683,5 @@ end
! LLVM: store { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } { ptr null, i64 8, i32 20240719, i8 0, i8 42, i8 2, i8 1, ptr @_QMpolyEXdtXp1, [1 x i64] zeroinitializer }, ptr %[[ALLOCA1:[0-9]*]]
! LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[ALLOCA2:[0-9]+]], ptr %[[ALLOCA1]], i32 40, i1 false)
! LLVM: call void @_FortranAAllocatableInitDerivedForAllocate(ptr %[[ALLOCA2]], ptr @_QMpolyEXdtXp1, i32 0, i32 0)
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %[[ALLOCA2]], i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableAllocate(ptr %[[ALLOCA2]], i64 {{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})
! LLVM: %{{.*}} = call i32 @_FortranAAllocatableDeallocatePolymorphic(ptr %[[ALLOCA2]], ptr {{.*}}, i1 false, ptr null, ptr @_QQclX{{.*}}, i32 {{.*}})

View File

@ -31,7 +31,7 @@ subroutine foo()
! CHECK: fir.call @{{.*}}AllocatableSetBounds(%[[xBoxCast2]], %c0{{.*}}, %[[xlbCast]], %[[xubCast]]) {{.*}}: (!fir.ref<!fir.box<none>>, i32, i64, i64) -> ()
! CHECK-DAG: %[[xBoxCast3:.*]] = fir.convert %[[xBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
! CHECK-DAG: %[[sourceFile:.*]] = fir.convert %{{.*}} -> !fir.ref<i8>
! CHECK: fir.call @{{.*}}AllocatableAllocate(%[[xBoxCast3]], %false{{.*}}, %[[errMsg]], %[[sourceFile]], %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: fir.call @{{.*}}AllocatableAllocate(%[[xBoxCast3]], %{{.*}}, %false{{.*}}, %[[errMsg]], %[[sourceFile]], %{{.*}}) {{.*}}: (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! Simply check that we are emitting the right number of set bounds for y and z. Otherwise, this is just like x.
! CHECK: fir.convert %[[yBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
@ -180,4 +180,4 @@ end subroutine
! CHECK: %[[M_BOX_NONE:.*]] = fir.convert %[[EMBOX_M]] : (!fir.box<!fir.array<10xi32>>) -> !fir.box<none>
! CHECK: fir.call @_FortranAAllocatableApplyMold(%[[A_BOX_NONE]], %[[M_BOX_NONE]], %[[RANK]]) {{.*}} : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32) -> ()
! CHECK: %[[A_BOX_NONE:.*]] = fir.convert %[[A]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32

View File

@ -16,7 +16,7 @@ end subroutine
! CHECK: %[[A_REF_BOX_NONE1:.*]] = fir.convert %[[A]] : (!fir.ref<!fir.box<!fir.heap<i32>>>) -> !fir.ref<!fir.box<none>>
! CHECK: fir.call @_FortranAAllocatableApplyMold(%[[A_REF_BOX_NONE1]], %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, !fir.box<none>, i32) -> ()
! CHECK: %[[A_REF_BOX_NONE2:.*]] = fir.convert %[[A]] : (!fir.ref<!fir.box<!fir.heap<i32>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_REF_BOX_NONE2]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[A_REF_BOX_NONE2]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
subroutine array_scalar_mold_allocation()
real, allocatable :: a(:)
@ -40,4 +40,4 @@ end subroutine array_scalar_mold_allocation
! CHECK: %[[REF_BOX_A1:.*]] = fir.convert %1 : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
! CHECK: fir.call @_FortranAAllocatableSetBounds(%[[REF_BOX_A1]], {{.*}},{{.*}}, {{.*}}) fastmath<contract> : (!fir.ref<!fir.box<none>>, i32, i64, i64) -> ()
! CHECK: %[[REF_BOX_A2:.*]] = fir.convert %[[A]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[REF_BOX_A2]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[REF_BOX_A2]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32

View File

@ -1149,7 +1149,7 @@ end program
! CHECK-LABEL: func.func @_QQmain() attributes {fir.bindc_name = "test"} {
! CHECK: %[[ADDR_O:.*]] = fir.address_of(@_QFEo) : !fir.ref<!fir.box<!fir.heap<!fir.type<_QMpolymorphic_testTouter{inner:!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>}>>>>
! CHECK: %[[BOX_NONE:.*]] = fir.convert %[[ADDR_O]] : (!fir.ref<!fir.box<!fir.heap<!fir.type<_QMpolymorphic_testTouter{inner:!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>}>>>>) -> !fir.ref<!fir.box<none>>
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %{{.*}} = fir.call @_FortranAAllocatableAllocate(%[[BOX_NONE]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
! CHECK: %[[O:.*]] = fir.load %[[ADDR_O]] : !fir.ref<!fir.box<!fir.heap<!fir.type<_QMpolymorphic_testTouter{inner:!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>}>>>>
! CHECK: %[[COORD_INNER:.*]] = fir.coordinate_of %[[O]], inner : (!fir.box<!fir.heap<!fir.type<_QMpolymorphic_testTouter{inner:!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>}>>>) -> !fir.ref<!fir.type<_QMpolymorphic_testTp1{a:i32,b:i32}>>
! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} unordered iter_args(%arg1 = %{{.*}}) -> (!fir.array<5x!fir.logical<4>>) {

View File

@ -840,7 +840,7 @@ func.func @_QPtest6(%arg0: !fir.class<!fir.array<?x?x!fir.type<_QMmTt>>> {fir.bi
// CHECK: %[[VAL_34:.*]] = fir.absent !fir.box<none>
// CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_7]] : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMmTt>>>>>) -> !fir.ref<!fir.box<none>>
// CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_33]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
// CHECK: %[[VAL_37:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_35]], %[[VAL_6]], %[[VAL_34]], %[[VAL_36]], %[[VAL_2]]) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_37:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_35]], %{{.*}}, %[[VAL_6]], %[[VAL_34]], %[[VAL_36]], %[[VAL_2]]) : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_22]] : !fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMmTt>>>>>
// CHECK: %[[VAL_39:.*]] = fir.address_of(@{{_QQcl.*}}
// CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_38]] : (!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMmTt>>>>) -> !fir.box<none>
@ -928,7 +928,7 @@ func.func @_QPtest6_stack(%arg0: !fir.class<!fir.array<?x?x!fir.type<_QMmTt>>> {
// CHECK: %[[VAL_34:.*]] = fir.absent !fir.box<none>
// CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_7]] : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMmTt>>>>>) -> !fir.ref<!fir.box<none>>
// CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_33]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
// CHECK: %[[VAL_37:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_35]], %[[VAL_6]], %[[VAL_34]], %[[VAL_36]], %[[VAL_2]]) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_37:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_35]], %{{.*}}, %[[VAL_6]], %[[VAL_34]], %[[VAL_36]], %[[VAL_2]]) : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_22]] : !fir.ref<!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMmTt>>>>>
// CHECK: %[[VAL_39:.*]] = fir.address_of(@{{_QQcl.*}}
// CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_38]] : (!fir.class<!fir.heap<!fir.array<?x?x!fir.type<_QMmTt>>>>) -> !fir.box<none>
@ -1015,7 +1015,7 @@ func.func @_QPtest7(%arg0: !fir.class<!fir.array<?x?xnone>> {fir.bindc_name = "x
// CHECK: %[[VAL_34:.*]] = fir.absent !fir.box<none>
// CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_7]] : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x?xnone>>>>) -> !fir.ref<!fir.box<none>>
// CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_33]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
// CHECK: %[[VAL_37:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_35]], %[[VAL_6]], %[[VAL_34]], %[[VAL_36]], %[[VAL_2]]) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_37:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_35]], %{{.*}}, %[[VAL_6]], %[[VAL_34]], %[[VAL_36]], %[[VAL_2]]) : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_22]] : !fir.ref<!fir.class<!fir.heap<!fir.array<?x?xnone>>>>
// CHECK: %[[VAL_39:.*]] = fir.address_of(@{{_QQcl.*}}
// CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_38]] : (!fir.class<!fir.heap<!fir.array<?x?xnone>>>) -> !fir.box<none>
@ -1103,7 +1103,7 @@ func.func @_QPtest7_stack(%arg0: !fir.class<!fir.array<?x?xnone>> {fir.bindc_nam
// CHECK: %[[VAL_34:.*]] = fir.absent !fir.box<none>
// CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_7]] : (!fir.ref<!fir.class<!fir.heap<!fir.array<?x?xnone>>>>) -> !fir.ref<!fir.box<none>>
// CHECK: %[[VAL_36:.*]] = fir.convert %[[VAL_33]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
// CHECK: %[[VAL_37:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_35]], %[[VAL_6]], %[[VAL_34]], %[[VAL_36]], %[[VAL_2]]) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_37:.*]] = fir.call @_FortranAAllocatableAllocate(%[[VAL_35]], %{{.*}}, %[[VAL_6]], %[[VAL_34]], %[[VAL_36]], %[[VAL_2]]) : (!fir.ref<!fir.box<none>>, i64, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
// CHECK: %[[VAL_38:.*]] = fir.load %[[VAL_22]] : !fir.ref<!fir.class<!fir.heap<!fir.array<?x?xnone>>>>
// CHECK: %[[VAL_39:.*]] = fir.address_of(@{{_QQcl.*}}
// CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_38]] : (!fir.class<!fir.heap<!fir.array<?x?xnone>>>) -> !fir.box<none>