Backing out r174919 while I investigate a self-host bug on Takumi's builder.

llvm-svn: 174925
Lang Hames 2013-02-12 00:44:43 +00:00
parent 209d63af48
commit 697b004219
6 changed files with 11 additions and 367 deletions
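
For context: the patch being backed out taught IRGen to fuse runs of adjacent, trivially-copyable field copies in implicitly-defined copy/move constructors and copy-assignment operators into a single llvm.memcpy; that machinery is what the diffs below delete. A made-up example of the kind of class it affected (Widget and its members are illustrative only, not from the patch or its tests):

#include <string>

struct Widget {
  int a, b, c;    // adjacent memcpy-able fields: aggregated into one memcpy
  std::string s;  // non-trivial member: breaks the run, needs a real ctor call
  int d, e;       // a second, smaller memcpy-able run
};

With the patch applied, Widget's implicit copy constructor lowered to a memcpy covering a..c, a call to the std::string copy constructor, then a memcpy covering d..e. The revert falls back to the older per-member emission which, as the test updates below show, still produced fused copies but with slightly different extents (24 vs 28 bytes, and 288 + 12 vs a single 300).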


@@ -18,7 +18,6 @@
 #include "clang/AST/EvaluatedExprVisitor.h"
 #include "clang/AST/RecordLayout.h"
 #include "clang/AST/StmtCXX.h"
-#include "clang/Basic/TargetBuiltins.h"
 #include "clang/Frontend/CodeGenOptions.h"
 using namespace clang;
@@ -743,342 +742,6 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
     ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
 }
 
-namespace {
-  class FieldMemcpyizer {
-  public:
-    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
-                    const VarDecl *SrcRec)
-      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
-        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
-        FirstField(0), LastField(0), FirstFieldOffset(0), LastFieldOffset(0),
-        LastAddedFieldIndex(0) { }
-
-    static bool isMemcpyableField(FieldDecl *F) {
-      Qualifiers Qual = F->getType().getQualifiers();
-      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
-        return false;
-      return true;
-    }
-
-    void addMemcpyableField(FieldDecl *F) {
-      if (FirstField == 0)
-        addInitialField(F);
-      else
-        addNextField(F);
-    }
-
-    CharUnits getMemcpySize() const {
-      unsigned LastFieldSize =
-        CGF.getContext().getTypeInfo(LastField->getType()).first;
-      uint64_t MemcpySizeBits =
-        LastFieldOffset + LastFieldSize - FirstFieldOffset +
-        CGF.getContext().getCharWidth() - 1;
-      CharUnits MemcpySize =
-        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
-      return MemcpySize;
-    }
-
-    void emitMemcpy() {
-      // Give the subclass a chance to bail out if it feels the memcpy isn't
-      // worth it (e.g. it hasn't aggregated enough data).
-      if (FirstField == 0) {
-        return;
-      }
-
-      unsigned FirstFieldAlign =
-        CGF.getContext().getTypeInfo(FirstField->getType()).second;
-      assert(FirstFieldOffset % FirstFieldAlign == 0 && "Bad field alignment.");
-      CharUnits Alignment =
-        CGF.getContext().toCharUnitsFromBits(FirstFieldAlign);
-      CharUnits MemcpySize = getMemcpySize();
-      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
-      llvm::Value *ThisPtr = CGF.LoadCXXThis();
-      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
-      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
-      llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
-      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
-      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
-
-      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
-                   Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
-                   MemcpySize, Alignment);
-      reset();
-    }
-
-    void reset() {
-      FirstField = 0;
-    }
-
-  protected:
-    CodeGenFunction &CGF;
-    const CXXRecordDecl *ClassDecl;
-
-  private:
-    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
-                      CharUnits Size, CharUnits Alignment) {
-      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
-      llvm::Type *DBP =
-        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
-      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
-
-      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
-      llvm::Type *SBP =
-        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
-      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
-
-      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
-                               Alignment.getQuantity());
-    }
-
-    void addInitialField(FieldDecl *F) {
-      FirstField = F;
-      LastField = F;
-      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
-      LastFieldOffset = FirstFieldOffset;
-      LastAddedFieldIndex = F->getFieldIndex();
-      return;
-    }
-
-    void addNextField(FieldDecl *F) {
-      assert(F->getFieldIndex() == LastAddedFieldIndex + 1 &&
-             "Cannot aggregate non-contiguous fields.");
-      LastAddedFieldIndex = F->getFieldIndex();
-
-      // The 'first' and 'last' fields are chosen by offset, rather than field
-      // index. This allows the code to support bitfields, as well as regular
-      // fields.
-      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
-      if (FOffset < FirstFieldOffset) {
-        FirstField = F;
-        FirstFieldOffset = FOffset;
-      } else if (FOffset > LastFieldOffset) {
-        LastField = F;
-        LastFieldOffset = FOffset;
-      }
-    }
-
-    const VarDecl *SrcRec;
-    const ASTRecordLayout &RecLayout;
-    FieldDecl *FirstField;
-    FieldDecl *LastField;
-    uint64_t FirstFieldOffset, LastFieldOffset;
-    unsigned LastAddedFieldIndex;
-  };
-
-  class ConstructorMemcpyizer : public FieldMemcpyizer {
-  private:
-    /// Get source argument for copy constructor. Returns null if not a copy
-    /// constructor.
-    static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
-                                               FunctionArgList &Args) {
-      if (CD->isCopyOrMoveConstructor() && CD->isImplicitlyDefined())
-        return Args[Args.size() - 1];
-      return 0;
-    }
-
-    // Returns true if a CXXCtorInitializer represents a member initialization
-    // that can be rolled into a memcpy.
-    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
-      if (!MemcpyableCtor)
-        return false;
-      FieldDecl *Field = MemberInit->getMember();
-      assert(Field != 0 && "No field for member init.");
-      QualType FieldType = Field->getType();
-      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
-
-      // Bail out on non-POD, not-trivially-constructible members.
-      if (!(CE && CE->getConstructor()->isTrivial()) &&
-          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
-            FieldType->isReferenceType()))
-        return false;
-
-      // Bail out on volatile fields.
-      if (!isMemcpyableField(Field))
-        return false;
-
-      // Otherwise we're good.
-      return true;
-    }
-
-  public:
-    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
-                          FunctionArgList &Args)
-      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
-        ConstructorDecl(CD),
-        MemcpyableCtor(CD->isImplicitlyDefined() &&
-                       CD->isCopyOrMoveConstructor() &&
-                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
-        Args(Args) { }
-
-    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
-      if (isMemberInitMemcpyable(MemberInit)) {
-        AggregatedInits.push_back(MemberInit);
-        addMemcpyableField(MemberInit->getMember());
-      } else {
-        emitAggregatedInits();
-        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
-                              ConstructorDecl, Args);
-      }
-    }
-
-    void emitAggregatedInits() {
-      if (AggregatedInits.size() <= 1) {
-        // This memcpy is too small to be worthwhile. Fall back on default
-        // codegen.
-        for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
-          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
-                                AggregatedInits[i], ConstructorDecl, Args);
-        }
-        reset();
-        return;
-      }
-
-      pushEHDestructors();
-      emitMemcpy();
-      AggregatedInits.clear();
-    }
-
-    void pushEHDestructors() {
-      llvm::Value *ThisPtr = CGF.LoadCXXThis();
-      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
-      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
-
-      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
-        QualType FieldType = AggregatedInits[i]->getMember()->getType();
-        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
-        if (CGF.needsEHCleanup(dtorKind))
-          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
-      }
-    }
-
-    void finish() {
-      emitAggregatedInits();
-    }
-
-  private:
-    const CXXConstructorDecl *ConstructorDecl;
-    bool MemcpyableCtor;
-    FunctionArgList &Args;
-    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
-  };
-
-  class AssignmentMemcpyizer : public FieldMemcpyizer {
-  private:
-    // Returns the memcpyable field copied by the given statement, if one
-    // exists. Otherwise returns null.
-    FieldDecl* getMemcpyableField(Stmt *S) {
-      if (!AssignmentsMemcpyable)
-        return 0;
-      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
-        // Recognise trivial assignments.
-        if (BO->getOpcode() != BO_Assign)
-          return 0;
-        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
-        if (!ME)
-          return 0;
-        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
-        if (!Field || !isMemcpyableField(Field))
-          return 0;
-        Stmt *RHS = BO->getRHS();
-        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
-          RHS = EC->getSubExpr();
-        if (!RHS)
-          return 0;
-        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
-        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
-          return 0;
-        return Field;
-      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
-        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
-        if (!(MD && (MD->isCopyAssignmentOperator() ||
-                     MD->isMoveAssignmentOperator()) &&
-              MD->isTrivial()))
-          return 0;
-        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
-        if (!IOA)
-          return 0;
-        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
-        if (!Field || !isMemcpyableField(Field))
-          return 0;
-        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
-        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
-          return 0;
-        return Field;
-      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
-        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
-        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
-          return 0;
-        Expr *DstPtr = CE->getArg(0);
-        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
-          DstPtr = DC->getSubExpr();
-        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
-        if (!DUO || DUO->getOpcode() != UO_AddrOf)
-          return 0;
-        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
-        if (!ME)
-          return 0;
-        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
-        if (!Field || !isMemcpyableField(Field))
-          return 0;
-        Expr *SrcPtr = CE->getArg(1);
-        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
-          SrcPtr = SC->getSubExpr();
-        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
-        if (!SUO || SUO->getOpcode() != UO_AddrOf)
-          return 0;
-        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
-        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
-          return 0;
-        return Field;
-      }
-      return 0;
-    }
-
-    bool AssignmentsMemcpyable;
-    SmallVector<Stmt*, 16> AggregatedStmts;
-
-  public:
-    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
-                         FunctionArgList &Args)
-      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
-        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
-      assert(Args.size() == 2);
-    }
-
-    void emitAssignment(Stmt *S) {
-      FieldDecl *F = getMemcpyableField(S);
-      if (F) {
-        addMemcpyableField(F);
-        AggregatedStmts.push_back(S);
-      } else {
-        emitAggregatedStmts();
-        CGF.EmitStmt(S);
-      }
-    }
-
-    void emitAggregatedStmts() {
-      if (AggregatedStmts.size() <= 1) {
-        for (unsigned i = 0; i < AggregatedStmts.size(); ++i)
-          CGF.EmitStmt(AggregatedStmts[i]);
-        reset();
-      }
-
-      emitMemcpy();
-      AggregatedStmts.clear();
-    }
-
-    void finish() {
-      emitAggregatedStmts();
-    }
-  };
-
-}
-
 /// EmitCtorPrologue - This routine generates necessary code to initialize
 /// base classes and non-static data members belonging to this constructor.
 void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
@@ -1107,10 +770,8 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
   InitializeVTablePointers(ClassDecl);
 
-  ConstructorMemcpyizer CM(*this, CD, Args);
   for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I)
-    CM.addMemberInitializer(MemberInitializers[I]);
-  CM.finish();
+    EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
 }
 
 static bool
@@ -1279,24 +940,6 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
     ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
 }
 
-void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
-  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
-  const Stmt *RootS = AssignOp->getBody();
-  assert(isa<CompoundStmt>(RootS) &&
-         "Body of an implicit assignment operator should be compound stmt.");
-  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);
-
-  LexicalScope Scope(*this, RootCS->getSourceRange());
-
-  AssignmentMemcpyizer AM(*this, AssignOp, Args);
-  for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
-                                         E = RootCS->body_end();
-       I != E; ++I) {
-    AM.emitAssignment(*I);
-  }
-  AM.finish();
-}
-
 namespace {
   /// Call the operator delete associated with the current destructor.
   struct CallDtorDelete : EHScopeStack::Cleanup {
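
A note on the deleted machinery above: FieldMemcpyizer tracks the run it is accumulating by record-layout bit offsets rather than by field index, so bitfields fold into the range naturally, and getMemcpySize() rounds the covered bit range up to whole chars. A standalone sketch of just that bookkeeping (FieldRange is a simplified stand-in written for this note, with charWidth assumed to be 8 as getCharWidth() returns on common targets):

#include <cstdint>

// Simplified model of FieldMemcpyizer's range tracking; offsets/sizes in bits.
struct FieldRange {
  uint64_t FirstOffset = 0; // lowest bit offset seen so far
  uint64_t LastOffset = 0;  // highest bit offset seen so far
  uint64_t LastSize = 0;    // bit width of the field at LastOffset
  bool Empty = true;

  void add(uint64_t OffsetBits, uint64_t SizeBits) {
    if (Empty) {
      FirstOffset = LastOffset = OffsetBits;
      LastSize = SizeBits;
      Empty = false;
    } else if (OffsetBits < FirstOffset) {
      // Ordered by offset, not declaration index, so a bitfield sharing a
      // storage unit with an earlier field still extends the range correctly.
      FirstOffset = OffsetBits;
    } else if (OffsetBits > LastOffset) {
      LastOffset = OffsetBits;
      LastSize = SizeBits;
    }
  }

  // (LastOffset + LastSize - FirstOffset) bits, rounded up to whole chars,
  // exactly as getMemcpySize() computes via toCharUnitsFromBits.
  uint64_t sizeInBytes(uint64_t CharWidth = 8) const {
    return (LastOffset + LastSize - FirstOffset + CharWidth - 1) / CharWidth;
  }
};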


@@ -559,11 +559,6 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
     // The lambda "__invoke" function is special, because it forwards or
     // clones the body of the function call operator (but is actually static).
     EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
-  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
-             cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator()) {
-    // Implicit copy-assignment gets the same special treatment as implicit
-    // copy-constructors.
-    emitImplicitAssignmentOperatorBody(Args);
   }
   else
     EmitFunctionBody(Args);
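
The branch removed here is what routed implicitly-defined copy-assignment operators into the AssignmentMemcpyizer path. A minimal, made-up example of a definition that would have satisfied the isDefaulted()/isCopyAssignmentOperator() test (M and T are illustrative names):

struct M { M &operator=(const M &); }; // user-provided, so T's implicit
                                       // operator= is non-trivial and a body
                                       // actually gets emitted
struct T {
  int a, b;  // memcpy-able pair
  M m;       // needs a real call
  // T::operator=(const T&) is implicitly declared: isDefaulted() is true
};

// Odr-using the operator forces its compiler-written body through
// GenerateCode, which is where this branch chose between the two paths.
void assign(T &lhs, const T &rhs) { lhs = rhs; }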


@@ -1402,7 +1402,6 @@ public:
   void EmitConstructorBody(FunctionArgList &Args);
   void EmitDestructorBody(FunctionArgList &Args);
-  void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
   void EmitFunctionBody(FunctionArgList &Args);
   void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,


@@ -96,8 +96,14 @@ int main() {
 // CHECK-LP64: .globl __ZN1XaSERKS_
 // CHECK-LP64: .weak_definition __ZN1XaSERKS_
 // CHECK-LP64: __ZN1XaSERKS_:
+// CHECK-LP64: .globl __ZN1QaSERKS_
+// CHECK-LP64: .weak_definition __ZN1QaSERKS_
+// CHECK-LP64: __ZN1QaSERKS_:
 
 // CHECK-LP32: .globl __ZN1XaSERKS_
 // CHECK-LP32: .weak_definition __ZN1XaSERKS_
 // CHECK-LP32: __ZN1XaSERKS_:
+// CHECK-LP32: .globl __ZN1QaSERKS_
+// CHECK-LP32: .weak_definition __ZN1QaSERKS_
+// CHECK-LP32: __ZN1QaSERKS_:
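
For reference, the symbols these CHECK lines match are Itanium manglings of implicitly-defined copy-assignment operators: _ZN1XaSERKS_ demangles to X::operator=(X const&) ('aS' is operator=, 'RKS_' a const reference to the enclosing class), and the extra leading underscore is Darwin's global symbol prefix. A made-up pair of classes that would emit such weak definitions (the test's real class definitions aren't visible in this hunk):

struct M { M &operator=(const M &); }; // user-provided, so X and Q below get
                                       // non-trivial implicit operator='s
struct X { M m; int x; };
struct Q { M m; int q; };

// Odr-use forces Clang to synthesize the operators and emit them as weak
// (linkonce_odr) definitions: __ZN1XaSERKS_ and __ZN1QaSERKS_ on Darwin.
void g(X &a, const X &b, Q &c, const Q &d) { a = b; c = d; }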


@@ -44,7 +44,7 @@ void test_D(D d1, D d2) {
 // CHECK: {{call.*_ZN1AaSERS_}}
 // CHECK: {{call.*_ZN1BaSERS_}}
 // CHECK: {{call.*_ZN1CaSERKS_}}
-// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 28}}
+// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 24}}
 // CHECK: {{call.*_ZN1BaSERS_}}
 // CHECK: br
 // CHECK: {{call.*_ZN1CaSERKS_}}
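
The i64 constants here are the byte lengths of the fused copy for one run of trivially-copyable members of D. The reverted aggregator computed the run's extent from record-layout bit offsets, which let it take in one extra 4-byte field (or bitfield storage unit) compared with the older logic, so the expectation drops from 28 back to 24. The arithmetic, in the style of getMemcpySize() and assuming a run of consecutive 32-bit fields (the test's real layout is not visible in this hunk):

// Worked computation matching FieldMemcpyizer::getMemcpySize(): the covered
// bit range, rounded up to whole 8-bit chars.
constexpr unsigned memcpyBytes(unsigned FirstBit, unsigned LastBit,
                               unsigned LastSizeBits) {
  return (LastBit + LastSizeBits - FirstBit + 7) / 8;
}
static_assert(memcpyBytes(0, 5 * 32, 32) == 24, "six ints: post-revert size");
static_assert(memcpyBytes(0, 6 * 32, 32) == 28, "seven ints: pre-revert size");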


@@ -46,7 +46,7 @@ void f(D d) {
 // CHECK: call void @_ZN1AD1Ev
 // CHECK: call void @_ZN1AC2ERS_
 // CHECK: call void @_ZN1BC2ERS_
-// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 28}}
+// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 24}}
 // CHECK: call void @_ZN1BC1ERS_
 // CHECK: br
 // CHECK: {{icmp ult.*, 2}}
@@ -54,7 +54,8 @@ void f(D d) {
 // CHECK: call void @_ZN1AC1Ev
 // CHECK: call void @_ZN1CC1ERS_1A
 // CHECK: call void @_ZN1AD1Ev
-// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 300}}
+// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 288}}
+// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 12}}
 // CHECK: ret void
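
Same bookkeeping in the constructor test: before the revert a single 300-byte memcpy covered what the older logic copied as two runs of 288 and 12 bytes; since 288 + 12 = 300, only the number of llvm.memcpy calls changes, not the bytes copied. A made-up member layout consistent with those constants (struct D's real definition is not visible in this hunk):

struct D2 {          // hypothetical stand-in for the test's D
  int big[72];       // 288 bytes: copied as one aggregate by the old logic...
  int x, y, z;       // ...with these 12 bytes as a separate second run;
};                   // r174919 bridged both into one 300-byte memcpy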