llvm-project/clang/test/OpenMP/ordered_codegen.cpp
PeixinQiao a42380ce83 [OMPIRBuilder] Add ordered directive to OMPBuilder
Add support for the ordered directive in the OpenMPIRBuilder.

This patch also modifies clang to lower the ordered directive through the
OpenMPIRBuilder when the option -fopenmp-enable-irbuilder is enabled.

Also fix an ICE when parsing a canonical for loop that uses the relational
operator LE or GE in an OpenMP region, by replacing the unary increment of
the expression "Expr A" minus "Expr B" (++(Expr A - Expr B)) with a binary
addition of that difference and the constant expression "1"
(Expr A - Expr B + "1").
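
For illustration, a minimal sketch (a hypothetical reproducer, not a test
taken from this patch) of the loop shape the ICE fix addresses: a canonical
for loop whose condition uses LE inside an OpenMP region, e.g. when compiled
with -fopenmp-enable-irbuilder:

  void le_loop(int lb, int ub, float *out) {
  #pragma omp for
    // LE condition; the trip count is ub - lb + 1.
    for (int i = lb; i <= ub; ++i)
      out[i] = 1.0f;
  }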

Reviewed By: Meinersbur, kiranchandramohan

Differential Revision: https://reviews.llvm.org/D107430
2021-09-03 09:37:58 +08:00

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefixes=CHECK1,CHECK1-NORMAL
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK2,CHECK2-NORMAL
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefixes=CHECK1,CHECK1-IRBUILDER
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK2,CHECK2-IRBUILDER
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -fopenmp-version=45 -o - | FileCheck %s --check-prefixes=CHECK3,CHECK3-NORMAL
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -fopenmp-version=45 -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK4,CHECK4-NORMAL
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -fopenmp-version=45 -o - | FileCheck %s --check-prefixes=CHECK3,CHECK3-IRBUILDER
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -fopenmp-version=45 -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK4,CHECK4-IRBUILDER
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
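// Prefix summary of the RUN lines above: CHECK1/CHECK3/CHECK5 match the
// direct compiles and CHECK2/CHECK4/CHECK6 the -include-pch recompiles;
// CHECK3/CHECK4 use -fopenmp-version=45 and CHECK5/CHECK6 use -fopenmp-simd.
// The -NORMAL and -IRBUILDER suffixes separate classic OpenMP codegen from
// -fopenmp-enable-irbuilder output.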
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
void static_not_chunked(float *a, float *b, float *c, float *d) {
#pragma omp for schedule(static) ordered
  // Loop header
  for (int i = 32000000; i > 33; i += -7) {
    // Start of body: calculate i from IV:
    // ... start of ordered region ...
    // ... loop body ...
    // End of body: store into a[i]:
    // ... end of ordered region ...
#pragma omp ordered
    a[i] = b[i] * c[i] * d[i];
  }
}
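// Note: with the ordered clause even schedule(static) is lowered through the
// dynamic-dispatch runtime (__kmpc_dispatch_init_4/__kmpc_dispatch_next_4
// with schedule id 66 in the CHECK lines below) rather than the usual static
// worksharing entry point.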
void dynamic1(float *a, float *b, float *c, float *d) {
#pragma omp for schedule(dynamic) ordered
  // Loop header
  for (unsigned long long i = 131071; i < 2147483647; i += 127) {
    // Start of body: calculate i from IV:
    // ... start of ordered region ...
    // ... loop body ...
    // End of body: store into a[i]:
    // ... end of ordered region ...
#pragma omp ordered threads
    a[i] = b[i] * c[i] * d[i];
    // ... end iteration for ordered loop ...
  }
}
void test_auto(float *a, float *b, float *c, float *d) {
  unsigned int x = 0;
  unsigned int y = 0;
#pragma omp for schedule(auto) collapse(2) ordered
  // Loop header
  // FIXME: When the iteration count of some nested loop is not a known
  // constant, we should pre-calculate it, like we do for the total number of
  // iterations!
  for (char i = static_cast<char>(y); i <= '9'; ++i)
    for (x = 11; x > 0; --x) {
      // Start of body: indices are calculated from IV:
      // ... start of ordered region ...
      // ... loop body ...
      // End of body: store into a[i]:
      // ... end of ordered region ...
#pragma omp ordered
      a[i] = b[i] * c[i] * d[i];
      // ... end iteration for ordered loop ...
    }
}
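// With collapse(2) the iteration space is linearized: the CHECK lines below
// precompute the collapsed upper bound as (57 - y + 1) / 1 * 11 - 1, i.e. the
// outer trip count up to '9' (57) times the constant inner trip count of 11,
// minus one.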
void runtime(float *a, float *b, float *c, float *d) {
  int x = 0;
#pragma omp for collapse(2) schedule(runtime) ordered
  // Loop header
  for (unsigned char i = '0'; i <= '9'; ++i)
    for (x = -10; x < 10; ++x) {
      // Start of body: indices are calculated from IV:
      // ... start of ordered region ...
      // ... loop body ...
      // End of body: store into a[i]:
      // ... end of ordered region ...
#pragma omp ordered threads
      a[i] = b[i] * c[i] * d[i];
      // ... end iteration for ordered loop ...
    }
}
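// Here both trip counts are compile-time constants ('0'..'9' gives 10 outer
// iterations, -10..9 gives 20 inner ones), so the collapsed upper bound folds
// to the constant 199 seen in the CHECK lines below.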
float f[10];
void foo_simd(int low, int up) {
#pragma omp simd
  for (int i = low; i < up; ++i) {
    f[i] = 0.0;
#pragma omp ordered simd
    f[i] = 1.0;
  }
#pragma omp for simd ordered
  for (int i = low; i < up; ++i) {
    f[i] = 0.0;
#pragma omp ordered simd
    f[i] = 1.0;
  }
}
#endif // HEADER
// CHECK1-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
// CHECK1-NORMAL-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 7
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK1-NEXT: store i32 [[SUB]], i32* [[I]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: [[TMP6:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP6]], i64 [[IDXPROM]]
// CHECK1-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP10]] to i64
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[IDXPROM1]]
// CHECK1-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK1-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
// CHECK1-NEXT: [[TMP12:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64
// CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM4]]
// CHECK1-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX5]], align 4
// CHECK1-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP14]]
// CHECK1-NEXT: [[TMP15:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM7]]
// CHECK1-NEXT: store float [[MUL6]], float* [[ARRAYIDX8]], align 4
// CHECK1-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK1-NEXT: call void @__kmpc_dispatch_fini_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP0]])
// CHECK1-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK1-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6:[0-9]+]])
// CHECK1-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1073741891, i64 0, i64 16908287, i64 1, i64 1)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6]])
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = add i64 [[TMP4]], 1
// CHECK1-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP3]], [[ADD]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP5]], 127
// CHECK1-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
// CHECK1-NEXT: store i64 [[ADD1]], i64* [[I]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: [[TMP6:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[I]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP6]], i64 [[TMP7]]
// CHECK1-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[I]], align 8
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[TMP10]]
// CHECK1-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK1-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
// CHECK1-NEXT: [[TMP12:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, i64* [[I]], align 8
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[TMP13]]
// CHECK1-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX4]], align 4
// CHECK1-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP14]]
// CHECK1-NEXT: [[TMP15:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, i64* [[I]], align 8
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[TMP16]]
// CHECK1-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4
// CHECK1-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD7:%.*]] = add i64 [[TMP17]], 1
// CHECK1-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6]])
// CHECK1-NEXT: call void @__kmpc_dispatch_fini_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK1-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[X6:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I8:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[X9:%.*]] = alloca i32, align 4
// CHECK1-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[X]], align 4
// CHECK1-NEXT: store i32 0, i32* [[Y]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[Y]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK1-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[TMP2:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
// CHECK1-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK1-NEXT: store i64 [[SUB5]], i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: store i8 [[TMP3]], i8* [[I]], align 1
// CHECK1-NEXT: store i32 11, i32* [[X6]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[CONV7:%.*]] = sext i8 [[TMP4]] to i32
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8:[0-9]+]])
// CHECK1-NEXT: call void @__kmpc_dispatch_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1073741894, i64 0, i64 [[TMP6]], i64 1, i64 1)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8]])
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP11:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK1-NEXT: [[CONV11:%.*]] = sext i8 [[TMP11]] to i64
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[DIV12:%.*]] = sdiv i64 [[TMP12]], 11
// CHECK1-NEXT: [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[CONV11]], [[MUL13]]
// CHECK1-NEXT: [[CONV15:%.*]] = trunc i64 [[ADD14]] to i8
// CHECK1-NEXT: store i8 [[CONV15]], i8* [[I8]], align 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[DIV16:%.*]] = sdiv i64 [[TMP14]], 11
// CHECK1-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 11
// CHECK1-NEXT: [[SUB18:%.*]] = sub nsw i64 [[TMP13]], [[MUL17]]
// CHECK1-NEXT: [[MUL19:%.*]] = mul nsw i64 [[SUB18]], 1
// CHECK1-NEXT: [[SUB20:%.*]] = sub nsw i64 11, [[MUL19]]
// CHECK1-NEXT: [[CONV21:%.*]] = trunc i64 [[SUB20]] to i32
// CHECK1-NEXT: store i32 [[CONV21]], i32* [[X9]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: [[TMP15:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = load i8, i8* [[I8]], align 1
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP16]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM]]
// CHECK1-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i8, i8* [[I8]], align 1
// CHECK1-NEXT: [[IDXPROM22:%.*]] = sext i8 [[TMP19]] to i64
// CHECK1-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM22]]
// CHECK1-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX23]], align 4
// CHECK1-NEXT: [[MUL24:%.*]] = fmul float [[TMP17]], [[TMP20]]
// CHECK1-NEXT: [[TMP21:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = load i8, i8* [[I8]], align 1
// CHECK1-NEXT: [[IDXPROM25:%.*]] = sext i8 [[TMP22]] to i64
// CHECK1-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM25]]
// CHECK1-NEXT: [[TMP23:%.*]] = load float, float* [[ARRAYIDX26]], align 4
// CHECK1-NEXT: [[MUL27:%.*]] = fmul float [[MUL24]], [[TMP23]]
// CHECK1-NEXT: [[TMP24:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = load i8, i8* [[I8]], align 1
// CHECK1-NEXT: [[IDXPROM28:%.*]] = sext i8 [[TMP25]] to i64
// CHECK1-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, float* [[TMP24]], i64 [[IDXPROM28]]
// CHECK1-NEXT: store float [[MUL27]], float* [[ARRAYIDX29]], align 4
// CHECK1-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD30:%.*]] = add nsw i64 [[TMP26]], 1
// CHECK1-NEXT: store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8]])
// CHECK1-NEXT: call void @__kmpc_dispatch_fini_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK1-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
// CHECK1-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[X2:%.*]] = alloca i32, align 4
// CHECK1-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK1-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK1-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[X]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 199, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10:[0-9]+]])
// CHECK1-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1073741893, i32 0, i32 199, i32 1, i32 1)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10]])
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 20
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
// CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
// CHECK1-NEXT: store i8 [[CONV]], i8* [[I]], align 1
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP7]], 20
// CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 20
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], [[MUL4]]
// CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 -10, [[MUL5]]
// CHECK1-NEXT: store i32 [[ADD6]], i32* [[X2]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: [[TMP8:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i8, i8* [[I]], align 1
// CHECK1-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP9]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP8]], i64 [[IDXPROM]]
// CHECK1-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i8, i8* [[I]], align 1
// CHECK1-NEXT: [[IDXPROM7:%.*]] = zext i8 [[TMP12]] to i64
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[IDXPROM7]]
// CHECK1-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK1-NEXT: [[MUL9:%.*]] = fmul float [[TMP10]], [[TMP13]]
// CHECK1-NEXT: [[TMP14:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load i8, i8* [[I]], align 1
// CHECK1-NEXT: [[IDXPROM10:%.*]] = zext i8 [[TMP15]] to i64
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[IDXPROM10]]
// CHECK1-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX11]], align 4
// CHECK1-NEXT: [[MUL12:%.*]] = fmul float [[MUL9]], [[TMP16]]
// CHECK1-NEXT: [[TMP17:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = load i8, i8* [[I]], align 1
// CHECK1-NEXT: [[IDXPROM13:%.*]] = zext i8 [[TMP18]] to i64
// CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[IDXPROM13]]
// CHECK1-NEXT: store float [[MUL12]], float* [[ARRAYIDX14]], align 4
// CHECK1-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK1-NEXT: store i32 [[ADD15]], i32* [[DOTOMP_IV]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10]])
// CHECK1-NEXT: call void @__kmpc_dispatch_fini_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK1-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z8foo_simdii
// CHECK1-SAME: (i32 [[LOW:%.*]], i32 [[UP:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IV16:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP17:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I26:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I28:%.*]] = alloca i32, align 4
// CHECK1-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store i32 [[LOW]], i32* [[LOW_ADDR]], align 4
// CHECK1-NEXT: store i32 [[UP]], i32* [[UP_ADDR]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
// CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[I]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK1: simd.if.then:
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[ADD6:%.*]] = add i32 [[TMP9]], 1
// CHECK1-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP8]], [[ADD6]]
// CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP11]], 1
// CHECK1-NEXT: [[ADD8:%.*]] = add i32 [[TMP10]], [[MUL]]
// CHECK1-NEXT: store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store float 0.000000e+00, float* [[ARRAYIDX]], align 4, !llvm.access.group !3
// CHECK1-NEXT: call void @__captured_stmt(i32* [[I5]]), !llvm.access.group !3
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: [[ADD9:%.*]] = add i32 [[TMP13]], 1
// CHECK1-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[SUB10:%.*]] = sub i32 [[TMP15]], [[TMP16]]
// CHECK1-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1
// CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1
// CHECK1-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], 1
// CHECK1-NEXT: [[MUL14:%.*]] = mul i32 [[DIV13]], 1
// CHECK1-NEXT: [[ADD15:%.*]] = add i32 [[TMP14]], [[MUL14]]
// CHECK1-NEXT: store i32 [[ADD15]], i32* [[I5]], align 4
// CHECK1-NEXT: br label [[SIMD_IF_END]]
// CHECK1: simd.if.end:
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-NEXT: [[SUB21:%.*]] = sub i32 [[TMP19]], [[TMP20]]
// CHECK1-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1
// CHECK1-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1
// CHECK1-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1
// CHECK1-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1
// CHECK1-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-NEXT: store i32 [[TMP21]], i32* [[I26]], align 4
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK1-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP22]], [[TMP23]]
// CHECK1-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK1: omp.precond.then:
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK1-NEXT: store i32 [[TMP24]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12:[0-9]+]])
// CHECK1-NEXT: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 [[TMP25]], i32 1, i32 1)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12]])
// CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_dispatch_next_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP26]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP27]], i32* [[DOTOMP_IV16]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.cond30:
// CHECK1-NORMAL: omp.inner.for.cond29:
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
// CHECK1-NEXT: [[ADD30:%.*]] = add i32 [[TMP29]], 1
// CHECK1-NEXT: [[CMP31:%.*]] = icmp ult i32 [[TMP28]], [[ADD30]]
// CHECK1-NEXT: br i1 [[CMP31]], label [[OMP_INNER_FOR_BODY32:%.*]], label [[OMP_INNER_FOR_END40:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.body33:
// CHECK1-NORMAL: omp.inner.for.body32:
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group !7
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK1-NEXT: [[MUL33:%.*]] = mul i32 [[TMP31]], 1
// CHECK1-NEXT: [[ADD34:%.*]] = add i32 [[TMP30]], [[MUL33]]
// CHECK1-NEXT: store i32 [[ADD34]], i32* [[I28]], align 4, !llvm.access.group !7
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[I28]], align 4, !llvm.access.group !7
// CHECK1-NEXT: [[IDXPROM35:%.*]] = sext i32 [[TMP32]] to i64
// CHECK1-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM35]]
// CHECK1-NEXT: store float 0.000000e+00, float* [[ARRAYIDX36]], align 4, !llvm.access.group !7
// CHECK1-NEXT: call void @__captured_stmt.1(i32* [[I28]]), !llvm.access.group !7
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE37:%.*]]
// CHECK1-IRBUILDER: omp.body.continue38:
// CHECK1-NORMAL: omp.body.continue37:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC38:%.*]]
// CHECK1-IRBUILDER: omp.inner.for.inc39:
// CHECK1-NORMAL: omp.inner.for.inc38:
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK1-NEXT: [[ADD39:%.*]] = add i32 [[TMP33]], 1
// CHECK1-NEXT: store i32 [[ADD39]], i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12]])
// CHECK1-NEXT: call void @__kmpc_dispatch_fini_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]), !llvm.access.group !7
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK1-IRBUILDER: omp.inner.for.end42:
// CHECK1-NORMAL: omp.inner.for.end40:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK1-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK1-NEXT: [[SUB41:%.*]] = sub i32 [[TMP37]], [[TMP38]]
// CHECK1-NEXT: [[SUB42:%.*]] = sub i32 [[SUB41]], 1
// CHECK1-NEXT: [[ADD43:%.*]] = add i32 [[SUB42]], 1
// CHECK1-NEXT: [[DIV44:%.*]] = udiv i32 [[ADD43]], 1
// CHECK1-NEXT: [[MUL45:%.*]] = mul i32 [[DIV44]], 1
// CHECK1-NEXT: [[ADD46:%.*]] = add i32 [[TMP36]], [[MUL45]]
// CHECK1-NEXT: store i32 [[ADD46]], i32* [[I28]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
// CHECK1: omp.precond.end:
// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK1-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__captured_stmt
// CHECK1-SAME: (i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store float 1.000000e+00, float* [[ARRAYIDX]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__captured_stmt.1
// CHECK1-SAME: (i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK1-NEXT: store float 1.000000e+00, float* [[ARRAYIDX]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK2-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
// CHECK2-NORMAL-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 7
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK2-NEXT: store i32 [[SUB]], i32* [[I]], align 4
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: [[TMP6:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP6]], i64 [[IDXPROM]]
// CHECK2-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP10]] to i64
// CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[IDXPROM1]]
// CHECK2-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK2-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
// CHECK2-NEXT: [[TMP12:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64
// CHECK2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM4]]
// CHECK2-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX5]], align 4
// CHECK2-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP14]]
// CHECK2-NEXT: [[TMP15:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64
// CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM7]]
// CHECK2-NEXT: store float [[MUL6]], float* [[ARRAYIDX8]], align 4
// CHECK2-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK2-NEXT: call void @__kmpc_dispatch_fini_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2: omp.dispatch.inc:
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK2: omp.dispatch.end:
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP0]])
// CHECK2-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK2-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK2-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6:[0-9]+]])
// CHECK2-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1073741891, i64 0, i64 16908287, i64 1, i64 1)
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6]])
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[ADD:%.*]] = add i64 [[TMP4]], 1
// CHECK2-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP3]], [[ADD]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[MUL:%.*]] = mul i64 [[TMP5]], 127
// CHECK2-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
// CHECK2-NEXT: store i64 [[ADD1]], i64* [[I]], align 8
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: [[TMP6:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* [[I]], align 8
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP6]], i64 [[TMP7]]
// CHECK2-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = load i64, i64* [[I]], align 8
// CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[TMP10]]
// CHECK2-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK2-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
// CHECK2-NEXT: [[TMP12:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK2-NEXT: [[TMP13:%.*]] = load i64, i64* [[I]], align 8
// CHECK2-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[TMP13]]
// CHECK2-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX4]], align 4
// CHECK2-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP14]]
// CHECK2-NEXT: [[TMP15:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK2-NEXT: [[TMP16:%.*]] = load i64, i64* [[I]], align 8
// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[TMP16]]
// CHECK2-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4
// CHECK2-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[ADD7:%.*]] = add i64 [[TMP17]], 1
// CHECK2-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6]])
// CHECK2-NEXT: call void @__kmpc_dispatch_fini_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2: omp.dispatch.inc:
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK2: omp.dispatch.end:
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK2-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK2-NEXT: ret void
//
//
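// The assertions below for _Z9test_autoPfS_S_S_ exercise an ordered loop with
// schedule(auto). The two loop levels are linearized into one signed 64-bit IV
// (i is recovered as the captured start plus IV/11, x as 11 - IV%11), a
// zero-trip-count guard branches around the loop (omp.precond.then/end), and
// the loop is driven by __kmpc_dispatch_init_8 with schedule constant
// 1073741894 (kmp_ord_auto, 70, plus the modifier flag) and a
// __kmpc_dispatch_next_8 polling loop; each body is bracketed by
// __kmpc_ordered/__kmpc_end_ordered and ends with __kmpc_dispatch_fini_8.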
// CHECK2-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[X6:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I8:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[X9:%.*]] = alloca i32, align 4
// CHECK2-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[X]], align 4
// CHECK2-NEXT: store i32 0, i32* [[Y]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[Y]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK2-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK2-NEXT: [[TMP2:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK2-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK2-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
// CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK2-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
// CHECK2-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK2-NEXT: store i64 [[SUB5]], i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK2-NEXT: store i8 [[TMP3]], i8* [[I]], align 1
// CHECK2-NEXT: store i32 11, i32* [[X6]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK2-NEXT: [[CONV7:%.*]] = sext i8 [[TMP4]] to i32
// CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK2: omp.precond.then:
// CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8:[0-9]+]])
// CHECK2-NEXT: call void @__kmpc_dispatch_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1073741894, i64 0, i64 [[TMP6]], i64 1, i64 1)
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8]])
// CHECK2-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK2-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]]
// CHECK2-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP11:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK2-NEXT: [[CONV11:%.*]] = sext i8 [[TMP11]] to i64
// CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[DIV12:%.*]] = sdiv i64 [[TMP12]], 11
// CHECK2-NEXT: [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1
// CHECK2-NEXT: [[ADD14:%.*]] = add nsw i64 [[CONV11]], [[MUL13]]
// CHECK2-NEXT: [[CONV15:%.*]] = trunc i64 [[ADD14]] to i8
// CHECK2-NEXT: store i8 [[CONV15]], i8* [[I8]], align 1
// CHECK2-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[DIV16:%.*]] = sdiv i64 [[TMP14]], 11
// CHECK2-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 11
// CHECK2-NEXT: [[SUB18:%.*]] = sub nsw i64 [[TMP13]], [[MUL17]]
// CHECK2-NEXT: [[MUL19:%.*]] = mul nsw i64 [[SUB18]], 1
// CHECK2-NEXT: [[SUB20:%.*]] = sub nsw i64 11, [[MUL19]]
// CHECK2-NEXT: [[CONV21:%.*]] = trunc i64 [[SUB20]] to i32
// CHECK2-NEXT: store i32 [[CONV21]], i32* [[X9]], align 4
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: [[TMP15:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP16:%.*]] = load i8, i8* [[I8]], align 1
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP16]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM]]
// CHECK2-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP19:%.*]] = load i8, i8* [[I8]], align 1
// CHECK2-NEXT: [[IDXPROM22:%.*]] = sext i8 [[TMP19]] to i64
// CHECK2-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM22]]
// CHECK2-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX23]], align 4
// CHECK2-NEXT: [[MUL24:%.*]] = fmul float [[TMP17]], [[TMP20]]
// CHECK2-NEXT: [[TMP21:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK2-NEXT: [[TMP22:%.*]] = load i8, i8* [[I8]], align 1
// CHECK2-NEXT: [[IDXPROM25:%.*]] = sext i8 [[TMP22]] to i64
// CHECK2-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM25]]
// CHECK2-NEXT: [[TMP23:%.*]] = load float, float* [[ARRAYIDX26]], align 4
// CHECK2-NEXT: [[MUL27:%.*]] = fmul float [[MUL24]], [[TMP23]]
// CHECK2-NEXT: [[TMP24:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK2-NEXT: [[TMP25:%.*]] = load i8, i8* [[I8]], align 1
// CHECK2-NEXT: [[IDXPROM28:%.*]] = sext i8 [[TMP25]] to i64
// CHECK2-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, float* [[TMP24]], i64 [[IDXPROM28]]
// CHECK2-NEXT: store float [[MUL27]], float* [[ARRAYIDX29]], align 4
// CHECK2-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[ADD30:%.*]] = add nsw i64 [[TMP26]], 1
// CHECK2-NEXT: store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8]])
// CHECK2-NEXT: call void @__kmpc_dispatch_fini_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2: omp.dispatch.inc:
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK2: omp.dispatch.end:
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
// CHECK2: omp.precond.end:
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK2-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK2-NEXT: ret void
//
//
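// The assertions below for _Z7runtimePfS_S_S_ exercise an ordered loop with
// schedule(runtime): a 200-iteration linearized (char i, int x) space
// (i = 48 + IV/20, x = -10 + IV%20) dispatched via __kmpc_dispatch_init_4 with
// schedule constant 1073741893 (kmp_ord_runtime, 69, plus the modifier flag);
// the actual schedule is resolved by the runtime library at execution time.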
// CHECK2-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
// CHECK2-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK2-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[X2:%.*]] = alloca i32, align 4
// CHECK2-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK2-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK2-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[X]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 199, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10:[0-9]+]])
// CHECK2-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1073741893, i32 0, i32 199, i32 1, i32 1)
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10]])
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 20
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
// CHECK2-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
// CHECK2-NEXT: store i8 [[CONV]], i8* [[I]], align 1
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP7]], 20
// CHECK2-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 20
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], [[MUL4]]
// CHECK2-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 -10, [[MUL5]]
// CHECK2-NEXT: store i32 [[ADD6]], i32* [[X2]], align 4
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: [[TMP8:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = load i8, i8* [[I]], align 1
// CHECK2-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP9]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP8]], i64 [[IDXPROM]]
// CHECK2-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = load i8, i8* [[I]], align 1
// CHECK2-NEXT: [[IDXPROM7:%.*]] = zext i8 [[TMP12]] to i64
// CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[IDXPROM7]]
// CHECK2-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK2-NEXT: [[MUL9:%.*]] = fmul float [[TMP10]], [[TMP13]]
// CHECK2-NEXT: [[TMP14:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = load i8, i8* [[I]], align 1
// CHECK2-NEXT: [[IDXPROM10:%.*]] = zext i8 [[TMP15]] to i64
// CHECK2-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[IDXPROM10]]
// CHECK2-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX11]], align 4
// CHECK2-NEXT: [[MUL12:%.*]] = fmul float [[MUL9]], [[TMP16]]
// CHECK2-NEXT: [[TMP17:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK2-NEXT: [[TMP18:%.*]] = load i8, i8* [[I]], align 1
// CHECK2-NEXT: [[IDXPROM13:%.*]] = zext i8 [[TMP18]] to i64
// CHECK2-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[IDXPROM13]]
// CHECK2-NEXT: store float [[MUL12]], float* [[ARRAYIDX14]], align 4
// CHECK2-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK2-NEXT: store i32 [[ADD15]], i32* [[DOTOMP_IV]], align 4
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10]])
// CHECK2-NEXT: call void @__kmpc_dispatch_fini_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2: omp.dispatch.inc:
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK2: omp.dispatch.end:
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK2-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK2-NEXT: ret void
//
//
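// _Z8foo_simdii below contains two loops. The first is a plain simd loop: no
// runtime calls, a simd.if.then guard for the low < up precondition, and
// memory accesses tagged with !llvm.access.group under an !llvm.loop latch
// branch. The second combines worksharing with simd and ordered, so it still
// goes through the dispatch API (__kmpc_dispatch_init_4u with kmp_ord_static,
// 66, and __kmpc_dispatch_fini_4u per iteration) while keeping the simd
// metadata. The bodies of the enclosed ordered simd regions are outlined into
// @__captured_stmt and @__captured_stmt.1, called under the same access group.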
// CHECK2-LABEL: define {{[^@]+}}@_Z8foo_simdii
// CHECK2-SAME: (i32 [[LOW:%.*]], i32 [[UP:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IV16:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP17:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I26:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I28:%.*]] = alloca i32, align 4
// CHECK2-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: store i32 [[LOW]], i32* [[LOW_ADDR]], align 4
// CHECK2-NEXT: store i32 [[UP]], i32* [[UP_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
// CHECK2-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK2-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK2-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[I]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK2: simd.if.then:
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3
// CHECK2-NEXT: [[ADD6:%.*]] = add i32 [[TMP9]], 1
// CHECK2-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP8]], [[ADD6]]
// CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !3
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK2-NEXT: [[MUL:%.*]] = mul i32 [[TMP11]], 1
// CHECK2-NEXT: [[ADD8:%.*]] = add i32 [[TMP10]], [[MUL]]
// CHECK2-NEXT: store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !3
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !3
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK2-NEXT: store float 0.000000e+00, float* [[ARRAYIDX]], align 4, !llvm.access.group !3
// CHECK2-NEXT: call void @__captured_stmt(i32* [[I5]]), !llvm.access.group !3
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK2-NEXT: [[ADD9:%.*]] = add i32 [[TMP13]], 1
// CHECK2-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[SUB10:%.*]] = sub i32 [[TMP15]], [[TMP16]]
// CHECK2-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1
// CHECK2-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1
// CHECK2-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], 1
// CHECK2-NEXT: [[MUL14:%.*]] = mul i32 [[DIV13]], 1
// CHECK2-NEXT: [[ADD15:%.*]] = add i32 [[TMP14]], [[MUL14]]
// CHECK2-NEXT: store i32 [[ADD15]], i32* [[I5]], align 4
// CHECK2-NEXT: br label [[SIMD_IF_END]]
// CHECK2: simd.if.end:
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP17]], i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK2-NEXT: [[SUB21:%.*]] = sub i32 [[TMP19]], [[TMP20]]
// CHECK2-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1
// CHECK2-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1
// CHECK2-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1
// CHECK2-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1
// CHECK2-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK2-NEXT: store i32 [[TMP21]], i32* [[I26]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK2-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP22]], [[TMP23]]
// CHECK2-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK2: omp.precond.then:
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK2-NEXT: store i32 [[TMP24]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12:[0-9]+]])
// CHECK2-NEXT: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 [[TMP25]], i32 1, i32 1)
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12]])
// CHECK2-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_dispatch_next_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP26]], 0
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP27]], i32* [[DOTOMP_IV16]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK2-IRBUILDER: omp.inner.for.cond30:
// CHECK2-NORMAL: omp.inner.for.cond29:
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
// CHECK2-NEXT: [[ADD30:%.*]] = add i32 [[TMP29]], 1
// CHECK2-NEXT: [[CMP31:%.*]] = icmp ult i32 [[TMP28]], [[ADD30]]
// CHECK2-NEXT: br i1 [[CMP31]], label [[OMP_INNER_FOR_BODY32:%.*]], label [[OMP_INNER_FOR_END40:%.*]]
// CHECK2-IRBUILDER: omp.inner.for.body33:
// CHECK2-NORMAL: omp.inner.for.body32:
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group !7
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK2-NEXT: [[MUL33:%.*]] = mul i32 [[TMP31]], 1
// CHECK2-NEXT: [[ADD34:%.*]] = add i32 [[TMP30]], [[MUL33]]
// CHECK2-NEXT: store i32 [[ADD34]], i32* [[I28]], align 4, !llvm.access.group !7
// CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[I28]], align 4, !llvm.access.group !7
// CHECK2-NEXT: [[IDXPROM35:%.*]] = sext i32 [[TMP32]] to i64
// CHECK2-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM35]]
// CHECK2-NEXT: store float 0.000000e+00, float* [[ARRAYIDX36]], align 4, !llvm.access.group !7
// CHECK2-NEXT: call void @__captured_stmt.1(i32* [[I28]]), !llvm.access.group !7
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE37:%.*]]
// CHECK2-IRBUILDER: omp.body.continue38:
// CHECK2-NORMAL: omp.body.continue37:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC38:%.*]]
// CHECK2-IRBUILDER: omp.inner.for.inc39:
// CHECK2-NORMAL: omp.inner.for.inc38:
// CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK2-NEXT: [[ADD39:%.*]] = add i32 [[TMP33]], 1
// CHECK2-NEXT: store i32 [[ADD39]], i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12]])
// CHECK2-NEXT: call void @__kmpc_dispatch_fini_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]), !llvm.access.group !7
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK2-IRBUILDER: omp.inner.for.end42:
// CHECK2-NORMAL: omp.inner.for.end40:
// CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK2: omp.dispatch.inc:
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK2: omp.dispatch.end:
// CHECK2-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK2-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK2: .omp.final.then:
// CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK2-NEXT: [[SUB41:%.*]] = sub i32 [[TMP37]], [[TMP38]]
// CHECK2-NEXT: [[SUB42:%.*]] = sub i32 [[SUB41]], 1
// CHECK2-NEXT: [[ADD43:%.*]] = add i32 [[SUB42]], 1
// CHECK2-NEXT: [[DIV44:%.*]] = udiv i32 [[ADD43]], 1
// CHECK2-NEXT: [[MUL45:%.*]] = mul i32 [[DIV44]], 1
// CHECK2-NEXT: [[ADD46:%.*]] = add i32 [[TMP36]], [[MUL45]]
// CHECK2-NEXT: store i32 [[ADD46]], i32* [[I28]], align 4
// CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK2: .omp.final.done:
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
// CHECK2: omp.precond.end:
// CHECK2-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK2-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK2-NEXT: ret void
//
//
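// @__captured_stmt and @__captured_stmt.1 are the outlined bodies of the
// ordered simd regions in _Z8foo_simdii: each stores 1.0 into @f at the
// iteration index passed in by pointer.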
// CHECK2-LABEL: define {{[^@]+}}@__captured_stmt
// CHECK2-SAME: (i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK2-NEXT: store float 1.000000e+00, float* [[ARRAYIDX]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__captured_stmt.1
// CHECK2-SAME: (i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK2-NEXT: store float 1.000000e+00, float* [[ARRAYIDX]], align 4
// CHECK2-NEXT: ret void
//
//
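// From here on, the CHECK3 prefixes cover the -fopenmp-version=45 runs of the
// same functions. The interesting difference from the default-version runs is
// in the schedule constants passed to __kmpc_dispatch_init_*: plain kmp_ord_*
// values (66 static, 67 dynamic_chunked, 69 runtime, 70 auto) without the
// modifier flag. _Z18static_not_chunkedPfS_S_S_ starts below: even an
// unchunked static schedule uses the dispatch API once the loop carries an
// ordered clause.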
// CHECK3-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
// CHECK3-NORMAL-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 7
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK3-NEXT: store i32 [[SUB]], i32* [[I]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: [[TMP6:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[I]], align 4
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP6]], i64 [[IDXPROM]]
// CHECK3-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK3-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP10]] to i64
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[IDXPROM1]]
// CHECK3-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
// CHECK3-NEXT: [[TMP12:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK3-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64
// CHECK3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM4]]
// CHECK3-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX5]], align 4
// CHECK3-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP14]]
// CHECK3-NEXT: [[TMP15:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK3-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64
// CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM7]]
// CHECK3-NEXT: store float [[MUL6]], float* [[ARRAYIDX8]], align 4
// CHECK3-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK3-NEXT: call void @__kmpc_dispatch_fini_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP0]])
// CHECK3-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP0]])
// CHECK3-NEXT: ret void
//
//
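// _Z8dynamic1PfS_S_S_ under 4.5: the control flow matches the default-version
// run, but __kmpc_dispatch_init_8u takes the plain kmp_ord_dynamic_chunked
// value 67. The unsigned 64-bit IV maps to i = 131071 + 127*IV.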
// CHECK3-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK3-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 67, i64 0, i64 16908287, i64 1, i64 1)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6]])
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP2]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[ADD:%.*]] = add i64 [[TMP4]], 1
// CHECK3-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP3]], [[ADD]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP5]], 127
// CHECK3-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
// CHECK3-NEXT: store i64 [[ADD1]], i64* [[I]], align 8
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: [[TMP6:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[I]], align 8
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP6]], i64 [[TMP7]]
// CHECK3-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, i64* [[I]], align 8
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[TMP10]]
// CHECK3-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
// CHECK3-NEXT: [[TMP12:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK3-NEXT: [[TMP13:%.*]] = load i64, i64* [[I]], align 8
// CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[TMP13]]
// CHECK3-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX4]], align 4
// CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP14]]
// CHECK3-NEXT: [[TMP15:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = load i64, i64* [[I]], align 8
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[TMP16]]
// CHECK3-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4
// CHECK3-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[ADD7:%.*]] = add i64 [[TMP17]], 1
// CHECK3-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6]])
// CHECK3-NEXT: call void @__kmpc_dispatch_fini_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK3-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK3-NEXT: ret void
//
//
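// _Z9test_autoPfS_S_S_ under 4.5: same zero-trip guard and linearized
// (char i, int x) iteration space as above; __kmpc_dispatch_init_8 takes the
// plain kmp_ord_auto value 70.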
// CHECK3-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[X6:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I8:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[X9:%.*]] = alloca i32, align 4
// CHECK3-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK3-NEXT: store i32 0, i32* [[X]], align 4
// CHECK3-NEXT: store i32 0, i32* [[Y]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[Y]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK3-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[TMP2:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK3-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
// CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK3-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
// CHECK3-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK3-NEXT: store i64 [[SUB5]], i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: store i8 [[TMP3]], i8* [[I]], align 1
// CHECK3-NEXT: store i32 11, i32* [[X6]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV7:%.*]] = sext i8 [[TMP4]] to i32
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3: omp.precond.then:
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK3-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_dispatch_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 70, i64 0, i64 [[TMP6]], i64 1, i64 1)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8]])
// CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK3-NEXT: [[CONV11:%.*]] = sext i8 [[TMP11]] to i64
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[DIV12:%.*]] = sdiv i64 [[TMP12]], 11
// CHECK3-NEXT: [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1
// CHECK3-NEXT: [[ADD14:%.*]] = add nsw i64 [[CONV11]], [[MUL13]]
// CHECK3-NEXT: [[CONV15:%.*]] = trunc i64 [[ADD14]] to i8
// CHECK3-NEXT: store i8 [[CONV15]], i8* [[I8]], align 1
// CHECK3-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[DIV16:%.*]] = sdiv i64 [[TMP14]], 11
// CHECK3-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 11
// CHECK3-NEXT: [[SUB18:%.*]] = sub nsw i64 [[TMP13]], [[MUL17]]
// CHECK3-NEXT: [[MUL19:%.*]] = mul nsw i64 [[SUB18]], 1
// CHECK3-NEXT: [[SUB20:%.*]] = sub nsw i64 11, [[MUL19]]
// CHECK3-NEXT: [[CONV21:%.*]] = trunc i64 [[SUB20]] to i32
// CHECK3-NEXT: store i32 [[CONV21]], i32* [[X9]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: [[TMP15:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = load i8, i8* [[I8]], align 1
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP16]] to i64
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM]]
// CHECK3-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK3-NEXT: [[TMP19:%.*]] = load i8, i8* [[I8]], align 1
// CHECK3-NEXT: [[IDXPROM22:%.*]] = sext i8 [[TMP19]] to i64
// CHECK3-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM22]]
// CHECK3-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX23]], align 4
// CHECK3-NEXT: [[MUL24:%.*]] = fmul float [[TMP17]], [[TMP20]]
// CHECK3-NEXT: [[TMP21:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK3-NEXT: [[TMP22:%.*]] = load i8, i8* [[I8]], align 1
// CHECK3-NEXT: [[IDXPROM25:%.*]] = sext i8 [[TMP22]] to i64
// CHECK3-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM25]]
// CHECK3-NEXT: [[TMP23:%.*]] = load float, float* [[ARRAYIDX26]], align 4
// CHECK3-NEXT: [[MUL27:%.*]] = fmul float [[MUL24]], [[TMP23]]
// CHECK3-NEXT: [[TMP24:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK3-NEXT: [[TMP25:%.*]] = load i8, i8* [[I8]], align 1
// CHECK3-NEXT: [[IDXPROM28:%.*]] = sext i8 [[TMP25]] to i64
// CHECK3-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, float* [[TMP24]], i64 [[IDXPROM28]]
// CHECK3-NEXT: store float [[MUL27]], float* [[ARRAYIDX29]], align 4
// CHECK3-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[ADD30:%.*]] = add nsw i64 [[TMP26]], 1
// CHECK3-NEXT: store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8]])
// CHECK3-NEXT: call void @__kmpc_dispatch_fini_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK3-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK3-NEXT: ret void
//
//
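// _Z7runtimePfS_S_S_ under 4.5: __kmpc_dispatch_init_4 takes the plain
// kmp_ord_runtime value 69; schedule and chunk come from the runtime library
// (OMP_SCHEDULE or omp_set_schedule) at execution time.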
// CHECK3-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
// CHECK3-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[X2:%.*]] = alloca i32, align 4
// CHECK3-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK3-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK3-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK3-NEXT: store i32 0, i32* [[X]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 199, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 69, i32 0, i32 199, i32 1, i32 1)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10]])
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 20
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
// CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
// CHECK3-NEXT: store i8 [[CONV]], i8* [[I]], align 1
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP7]], 20
// CHECK3-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 20
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], [[MUL4]]
// CHECK3-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 -10, [[MUL5]]
// CHECK3-NEXT: store i32 [[ADD6]], i32* [[X2]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: [[TMP8:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK3-NEXT: [[TMP9:%.*]] = load i8, i8* [[I]], align 1
// CHECK3-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP9]] to i64
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP8]], i64 [[IDXPROM]]
// CHECK3-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK3-NEXT: [[TMP12:%.*]] = load i8, i8* [[I]], align 1
// CHECK3-NEXT: [[IDXPROM7:%.*]] = zext i8 [[TMP12]] to i64
// CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[IDXPROM7]]
// CHECK3-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK3-NEXT: [[MUL9:%.*]] = fmul float [[TMP10]], [[TMP13]]
// CHECK3-NEXT: [[TMP14:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK3-NEXT: [[TMP15:%.*]] = load i8, i8* [[I]], align 1
// CHECK3-NEXT: [[IDXPROM10:%.*]] = zext i8 [[TMP15]] to i64
// CHECK3-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[IDXPROM10]]
// CHECK3-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX11]], align 4
// CHECK3-NEXT: [[MUL12:%.*]] = fmul float [[MUL9]], [[TMP16]]
// CHECK3-NEXT: [[TMP17:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK3-NEXT: [[TMP18:%.*]] = load i8, i8* [[I]], align 1
// CHECK3-NEXT: [[IDXPROM13:%.*]] = zext i8 [[TMP18]] to i64
// CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[IDXPROM13]]
// CHECK3-NEXT: store float [[MUL12]], float* [[ARRAYIDX14]], align 4
// CHECK3-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK3-NEXT: store i32 [[ADD15]], i32* [[DOTOMP_IV]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10]])
// CHECK3-NEXT: call void @__kmpc_dispatch_fini_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK3-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z8foo_simdii
// CHECK3-SAME: (i32 [[LOW:%.*]], i32 [[UP:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IV16:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP17:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I26:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I28:%.*]] = alloca i32, align 4
// CHECK3-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: store i32 [[LOW]], i32* [[LOW_ADDR]], align 4
// CHECK3-NEXT: store i32 [[UP]], i32* [[UP_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
// CHECK3-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
// CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK3-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK3-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[I]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK3: simd.if.then:
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3
// CHECK3-NEXT: [[ADD6:%.*]] = add i32 [[TMP9]], 1
// CHECK3-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP8]], [[ADD6]]
// CHECK3-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !3
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK3-NEXT: [[MUL:%.*]] = mul i32 [[TMP11]], 1
// CHECK3-NEXT: [[ADD8:%.*]] = add i32 [[TMP10]], [[MUL]]
// CHECK3-NEXT: store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !3
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !3
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK3-NEXT: store float 0.000000e+00, float* [[ARRAYIDX]], align 4, !llvm.access.group !3
// CHECK3-NEXT: call void @__captured_stmt(i32* [[I5]]), !llvm.access.group !3
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK3-NEXT: [[ADD9:%.*]] = add i32 [[TMP13]], 1
// CHECK3-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[SUB10:%.*]] = sub i32 [[TMP15]], [[TMP16]]
// CHECK3-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1
// CHECK3-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1
// CHECK3-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], 1
// CHECK3-NEXT: [[MUL14:%.*]] = mul i32 [[DIV13]], 1
// CHECK3-NEXT: [[ADD15:%.*]] = add i32 [[TMP14]], [[MUL14]]
// CHECK3-NEXT: store i32 [[ADD15]], i32* [[I5]], align 4
// CHECK3-NEXT: br label [[SIMD_IF_END]]
// CHECK3: simd.if.end:
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP17]], i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-NEXT: [[SUB21:%.*]] = sub i32 [[TMP19]], [[TMP20]]
// CHECK3-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1
// CHECK3-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1
// CHECK3-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1
// CHECK3-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1
// CHECK3-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-NEXT: store i32 [[TMP21]], i32* [[I26]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK3-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP22]], [[TMP23]]
// CHECK3-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3: omp.precond.then:
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK3-NEXT: store i32 [[TMP24]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12:[0-9]+]])
// CHECK3-NEXT: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 [[TMP25]], i32 1, i32 1)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12]])
// CHECK3-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_dispatch_next_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP26]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP27]], i32* [[DOTOMP_IV16]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.cond30:
// CHECK3-NORMAL: omp.inner.for.cond29:
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
// CHECK3-NEXT: [[ADD30:%.*]] = add i32 [[TMP29]], 1
// CHECK3-NEXT: [[CMP31:%.*]] = icmp ult i32 [[TMP28]], [[ADD30]]
// CHECK3-NEXT: br i1 [[CMP31]], label [[OMP_INNER_FOR_BODY32:%.*]], label [[OMP_INNER_FOR_END40:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.body33:
// CHECK3-NORMAL: omp.inner.for.body32:
// CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group !7
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK3-NEXT: [[MUL33:%.*]] = mul i32 [[TMP31]], 1
// CHECK3-NEXT: [[ADD34:%.*]] = add i32 [[TMP30]], [[MUL33]]
// CHECK3-NEXT: store i32 [[ADD34]], i32* [[I28]], align 4, !llvm.access.group !7
// CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[I28]], align 4, !llvm.access.group !7
// CHECK3-NEXT: [[IDXPROM35:%.*]] = sext i32 [[TMP32]] to i64
// CHECK3-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM35]]
// CHECK3-NEXT: store float 0.000000e+00, float* [[ARRAYIDX36]], align 4, !llvm.access.group !7
// CHECK3-NEXT: call void @__captured_stmt.1(i32* [[I28]]), !llvm.access.group !7
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE37:%.*]]
// CHECK3-IRBUILDER: omp.body.continue38:
// CHECK3-NORMAL: omp.body.continue37:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC38:%.*]]
// CHECK3-IRBUILDER: omp.inner.for.inc39:
// CHECK3-NORMAL: omp.inner.for.inc38:
// CHECK3-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK3-NEXT: [[ADD39:%.*]] = add i32 [[TMP33]], 1
// CHECK3-NEXT: store i32 [[ADD39]], i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12]])
// CHECK3-NEXT: call void @__kmpc_dispatch_fini_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]), !llvm.access.group !7
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK3-IRBUILDER: omp.inner.for.end42:
// CHECK3-NORMAL: omp.inner.for.end40:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK3-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK3-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK3-NEXT: [[SUB41:%.*]] = sub i32 [[TMP37]], [[TMP38]]
// CHECK3-NEXT: [[SUB42:%.*]] = sub i32 [[SUB41]], 1
// CHECK3-NEXT: [[ADD43:%.*]] = add i32 [[SUB42]], 1
// CHECK3-NEXT: [[DIV44:%.*]] = udiv i32 [[ADD43]], 1
// CHECK3-NEXT: [[MUL45:%.*]] = mul i32 [[DIV44]], 1
// CHECK3-NEXT: [[ADD46:%.*]] = add i32 [[TMP36]], [[MUL45]]
// CHECK3-NEXT: store i32 [[ADD46]], i32* [[I28]], align 4
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK3-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@__captured_stmt
// CHECK3-SAME: (i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK3-NEXT: store float 1.000000e+00, float* [[ARRAYIDX]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@__captured_stmt.1
// CHECK3-SAME: (i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK3-NEXT: store float 1.000000e+00, float* [[ARRAYIDX]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 4571423, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK4-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
// CHECK4-NORMAL-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 4571423, i32 1, i32 1)
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK4-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 7
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]]
// CHECK4-NEXT: store i32 [[SUB]], i32* [[I]], align 4
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: [[TMP6:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[I]], align 4
// CHECK4-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP6]], i64 [[IDXPROM]]
// CHECK4-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK4-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP10]] to i64
// CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[IDXPROM1]]
// CHECK4-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK4-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
// CHECK4-NEXT: [[TMP12:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK4-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64
// CHECK4-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM4]]
// CHECK4-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX5]], align 4
// CHECK4-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP14]]
// CHECK4-NEXT: [[TMP15:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK4-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64
// CHECK4-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM7]]
// CHECK4-NEXT: store float [[MUL6]], float* [[ARRAYIDX8]], align 4
// CHECK4-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK4-NEXT: call void @__kmpc_dispatch_fini_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP0]])
// CHECK4-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK4-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT: store i64 16908287, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6:[0-9]+]])
// CHECK4-NEXT: call void @__kmpc_dispatch_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 67, i64 0, i64 16908287, i64 1, i64 1)
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6]])
// CHECK4-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT: store i64 [[TMP2]], i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: [[ADD:%.*]] = add i64 [[TMP4]], 1
// CHECK4-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP3]], [[ADD]]
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[MUL:%.*]] = mul i64 [[TMP5]], 127
// CHECK4-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]]
// CHECK4-NEXT: store i64 [[ADD1]], i64* [[I]], align 8
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: [[TMP6:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK4-NEXT: [[TMP7:%.*]] = load i64, i64* [[I]], align 8
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP6]], i64 [[TMP7]]
// CHECK4-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = load i64, i64* [[I]], align 8
// CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[TMP10]]
// CHECK4-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK4-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]]
// CHECK4-NEXT: [[TMP12:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK4-NEXT: [[TMP13:%.*]] = load i64, i64* [[I]], align 8
// CHECK4-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[TMP13]]
// CHECK4-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX4]], align 4
// CHECK4-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP14]]
// CHECK4-NEXT: [[TMP15:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK4-NEXT: [[TMP16:%.*]] = load i64, i64* [[I]], align 8
// CHECK4-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[TMP16]]
// CHECK4-NEXT: store float [[MUL5]], float* [[ARRAYIDX6]], align 4
// CHECK4-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[ADD7:%.*]] = add i64 [[TMP17]], 1
// CHECK4-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6]])
// CHECK4-NEXT: call void @__kmpc_dispatch_fini_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK4-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK4-NEXT: [[X6:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I8:%.*]] = alloca i8, align 1
// CHECK4-NEXT: [[X9:%.*]] = alloca i32, align 4
// CHECK4-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK4-NEXT: store i32 0, i32* [[X]], align 4
// CHECK4-NEXT: store i32 0, i32* [[Y]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[Y]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK4-NEXT: store i8 [[CONV]], i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK4-NEXT: [[TMP2:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK4-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
// CHECK4-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]]
// CHECK4-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1
// CHECK4-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK4-NEXT: [[CONV4:%.*]] = zext i32 [[DIV]] to i64
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11
// CHECK4-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK4-NEXT: store i64 [[SUB5]], i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK4-NEXT: [[TMP3:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK4-NEXT: store i8 [[TMP3]], i8* [[I]], align 1
// CHECK4-NEXT: store i32 11, i32* [[X6]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK4-NEXT: [[CONV7:%.*]] = sext i8 [[TMP4]] to i32
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK4: omp.precond.then:
// CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK4-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_2]], align 8
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8:[0-9]+]])
// CHECK4-NEXT: call void @__kmpc_dispatch_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 70, i64 0, i64 [[TMP6]], i64 1, i64 1)
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8]])
// CHECK4-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]])
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]]
// CHECK4-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP11:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK4-NEXT: [[CONV11:%.*]] = sext i8 [[TMP11]] to i64
// CHECK4-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[DIV12:%.*]] = sdiv i64 [[TMP12]], 11
// CHECK4-NEXT: [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1
// CHECK4-NEXT: [[ADD14:%.*]] = add nsw i64 [[CONV11]], [[MUL13]]
// CHECK4-NEXT: [[CONV15:%.*]] = trunc i64 [[ADD14]] to i8
// CHECK4-NEXT: store i8 [[CONV15]], i8* [[I8]], align 1
// CHECK4-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[DIV16:%.*]] = sdiv i64 [[TMP14]], 11
// CHECK4-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 11
// CHECK4-NEXT: [[SUB18:%.*]] = sub nsw i64 [[TMP13]], [[MUL17]]
// CHECK4-NEXT: [[MUL19:%.*]] = mul nsw i64 [[SUB18]], 1
// CHECK4-NEXT: [[SUB20:%.*]] = sub nsw i64 11, [[MUL19]]
// CHECK4-NEXT: [[CONV21:%.*]] = trunc i64 [[SUB20]] to i32
// CHECK4-NEXT: store i32 [[CONV21]], i32* [[X9]], align 4
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: [[TMP15:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK4-NEXT: [[TMP16:%.*]] = load i8, i8* [[I8]], align 1
// CHECK4-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP16]] to i64
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM]]
// CHECK4-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK4-NEXT: [[TMP18:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK4-NEXT: [[TMP19:%.*]] = load i8, i8* [[I8]], align 1
// CHECK4-NEXT: [[IDXPROM22:%.*]] = sext i8 [[TMP19]] to i64
// CHECK4-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM22]]
// CHECK4-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX23]], align 4
// CHECK4-NEXT: [[MUL24:%.*]] = fmul float [[TMP17]], [[TMP20]]
// CHECK4-NEXT: [[TMP21:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK4-NEXT: [[TMP22:%.*]] = load i8, i8* [[I8]], align 1
// CHECK4-NEXT: [[IDXPROM25:%.*]] = sext i8 [[TMP22]] to i64
// CHECK4-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, float* [[TMP21]], i64 [[IDXPROM25]]
// CHECK4-NEXT: [[TMP23:%.*]] = load float, float* [[ARRAYIDX26]], align 4
// CHECK4-NEXT: [[MUL27:%.*]] = fmul float [[MUL24]], [[TMP23]]
// CHECK4-NEXT: [[TMP24:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK4-NEXT: [[TMP25:%.*]] = load i8, i8* [[I8]], align 1
// CHECK4-NEXT: [[IDXPROM28:%.*]] = sext i8 [[TMP25]] to i64
// CHECK4-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, float* [[TMP24]], i64 [[IDXPROM28]]
// CHECK4-NEXT: store float [[MUL27]], float* [[ARRAYIDX29]], align 4
// CHECK4-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[ADD30:%.*]] = add nsw i64 [[TMP26]], 1
// CHECK4-NEXT: store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB8]])
// CHECK4-NEXT: call void @__kmpc_dispatch_fini_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-NEXT: br label [[OMP_PRECOND_END]]
// CHECK4: omp.precond.end:
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK4-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
// CHECK4-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK4-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK4-NEXT: [[X2:%.*]] = alloca i32, align 4
// CHECK4-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK4-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK4-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK4-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK4-NEXT: store i32 0, i32* [[X]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 199, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10:[0-9]+]])
// CHECK4-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 69, i32 0, i32 199, i32 1, i32 1)
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10]])
// CHECK4-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]]
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 20
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]]
// CHECK4-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8
// CHECK4-NEXT: store i8 [[CONV]], i8* [[I]], align 1
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP7]], 20
// CHECK4-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 20
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], [[MUL4]]
// CHECK4-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1
// CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 -10, [[MUL5]]
// CHECK4-NEXT: store i32 [[ADD6]], i32* [[X2]], align 4
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-NEXT: call void @__kmpc_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: [[TMP8:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK4-NEXT: [[TMP9:%.*]] = load i8, i8* [[I]], align 1
// CHECK4-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP9]] to i64
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP8]], i64 [[IDXPROM]]
// CHECK4-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK4-NEXT: [[TMP12:%.*]] = load i8, i8* [[I]], align 1
// CHECK4-NEXT: [[IDXPROM7:%.*]] = zext i8 [[TMP12]] to i64
// CHECK4-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[IDXPROM7]]
// CHECK4-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK4-NEXT: [[MUL9:%.*]] = fmul float [[TMP10]], [[TMP13]]
// CHECK4-NEXT: [[TMP14:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK4-NEXT: [[TMP15:%.*]] = load i8, i8* [[I]], align 1
// CHECK4-NEXT: [[IDXPROM10:%.*]] = zext i8 [[TMP15]] to i64
// CHECK4-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, float* [[TMP14]], i64 [[IDXPROM10]]
// CHECK4-NEXT: [[TMP16:%.*]] = load float, float* [[ARRAYIDX11]], align 4
// CHECK4-NEXT: [[MUL12:%.*]] = fmul float [[MUL9]], [[TMP16]]
// CHECK4-NEXT: [[TMP17:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK4-NEXT: [[TMP18:%.*]] = load i8, i8* [[I]], align 1
// CHECK4-NEXT: [[IDXPROM13:%.*]] = zext i8 [[TMP18]] to i64
// CHECK4-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, float* [[TMP17]], i64 [[IDXPROM13]]
// CHECK4-NEXT: store float [[MUL12]], float* [[ARRAYIDX14]], align 4
// CHECK4-NEXT: call void @__kmpc_end_ordered(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK4-NEXT: store i32 [[ADD15]], i32* [[DOTOMP_IV]], align 4
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB10]])
// CHECK4-NEXT: call void @__kmpc_dispatch_fini_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK4-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z8foo_simdii
// CHECK4-SAME: (i32 [[LOW:%.*]], i32 [[UP:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IV16:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP17:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I26:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I28:%.*]] = alloca i32, align 4
// CHECK4-NORMAL-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-NEXT: store i32 [[LOW]], i32* [[LOW_ADDR]], align 4
// CHECK4-NEXT: store i32 [[UP]], i32* [[UP_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
// CHECK4-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
// CHECK4-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK4-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK4-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[I]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK4: simd.if.then:
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3
// CHECK4-NEXT: [[ADD6:%.*]] = add i32 [[TMP9]], 1
// CHECK4-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP8]], [[ADD6]]
// CHECK4-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !3
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK4-NEXT: [[MUL:%.*]] = mul i32 [[TMP11]], 1
// CHECK4-NEXT: [[ADD8:%.*]] = add i32 [[TMP10]], [[MUL]]
// CHECK4-NEXT: store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !3
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !3
// CHECK4-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK4-NEXT: store float 0.000000e+00, float* [[ARRAYIDX]], align 4, !llvm.access.group !3
// CHECK4-NEXT: call void @__captured_stmt(i32* [[I5]]), !llvm.access.group !3
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK4-NEXT: [[ADD9:%.*]] = add i32 [[TMP13]], 1
// CHECK4-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: [[SUB10:%.*]] = sub i32 [[TMP15]], [[TMP16]]
// CHECK4-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1
// CHECK4-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1
// CHECK4-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], 1
// CHECK4-NEXT: [[MUL14:%.*]] = mul i32 [[DIV13]], 1
// CHECK4-NEXT: [[ADD15:%.*]] = add i32 [[TMP14]], [[MUL14]]
// CHECK4-NEXT: store i32 [[ADD15]], i32* [[I5]], align 4
// CHECK4-NEXT: br label [[SIMD_IF_END]]
// CHECK4: simd.if.end:
// CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP17]], i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK4-NEXT: [[SUB21:%.*]] = sub i32 [[TMP19]], [[TMP20]]
// CHECK4-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1
// CHECK4-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1
// CHECK4-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1
// CHECK4-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1
// CHECK4-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK4-NEXT: store i32 [[TMP21]], i32* [[I26]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK4-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP22]], [[TMP23]]
// CHECK4-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK4: omp.precond.then:
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK4-NEXT: store i32 [[TMP24]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12:[0-9]+]])
// CHECK4-NEXT: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 [[TMP25]], i32 1, i32 1)
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12]])
// CHECK4-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_dispatch_next_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP26]], 0
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP27]], i32* [[DOTOMP_IV16]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK4-IRBUILDER: omp.inner.for.cond30:
// CHECK4-NORMAL: omp.inner.for.cond29:
// CHECK4-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK4-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
// CHECK4-NEXT: [[ADD30:%.*]] = add i32 [[TMP29]], 1
// CHECK4-NEXT: [[CMP31:%.*]] = icmp ult i32 [[TMP28]], [[ADD30]]
// CHECK4-NEXT: br i1 [[CMP31]], label [[OMP_INNER_FOR_BODY32:%.*]], label [[OMP_INNER_FOR_END40:%.*]]
// CHECK4-IRBUILDER: omp.inner.for.body33:
// CHECK4-NORMAL: omp.inner.for.body32:
// CHECK4-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group !7
// CHECK4-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK4-NEXT: [[MUL33:%.*]] = mul i32 [[TMP31]], 1
// CHECK4-NEXT: [[ADD34:%.*]] = add i32 [[TMP30]], [[MUL33]]
// CHECK4-NEXT: store i32 [[ADD34]], i32* [[I28]], align 4, !llvm.access.group !7
// CHECK4-NEXT: [[TMP32:%.*]] = load i32, i32* [[I28]], align 4, !llvm.access.group !7
// CHECK4-NEXT: [[IDXPROM35:%.*]] = sext i32 [[TMP32]] to i64
// CHECK4-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM35]]
// CHECK4-NEXT: store float 0.000000e+00, float* [[ARRAYIDX36]], align 4, !llvm.access.group !7
// CHECK4-NEXT: call void @__captured_stmt.1(i32* [[I28]]), !llvm.access.group !7
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE37:%.*]]
// CHECK4-IRBUILDER: omp.body.continue38:
// CHECK4-NORMAL: omp.body.continue37:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC38:%.*]]
// CHECK4-IRBUILDER: omp.inner.for.inc39:
// CHECK4-NORMAL: omp.inner.for.inc38:
// CHECK4-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK4-NEXT: [[ADD39:%.*]] = add i32 [[TMP33]], 1
// CHECK4-NEXT: store i32 [[ADD39]], i32* [[DOTOMP_IV16]], align 4, !llvm.access.group !7
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB12]])
// CHECK4-NEXT: call void @__kmpc_dispatch_fini_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]), !llvm.access.group !7
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK4-IRBUILDER: omp.inner.for.end42:
// CHECK4-NORMAL: omp.inner.for.end40:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK4-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4: .omp.final.then:
// CHECK4-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK4-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK4-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
// CHECK4-NEXT: [[SUB41:%.*]] = sub i32 [[TMP37]], [[TMP38]]
// CHECK4-NEXT: [[SUB42:%.*]] = sub i32 [[SUB41]], 1
// CHECK4-NEXT: [[ADD43:%.*]] = add i32 [[SUB42]], 1
// CHECK4-NEXT: [[DIV44:%.*]] = udiv i32 [[ADD43]], 1
// CHECK4-NEXT: [[MUL45:%.*]] = mul i32 [[DIV44]], 1
// CHECK4-NEXT: [[ADD46:%.*]] = add i32 [[TMP36]], [[MUL45]]
// CHECK4-NEXT: store i32 [[ADD46]], i32* [[I28]], align 4
// CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK4: .omp.final.done:
// CHECK4-NEXT: br label [[OMP_PRECOND_END]]
// CHECK4: omp.precond.end:
// CHECK4-IRBUILDER-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK4-IRBUILDER-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP0]])
// CHECK4-NORMAL-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@__captured_stmt
// CHECK4-SAME: (i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK4-NEXT: store float 1.000000e+00, float* [[ARRAYIDX]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@__captured_stmt.1
// CHECK4-SAME: (i32* nonnull align 4 dereferenceable(4) [[I:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[I_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: store i32* [[I]], i32** [[I_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[I_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK4-NEXT: store float 1.000000e+00, float* [[ARRAYIDX]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK5-NEXT: store i32 32000000, i32* [[I]], align 4
// CHECK5-NEXT: br label [[FOR_COND:%.*]]
// CHECK5: for.cond:
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], 33
// CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK5: for.body:
// CHECK5-NEXT: [[TMP1:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP2]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 [[IDXPROM]]
// CHECK5-NEXT: [[TMP3:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[I]], align 4
// CHECK5-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP5]] to i64
// CHECK5-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP4]], i64 [[IDXPROM1]]
// CHECK5-NEXT: [[TMP6:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], [[TMP6]]
// CHECK5-NEXT: [[TMP7:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4
// CHECK5-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP8]] to i64
// CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP7]], i64 [[IDXPROM3]]
// CHECK5-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX4]], align 4
// CHECK5-NEXT: [[MUL5:%.*]] = fmul float [[MUL]], [[TMP9]]
// CHECK5-NEXT: [[TMP10:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK5-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP11]] to i64
// CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP10]], i64 [[IDXPROM6]]
// CHECK5-NEXT: store float [[MUL5]], float* [[ARRAYIDX7]], align 4
// CHECK5-NEXT: br label [[FOR_INC:%.*]]
// CHECK5: for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], -7
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
// CHECK5: for.end:
// CHECK5-NEXT: ret void
//
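// The CHECK5 run compiles with -fopenmp-simd, which honors only simd
// constructs, so the block above checks that _Z18static_not_chunkedPfS_S_S_
// lowers to a plain serial loop with no OpenMP runtime calls. A sketch
// reconstructed from that IR (an assumption, not copied from the source
// body; the function name is illustrative):
//
//   void static_not_chunked_sketch(float *a, float *b, float *c, float *d) {
//     for (int i = 32000000; i > 33; i += -7)
//       a[i] = b[i] * c[i] * d[i];
//   }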
//
// CHECK5-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK5-NEXT: store i64 131071, i64* [[I]], align 8
// CHECK5-NEXT: br label [[FOR_COND:%.*]]
// CHECK5: for.cond:
// CHECK5-NEXT: [[TMP0:%.*]] = load i64, i64* [[I]], align 8
// CHECK5-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP0]], 2147483647
// CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK5: for.body:
// CHECK5-NEXT: [[TMP1:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[I]], align 8
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 [[TMP2]]
// CHECK5-NEXT: [[TMP3:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i64, i64* [[I]], align 8
// CHECK5-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[TMP4]], i64 [[TMP5]]
// CHECK5-NEXT: [[TMP6:%.*]] = load float, float* [[ARRAYIDX1]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], [[TMP6]]
// CHECK5-NEXT: [[TMP7:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[I]], align 8
// CHECK5-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP7]], i64 [[TMP8]]
// CHECK5-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK5-NEXT: [[MUL3:%.*]] = fmul float [[MUL]], [[TMP9]]
// CHECK5-NEXT: [[TMP10:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = load i64, i64* [[I]], align 8
// CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP10]], i64 [[TMP11]]
// CHECK5-NEXT: store float [[MUL3]], float* [[ARRAYIDX4]], align 4
// CHECK5-NEXT: br label [[FOR_INC:%.*]]
// CHECK5: for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8
// CHECK5-NEXT: [[ADD:%.*]] = add i64 [[TMP12]], 127
// CHECK5-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK5: for.end:
// CHECK5-NEXT: ret void
//
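// Same -fopenmp-simd lowering for _Z8dynamic1PfS_S_S_: no runtime calls,
// just a serial loop over a 64-bit unsigned induction variable. A sketch
// reconstructed from the IR above (illustrative name, assumed source shape):
//
//   void dynamic1_sketch(float *a, float *b, float *c, float *d) {
//     for (unsigned long long i = 131071; i < 2147483647; i += 127)
//       a[i] = b[i] * c[i] * d[i];
//   }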
//
// CHECK5-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[X]], align 4
// CHECK5-NEXT: store i32 0, i32* [[Y]], align 4
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = trunc i32 [[TMP0]] to i8
// CHECK5-NEXT: store i8 [[CONV]], i8* [[I]], align 1
// CHECK5-NEXT: br label [[FOR_COND:%.*]]
// CHECK5: for.cond:
// CHECK5-NEXT: [[TMP1:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV1]], 57
// CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END13:%.*]]
// CHECK5: for.body:
// CHECK5-NEXT: store i32 11, i32* [[X]], align 4
// CHECK5-NEXT: br label [[FOR_COND2:%.*]]
// CHECK5: for.cond2:
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[X]], align 4
// CHECK5-NEXT: [[CMP3:%.*]] = icmp ugt i32 [[TMP2]], 0
// CHECK5-NEXT: br i1 [[CMP3]], label [[FOR_BODY4:%.*]], label [[FOR_END:%.*]]
// CHECK5: for.body4:
// CHECK5-NEXT: [[TMP3:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP4]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP3]], i64 [[IDXPROM]]
// CHECK5-NEXT: [[TMP5:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP6:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[IDXPROM5:%.*]] = sext i8 [[TMP7]] to i64
// CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP6]], i64 [[IDXPROM5]]
// CHECK5-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX6]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP5]], [[TMP8]]
// CHECK5-NEXT: [[TMP9:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP10:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[IDXPROM7:%.*]] = sext i8 [[TMP10]] to i64
// CHECK5-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[IDXPROM7]]
// CHECK5-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK5-NEXT: [[MUL9:%.*]] = fmul float [[MUL]], [[TMP11]]
// CHECK5-NEXT: [[TMP12:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP13:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[IDXPROM10:%.*]] = sext i8 [[TMP13]] to i64
// CHECK5-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM10]]
// CHECK5-NEXT: store float [[MUL9]], float* [[ARRAYIDX11]], align 4
// CHECK5-NEXT: br label [[FOR_INC:%.*]]
// CHECK5: for.inc:
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[X]], align 4
// CHECK5-NEXT: [[DEC:%.*]] = add i32 [[TMP14]], -1
// CHECK5-NEXT: store i32 [[DEC]], i32* [[X]], align 4
// CHECK5-NEXT: br label [[FOR_COND2]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK5: for.end:
// CHECK5-NEXT: br label [[FOR_INC12:%.*]]
// CHECK5: for.inc12:
// CHECK5-NEXT: [[TMP15:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[INC:%.*]] = add i8 [[TMP15]], 1
// CHECK5-NEXT: store i8 [[INC]], i8* [[I]], align 1
// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK5: for.end13:
// CHECK5-NEXT: ret void
//
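// _Z9test_autoPfS_S_S_ under -fopenmp-simd is likewise a plain nest of two
// serial loops: a signed char outer induction variable counting up to 57
// ('9') and an unsigned inner counter counting down from 11. A sketch
// reconstructed from the IR above (illustrative name; the cast mirrors the
// trunc/sext instructions being checked):
//
//   void test_auto_sketch(float *a, float *b, float *c, float *d) {
//     unsigned int x = 0, y = 0;
//     for (char i = static_cast<char>(y); i <= '9'; ++i)
//       for (x = 11; x > 0; --x)
//         a[i] = b[i] * c[i] * d[i];
//   }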
//
// CHECK5-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
// CHECK5-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK5-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK5-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK5-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK5-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK5-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[X]], align 4
// CHECK5-NEXT: store i8 48, i8* [[I]], align 1
// CHECK5-NEXT: br label [[FOR_COND:%.*]]
// CHECK5: for.cond:
// CHECK5-NEXT: [[TMP0:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV]], 57
// CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END13:%.*]]
// CHECK5: for.body:
// CHECK5-NEXT: store i32 -10, i32* [[X]], align 4
// CHECK5-NEXT: br label [[FOR_COND1:%.*]]
// CHECK5: for.cond1:
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[X]], align 4
// CHECK5-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP1]], 10
// CHECK5-NEXT: br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END:%.*]]
// CHECK5: for.body3:
// CHECK5-NEXT: [[TMP2:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP3]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 [[IDXPROM]]
// CHECK5-NEXT: [[TMP4:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[IDXPROM4:%.*]] = zext i8 [[TMP6]] to i64
// CHECK5-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[TMP5]], i64 [[IDXPROM4]]
// CHECK5-NEXT: [[TMP7:%.*]] = load float, float* [[ARRAYIDX5]], align 4
// CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP4]], [[TMP7]]
// CHECK5-NEXT: [[TMP8:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK5-NEXT: [[TMP9:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[IDXPROM6:%.*]] = zext i8 [[TMP9]] to i64
// CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP8]], i64 [[IDXPROM6]]
// CHECK5-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK5-NEXT: [[MUL8:%.*]] = fmul float [[MUL]], [[TMP10]]
// CHECK5-NEXT: [[TMP11:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK5-NEXT: [[TMP12:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP12]] to i64
// CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[IDXPROM9]]
// CHECK5-NEXT: store float [[MUL8]], float* [[ARRAYIDX10]], align 4
// CHECK5-NEXT: br label [[FOR_INC:%.*]]
// CHECK5: for.inc:
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[X]], align 4
// CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK5-NEXT: store i32 [[INC]], i32* [[X]], align 4
// CHECK5-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK5: for.end:
// CHECK5-NEXT: br label [[FOR_INC11:%.*]]
// CHECK5: for.inc11:
// CHECK5-NEXT: [[TMP14:%.*]] = load i8, i8* [[I]], align 1
// CHECK5-NEXT: [[INC12:%.*]] = add i8 [[TMP14]], 1
// CHECK5-NEXT: store i8 [[INC12]], i8* [[I]], align 1
// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK5: for.end13:
// CHECK5-NEXT: ret void
//
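// _Z7runtimePfS_S_S_ follows the same pattern: an unsigned char outer loop
// from 48 ('0') through 57 ('9') and a signed inner loop from -10 to 9.
// A sketch reconstructed from the IR above (illustrative name):
//
//   void runtime_sketch(float *a, float *b, float *c, float *d) {
//     int x = 0;
//     for (unsigned char i = '0'; i <= '9'; ++i)
//       for (x = -10; x < 10; ++x)
//         a[i] = b[i] * c[i] * d[i];
//   }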
//
// CHECK5-LABEL: define {{[^@]+}}@_Z8foo_simdii
// CHECK5-SAME: (i32 [[LOW:%.*]], i32 [[UP:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I27:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IV30:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I31:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32 [[LOW]], i32* [[LOW_ADDR]], align 4
// CHECK5-NEXT: store i32 [[UP]], i32* [[UP_ADDR]], align 4
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK5-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
// CHECK5-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
// CHECK5-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK5-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK5-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: store i32 [[TMP4]], i32* [[I]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP5]], [[TMP6]]
// CHECK5-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK5: simd.if.then:
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !9
// CHECK5-NEXT: [[ADD6:%.*]] = add i32 [[TMP8]], 1
// CHECK5-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP7]], [[ADD6]]
// CHECK5-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !9
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
// CHECK5-NEXT: [[MUL:%.*]] = mul i32 [[TMP10]], 1
// CHECK5-NEXT: [[ADD8:%.*]] = add i32 [[TMP9]], [[MUL]]
// CHECK5-NEXT: store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !9
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !9
// CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK5-NEXT: store float 0.000000e+00, float* [[ARRAYIDX]], align 4, !llvm.access.group !9
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !9
// CHECK5-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP12]] to i64
// CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM9]]
// CHECK5-NEXT: store float 1.000000e+00, float* [[ARRAYIDX10]], align 4, !llvm.access.group !9
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
// CHECK5-NEXT: [[ADD11:%.*]] = add i32 [[TMP13]], 1
// CHECK5-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK5-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[SUB12:%.*]] = sub i32 [[TMP15]], [[TMP16]]
// CHECK5-NEXT: [[SUB13:%.*]] = sub i32 [[SUB12]], 1
// CHECK5-NEXT: [[ADD14:%.*]] = add i32 [[SUB13]], 1
// CHECK5-NEXT: [[DIV15:%.*]] = udiv i32 [[ADD14]], 1
// CHECK5-NEXT: [[MUL16:%.*]] = mul i32 [[DIV15]], 1
// CHECK5-NEXT: [[ADD17:%.*]] = add i32 [[TMP14]], [[MUL16]]
// CHECK5-NEXT: store i32 [[ADD17]], i32* [[I5]], align 4
// CHECK5-NEXT: br label [[SIMD_IF_END]]
// CHECK5: simd.if.end:
// CHECK5-NEXT: [[TMP17:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK5-NEXT: store i32 [[TMP17]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[TMP18:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK5-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK5-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK5-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[SUB22:%.*]] = sub i32 [[TMP19]], [[TMP20]]
// CHECK5-NEXT: [[SUB23:%.*]] = sub i32 [[SUB22]], 1
// CHECK5-NEXT: [[ADD24:%.*]] = add i32 [[SUB23]], 1
// CHECK5-NEXT: [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
// CHECK5-NEXT: [[SUB26:%.*]] = sub i32 [[DIV25]], 1
// CHECK5-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_21]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
// CHECK5-NEXT: store i32 [[TMP21]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: store i32 [[TMP22]], i32* [[I27]], align 4
// CHECK5-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK5-NEXT: [[CMP28:%.*]] = icmp slt i32 [[TMP23]], [[TMP24]]
// CHECK5-NEXT: br i1 [[CMP28]], label [[SIMD_IF_THEN29:%.*]], label [[SIMD_IF_END52:%.*]]
// CHECK5: simd.if.then29:
// CHECK5-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP25]], i32* [[DOTOMP_IV30]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND32:%.*]]
// CHECK5: omp.inner.for.cond32:
// CHECK5-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_IV30]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[ADD33:%.*]] = add i32 [[TMP27]], 1
// CHECK5-NEXT: [[CMP34:%.*]] = icmp ult i32 [[TMP26]], [[ADD33]]
// CHECK5-NEXT: br i1 [[CMP34]], label [[OMP_INNER_FOR_BODY35:%.*]], label [[OMP_INNER_FOR_END45:%.*]]
// CHECK5: omp.inner.for.body35:
// CHECK5-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV30]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[MUL36:%.*]] = mul i32 [[TMP29]], 1
// CHECK5-NEXT: [[ADD37:%.*]] = add i32 [[TMP28]], [[MUL36]]
// CHECK5-NEXT: store i32 [[ADD37]], i32* [[I31]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[TMP30:%.*]] = load i32, i32* [[I31]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[IDXPROM38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK5-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM38]]
// CHECK5-NEXT: store float 0.000000e+00, float* [[ARRAYIDX39]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[TMP31:%.*]] = load i32, i32* [[I31]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[IDXPROM40:%.*]] = sext i32 [[TMP31]] to i64
// CHECK5-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM40]]
// CHECK5-NEXT: store float 1.000000e+00, float* [[ARRAYIDX41]], align 4, !llvm.access.group !13
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE42:%.*]]
// CHECK5: omp.body.continue42:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC43:%.*]]
// CHECK5: omp.inner.for.inc43:
// CHECK5-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV30]], align 4, !llvm.access.group !13
// CHECK5-NEXT: [[ADD44:%.*]] = add i32 [[TMP32]], 1
// CHECK5-NEXT: store i32 [[ADD44]], i32* [[DOTOMP_IV30]], align 4, !llvm.access.group !13
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND32]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK5: omp.inner.for.end45:
// CHECK5-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK5-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK5-NEXT: [[SUB46:%.*]] = sub i32 [[TMP34]], [[TMP35]]
// CHECK5-NEXT: [[SUB47:%.*]] = sub i32 [[SUB46]], 1
// CHECK5-NEXT: [[ADD48:%.*]] = add i32 [[SUB47]], 1
// CHECK5-NEXT: [[DIV49:%.*]] = udiv i32 [[ADD48]], 1
// CHECK5-NEXT: [[MUL50:%.*]] = mul i32 [[DIV49]], 1
// CHECK5-NEXT: [[ADD51:%.*]] = add i32 [[TMP33]], [[MUL50]]
// CHECK5-NEXT: store i32 [[ADD51]], i32* [[I31]], align 4
// CHECK5-NEXT: br label [[SIMD_IF_END52]]
// CHECK5: simd.if.end52:
// CHECK5-NEXT: ret void
//
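// For _Z8foo_simdii, -fopenmp-simd still emits real simd loops (note the
// !llvm.access.group metadata on every body access): each of the two loops
// guards on low < up, then stores 0.0f and 1.0f to f[i], the second store
// presumably being the body of an `ordered simd` region. A sketch
// reconstructed from the IR above (illustrative name):
//
//   void foo_simd_sketch(int low, int up) {
//     for (int i = low; i < up; ++i) {
//       f[i] = 0.0f;
//       f[i] = 1.0f; // ordered-simd body
//     }
//     // ...and once more for the second simd loop checked above.
//   }
//
// The CHECK6 blocks below repeat the same IR for a second RUN configuration
// (presumably the -include-pch variant of the -fopenmp-simd run).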
//
// CHECK6-LABEL: define {{[^@]+}}@_Z18static_not_chunkedPfS_S_S_
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK6-NEXT: store i32 32000000, i32* [[I]], align 4
// CHECK6-NEXT: br label [[FOR_COND:%.*]]
// CHECK6: for.cond:
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], 33
// CHECK6-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK6: for.body:
// CHECK6-NEXT: [[TMP1:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP2]] to i64
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 [[IDXPROM]]
// CHECK6-NEXT: [[TMP3:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[I]], align 4
// CHECK6-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP5]] to i64
// CHECK6-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP4]], i64 [[IDXPROM1]]
// CHECK6-NEXT: [[TMP6:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK6-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], [[TMP6]]
// CHECK6-NEXT: [[TMP7:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4
// CHECK6-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP8]] to i64
// CHECK6-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP7]], i64 [[IDXPROM3]]
// CHECK6-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX4]], align 4
// CHECK6-NEXT: [[MUL5:%.*]] = fmul float [[MUL]], [[TMP9]]
// CHECK6-NEXT: [[TMP10:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK6-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP11]] to i64
// CHECK6-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP10]], i64 [[IDXPROM6]]
// CHECK6-NEXT: store float [[MUL5]], float* [[ARRAYIDX7]], align 4
// CHECK6-NEXT: br label [[FOR_INC:%.*]]
// CHECK6: for.inc:
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], -7
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK6-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
// CHECK6: for.end:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z8dynamic1PfS_S_S_
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK6-NEXT: store i64 131071, i64* [[I]], align 8
// CHECK6-NEXT: br label [[FOR_COND:%.*]]
// CHECK6: for.cond:
// CHECK6-NEXT: [[TMP0:%.*]] = load i64, i64* [[I]], align 8
// CHECK6-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP0]], 2147483647
// CHECK6-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK6: for.body:
// CHECK6-NEXT: [[TMP1:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK6-NEXT: [[TMP2:%.*]] = load i64, i64* [[I]], align 8
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP1]], i64 [[TMP2]]
// CHECK6-NEXT: [[TMP3:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK6-NEXT: [[TMP5:%.*]] = load i64, i64* [[I]], align 8
// CHECK6-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[TMP4]], i64 [[TMP5]]
// CHECK6-NEXT: [[TMP6:%.*]] = load float, float* [[ARRAYIDX1]], align 4
// CHECK6-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], [[TMP6]]
// CHECK6-NEXT: [[TMP7:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK6-NEXT: [[TMP8:%.*]] = load i64, i64* [[I]], align 8
// CHECK6-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[TMP7]], i64 [[TMP8]]
// CHECK6-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX2]], align 4
// CHECK6-NEXT: [[MUL3:%.*]] = fmul float [[MUL]], [[TMP9]]
// CHECK6-NEXT: [[TMP10:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK6-NEXT: [[TMP11:%.*]] = load i64, i64* [[I]], align 8
// CHECK6-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[TMP10]], i64 [[TMP11]]
// CHECK6-NEXT: store float [[MUL3]], float* [[ARRAYIDX4]], align 4
// CHECK6-NEXT: br label [[FOR_INC:%.*]]
// CHECK6: for.inc:
// CHECK6-NEXT: [[TMP12:%.*]] = load i64, i64* [[I]], align 8
// CHECK6-NEXT: [[ADD:%.*]] = add i64 [[TMP12]], 127
// CHECK6-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK6-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK6: for.end:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z9test_autoPfS_S_S_
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[X]], align 4
// CHECK6-NEXT: store i32 0, i32* [[Y]], align 4
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[Y]], align 4
// CHECK6-NEXT: [[CONV:%.*]] = trunc i32 [[TMP0]] to i8
// CHECK6-NEXT: store i8 [[CONV]], i8* [[I]], align 1
// CHECK6-NEXT: br label [[FOR_COND:%.*]]
// CHECK6: for.cond:
// CHECK6-NEXT: [[TMP1:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
// CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV1]], 57
// CHECK6-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END13:%.*]]
// CHECK6: for.body:
// CHECK6-NEXT: store i32 11, i32* [[X]], align 4
// CHECK6-NEXT: br label [[FOR_COND2:%.*]]
// CHECK6: for.cond2:
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[X]], align 4
// CHECK6-NEXT: [[CMP3:%.*]] = icmp ugt i32 [[TMP2]], 0
// CHECK6-NEXT: br i1 [[CMP3]], label [[FOR_BODY4:%.*]], label [[FOR_END:%.*]]
// CHECK6: for.body4:
// CHECK6-NEXT: [[TMP3:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK6-NEXT: [[TMP4:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP4]] to i64
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP3]], i64 [[IDXPROM]]
// CHECK6-NEXT: [[TMP5:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK6-NEXT: [[TMP6:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK6-NEXT: [[TMP7:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[IDXPROM5:%.*]] = sext i8 [[TMP7]] to i64
// CHECK6-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP6]], i64 [[IDXPROM5]]
// CHECK6-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX6]], align 4
// CHECK6-NEXT: [[MUL:%.*]] = fmul float [[TMP5]], [[TMP8]]
// CHECK6-NEXT: [[TMP9:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK6-NEXT: [[TMP10:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[IDXPROM7:%.*]] = sext i8 [[TMP10]] to i64
// CHECK6-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[IDXPROM7]]
// CHECK6-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX8]], align 4
// CHECK6-NEXT: [[MUL9:%.*]] = fmul float [[MUL]], [[TMP11]]
// CHECK6-NEXT: [[TMP12:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK6-NEXT: [[TMP13:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[IDXPROM10:%.*]] = sext i8 [[TMP13]] to i64
// CHECK6-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM10]]
// CHECK6-NEXT: store float [[MUL9]], float* [[ARRAYIDX11]], align 4
// CHECK6-NEXT: br label [[FOR_INC:%.*]]
// CHECK6: for.inc:
// CHECK6-NEXT: [[TMP14:%.*]] = load i32, i32* [[X]], align 4
// CHECK6-NEXT: [[DEC:%.*]] = add i32 [[TMP14]], -1
// CHECK6-NEXT: store i32 [[DEC]], i32* [[X]], align 4
// CHECK6-NEXT: br label [[FOR_COND2]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK6: for.end:
// CHECK6-NEXT: br label [[FOR_INC12:%.*]]
// CHECK6: for.inc12:
// CHECK6-NEXT: [[TMP15:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[INC:%.*]] = add i8 [[TMP15]], 1
// CHECK6-NEXT: store i8 [[INC]], i8* [[I]], align 1
// CHECK6-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK6: for.end13:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z7runtimePfS_S_S_
// CHECK6-SAME: (float* [[A:%.*]], float* [[B:%.*]], float* [[C:%.*]], float* [[D:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK6-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i8, align 1
// CHECK6-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK6-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK6-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK6-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK6-NEXT: store i32 0, i32* [[X]], align 4
// CHECK6-NEXT: store i8 48, i8* [[I]], align 1
// CHECK6-NEXT: br label [[FOR_COND:%.*]]
// CHECK6: for.cond:
// CHECK6-NEXT: [[TMP0:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
// CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV]], 57
// CHECK6-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END13:%.*]]
// CHECK6: for.body:
// CHECK6-NEXT: store i32 -10, i32* [[X]], align 4
// CHECK6-NEXT: br label [[FOR_COND1:%.*]]
// CHECK6: for.cond1:
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[X]], align 4
// CHECK6-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP1]], 10
// CHECK6-NEXT: br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END:%.*]]
// CHECK6: for.body3:
// CHECK6-NEXT: [[TMP2:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK6-NEXT: [[TMP3:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP3]] to i64
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 [[IDXPROM]]
// CHECK6-NEXT: [[TMP4:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK6-NEXT: [[TMP5:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK6-NEXT: [[TMP6:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[IDXPROM4:%.*]] = zext i8 [[TMP6]] to i64
// CHECK6-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[TMP5]], i64 [[IDXPROM4]]
// CHECK6-NEXT: [[TMP7:%.*]] = load float, float* [[ARRAYIDX5]], align 4
// CHECK6-NEXT: [[MUL:%.*]] = fmul float [[TMP4]], [[TMP7]]
// CHECK6-NEXT: [[TMP8:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK6-NEXT: [[TMP9:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[IDXPROM6:%.*]] = zext i8 [[TMP9]] to i64
// CHECK6-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[TMP8]], i64 [[IDXPROM6]]
// CHECK6-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX7]], align 4
// CHECK6-NEXT: [[MUL8:%.*]] = fmul float [[MUL]], [[TMP10]]
// CHECK6-NEXT: [[TMP11:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK6-NEXT: [[TMP12:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP12]] to i64
// CHECK6-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP11]], i64 [[IDXPROM9]]
// CHECK6-NEXT: store float [[MUL8]], float* [[ARRAYIDX10]], align 4
// CHECK6-NEXT: br label [[FOR_INC:%.*]]
// CHECK6: for.inc:
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[X]], align 4
// CHECK6-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK6-NEXT: store i32 [[INC]], i32* [[X]], align 4
// CHECK6-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK6: for.end:
// CHECK6-NEXT: br label [[FOR_INC11:%.*]]
// CHECK6: for.inc11:
// CHECK6-NEXT: [[TMP14:%.*]] = load i8, i8* [[I]], align 1
// CHECK6-NEXT: [[INC12:%.*]] = add i8 [[TMP14]], 1
// CHECK6-NEXT: store i8 [[INC12]], i8* [[I]], align 1
// CHECK6-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK6: for.end13:
// CHECK6-NEXT: ret void
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z8foo_simdii
// CHECK6-SAME: (i32 [[LOW:%.*]], i32 [[UP:%.*]]) #[[ATTR0]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[LOW_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[UP_ADDR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I5:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_20:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I27:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IV30:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I31:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32 [[LOW]], i32* [[LOW_ADDR]], align 4
// CHECK6-NEXT: store i32 [[UP]], i32* [[UP_ADDR]], align 4
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK6-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK6-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]]
// CHECK6-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
// CHECK6-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1
// CHECK6-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1
// CHECK6-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK6-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK6-NEXT: store i32 [[TMP4]], i32* [[I]], align 4
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK6-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP5]], [[TMP6]]
// CHECK6-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
// CHECK6: simd.if.then:
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !9
// CHECK6-NEXT: [[ADD6:%.*]] = add i32 [[TMP8]], 1
// CHECK6-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP7]], [[ADD6]]
// CHECK6-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !9
// CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
// CHECK6-NEXT: [[MUL:%.*]] = mul i32 [[TMP10]], 1
// CHECK6-NEXT: [[ADD8:%.*]] = add i32 [[TMP9]], [[MUL]]
// CHECK6-NEXT: store i32 [[ADD8]], i32* [[I5]], align 4, !llvm.access.group !9
// CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !9
// CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM]]
// CHECK6-NEXT: store float 0.000000e+00, float* [[ARRAYIDX]], align 4, !llvm.access.group !9
// CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[I5]], align 4, !llvm.access.group !9
// CHECK6-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP12]] to i64
// CHECK6-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM9]]
// CHECK6-NEXT: store float 1.000000e+00, float* [[ARRAYIDX10]], align 4, !llvm.access.group !9
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
// CHECK6-NEXT: [[ADD11:%.*]] = add i32 [[TMP13]], 1
// CHECK6-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK6-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK6-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK6-NEXT: [[SUB12:%.*]] = sub i32 [[TMP15]], [[TMP16]]
// CHECK6-NEXT: [[SUB13:%.*]] = sub i32 [[SUB12]], 1
// CHECK6-NEXT: [[ADD14:%.*]] = add i32 [[SUB13]], 1
// CHECK6-NEXT: [[DIV15:%.*]] = udiv i32 [[ADD14]], 1
// CHECK6-NEXT: [[MUL16:%.*]] = mul i32 [[DIV15]], 1
// CHECK6-NEXT: [[ADD17:%.*]] = add i32 [[TMP14]], [[MUL16]]
// CHECK6-NEXT: store i32 [[ADD17]], i32* [[I5]], align 4
// CHECK6-NEXT: br label [[SIMD_IF_END]]
// CHECK6: simd.if.end:
// CHECK6-NEXT: [[TMP17:%.*]] = load i32, i32* [[LOW_ADDR]], align 4
// CHECK6-NEXT: store i32 [[TMP17]], i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK6-NEXT: [[TMP18:%.*]] = load i32, i32* [[UP_ADDR]], align 4
// CHECK6-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK6-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK6-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK6-NEXT: [[SUB22:%.*]] = sub i32 [[TMP19]], [[TMP20]]
// CHECK6-NEXT: [[SUB23:%.*]] = sub i32 [[SUB22]], 1
// CHECK6-NEXT: [[ADD24:%.*]] = add i32 [[SUB23]], 1
// CHECK6-NEXT: [[DIV25:%.*]] = udiv i32 [[ADD24]], 1
// CHECK6-NEXT: [[SUB26:%.*]] = sub i32 [[DIV25]], 1
// CHECK6-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_21]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4
// CHECK6-NEXT: store i32 [[TMP21]], i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK6-NEXT: store i32 [[TMP22]], i32* [[I27]], align 4
// CHECK6-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK6-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK6-NEXT: [[CMP28:%.*]] = icmp slt i32 [[TMP23]], [[TMP24]]
// CHECK6-NEXT: br i1 [[CMP28]], label [[SIMD_IF_THEN29:%.*]], label [[SIMD_IF_END52:%.*]]
// CHECK6: simd.if.then29:
// CHECK6-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP25]], i32* [[DOTOMP_IV30]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND32:%.*]]
// CHECK6: omp.inner.for.cond32:
// CHECK6-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_IV30]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[ADD33:%.*]] = add i32 [[TMP27]], 1
// CHECK6-NEXT: [[CMP34:%.*]] = icmp ult i32 [[TMP26]], [[ADD33]]
// CHECK6-NEXT: br i1 [[CMP34]], label [[OMP_INNER_FOR_BODY35:%.*]], label [[OMP_INNER_FOR_END45:%.*]]
// CHECK6: omp.inner.for.body35:
// CHECK6-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV30]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[MUL36:%.*]] = mul i32 [[TMP29]], 1
// CHECK6-NEXT: [[ADD37:%.*]] = add i32 [[TMP28]], [[MUL36]]
// CHECK6-NEXT: store i32 [[ADD37]], i32* [[I31]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[TMP30:%.*]] = load i32, i32* [[I31]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[IDXPROM38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK6-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM38]]
// CHECK6-NEXT: store float 0.000000e+00, float* [[ARRAYIDX39]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[TMP31:%.*]] = load i32, i32* [[I31]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[IDXPROM40:%.*]] = sext i32 [[TMP31]] to i64
// CHECK6-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds [10 x float], [10 x float]* @f, i64 0, i64 [[IDXPROM40]]
// CHECK6-NEXT: store float 1.000000e+00, float* [[ARRAYIDX41]], align 4, !llvm.access.group !13
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE42:%.*]]
// CHECK6: omp.body.continue42:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC43:%.*]]
// CHECK6: omp.inner.for.inc43:
// CHECK6-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV30]], align 4, !llvm.access.group !13
// CHECK6-NEXT: [[ADD44:%.*]] = add i32 [[TMP32]], 1
// CHECK6-NEXT: store i32 [[ADD44]], i32* [[DOTOMP_IV30]], align 4, !llvm.access.group !13
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND32]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK6: omp.inner.for.end45:
// CHECK6-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK6-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_20]], align 4
// CHECK6-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_19]], align 4
// CHECK6-NEXT: [[SUB46:%.*]] = sub i32 [[TMP34]], [[TMP35]]
// CHECK6-NEXT: [[SUB47:%.*]] = sub i32 [[SUB46]], 1
// CHECK6-NEXT: [[ADD48:%.*]] = add i32 [[SUB47]], 1
// CHECK6-NEXT: [[DIV49:%.*]] = udiv i32 [[ADD48]], 1
// CHECK6-NEXT: [[MUL50:%.*]] = mul i32 [[DIV49]], 1
// CHECK6-NEXT: [[ADD51:%.*]] = add i32 [[TMP33]], [[MUL50]]
// CHECK6-NEXT: store i32 [[ADD51]], i32* [[I31]], align 4
// CHECK6-NEXT: br label [[SIMD_IF_END52]]
// CHECK6: simd.if.end52:
// CHECK6-NEXT: ret void
//