summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--compiler/dex/local_value_numbering.h2
-rw-r--r--compiler/dex/mir_field_info.h4
-rw-r--r--compiler/dex/mir_graph.h2
-rw-r--r--compiler/dex/mir_method_info.h12
-rw-r--r--compiler/dex/mir_optimization.cc18
-rw-r--r--compiler/dex/quick/arm/target_arm.cc4
-rw-r--r--compiler/dex/quick/arm64/assemble_arm64.cc4
-rw-r--r--compiler/dex/quick/codegen_util.cc4
-rw-r--r--compiler/dex/quick/dex_file_method_inliner.cc80
-rw-r--r--compiler/dex/quick/mir_to_lir.h22
-rw-r--r--compiler/dex/quick/quick_compiler.cc27
-rw-r--r--compiler/dex/quick/resource_mask.cc46
-rw-r--r--compiler/jni/quick/arm/calling_convention_arm.cc4
-rw-r--r--compiler/oat_writer.h4
-rw-r--r--compiler/optimizing/locations.h20
-rw-r--r--compiler/utils/arena_allocator.cc2
16 files changed, 130 insertions, 125 deletions
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index dd8d2db8f4..7c3b7d87ce 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -353,7 +353,7 @@ class LocalValueNumbering {
GlobalValueNumbering* gvn_;
// We're using the block id as a 16-bit operand value for some lookups.
- COMPILE_ASSERT(sizeof(BasicBlockId) == sizeof(uint16_t), BasicBlockId_must_be_16_bit);
+ static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "BasicBlockId must be 16 bit");
BasicBlockId id_;
SregValueMap sreg_value_map_;
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index c6281827cb..e97f7a00f5 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -130,7 +130,7 @@ class MirIFieldLoweringInfo : public MirFieldInfo {
kBitFastPut,
kIFieldLoweringInfoBitEnd
};
- COMPILE_ASSERT(kIFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static_assert(kIFieldLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
@@ -198,7 +198,7 @@ class MirSFieldLoweringInfo : public MirFieldInfo {
kBitClassIsInDexCache,
kSFieldLoweringInfoBitEnd
};
- COMPILE_ASSERT(kSFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static_assert(kSFieldLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index fd4c473444..cb1183becc 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -1266,7 +1266,7 @@ class MIRGraph {
ArenaVector<BasicBlockId> dom_post_order_traversal_;
ArenaVector<BasicBlockId> topological_order_;
// Indexes in topological_order_ need to be only as big as the BasicBlockId.
- COMPILE_ASSERT(sizeof(BasicBlockId) == sizeof(uint16_t), assuming_16_bit_BasicBlockId);
+ static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "Assuming 16 bit BasicBlockId");
// For each loop head, remember the past-the-end index of the end of the loop. 0 if not loop head.
ArenaVector<uint16_t> topological_order_loop_ends_;
// Map BB ids to topological_order_ indexes. 0xffff if not included (hidden or null block).
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index e64b028a9e..08fb103cf6 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -60,7 +60,7 @@ class MirMethodInfo {
kBitIsStatic = 0,
kMethodInfoBitEnd
};
- COMPILE_ASSERT(kMethodInfoBitEnd <= 16, too_many_flags);
+ static_assert(kMethodInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
MirMethodInfo(uint16_t method_idx, uint16_t flags)
@@ -170,15 +170,15 @@ class MirMethodLoweringInfo : public MirMethodInfo {
kBitClassIsInitialized,
kMethodLoweringInfoBitEnd
};
- COMPILE_ASSERT(kMethodLoweringInfoBitEnd <= 16, too_many_flags);
+ static_assert(kMethodLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
static constexpr uint16_t kInvokeTypeMask = 7u;
- COMPILE_ASSERT((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
- assert_invoke_type_bits_ok);
- COMPILE_ASSERT((1u << (kBitSharpTypeEnd - kBitSharpTypeBegin)) - 1u == kInvokeTypeMask,
- assert_sharp_type_bits_ok);
+ static_assert((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
+ "assert invoke type bits failed");
+ static_assert((1u << (kBitSharpTypeEnd - kBitSharpTypeBegin)) - 1u == kInvokeTypeMask,
+ "assert sharp type bits failed");
uintptr_t direct_code_;
uintptr_t direct_method_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 8e583ccfff..846dbb79ba 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -215,8 +215,8 @@ static constexpr ConditionCode kIfCcZConditionCodes[] = {
kCondEq, kCondNe, kCondLt, kCondGe, kCondGt, kCondLe
};
-COMPILE_ASSERT(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
- if_ccz_ccodes_size1);
+static_assert(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
+ "arraysize of kIfCcZConditionCodes unexpected");
static constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
@@ -226,12 +226,12 @@ static constexpr ConditionCode ConditionCodeForIfCcZ(Instruction::Code opcode) {
return kIfCcZConditionCodes[opcode - Instruction::IF_EQZ];
}
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, check_if_eqz_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, check_if_nez_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, check_if_ltz_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, check_if_gez_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, check_if_gtz_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, check_if_lez_ccode);
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, "if_eqz ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, "if_nez ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, "if_ltz ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, "if_gez ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, "if_gtz ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, "if_lez ccode");
int MIRGraph::GetSSAUseCount(int s_reg) {
DCHECK_LT(static_cast<size_t>(s_reg), ssa_subscripts_.size());
@@ -1087,7 +1087,7 @@ void MIRGraph::EliminateNullChecksEnd() {
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
- COMPILE_ASSERT(kMarkToIgnoreNullCheckShift > 0, check_valid_shift_right);
+ static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
uint16_t mirMarkAdjustedToIgnoreNullCheck =
(mir->optimization_flags & MIR_MARK) >> kMarkToIgnoreNullCheckShift;
mir->optimization_flags |= mirMarkAdjustedToIgnoreNullCheck;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 7100a285a6..d6434d9a62 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -904,8 +904,8 @@ RegStorage ArmMir2Lir::InToRegStorageArmMapper::GetNextReg(bool is_double_or_flo
const RegStorage fpArgMappingToPhysicalReg[] =
{rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15};
- const uint32_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
- COMPILE_ASSERT(fpArgMappingToPhysicalRegSize % 2 == 0, knum_of_fp_arg_regs_not_even);
+ constexpr uint32_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
+ static_assert(fpArgMappingToPhysicalRegSize % 2 == 0, "Number of FP Arg regs is not even");
if (kArm32QuickCodeUseSoftFloat) {
is_double_or_float = false; // Regard double as long, float as int.
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index e2ff090293..85f502ced6 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -779,8 +779,8 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
// and zr. This means that these two registers do not need any special treatment, as
// their bottom 5 bits are correctly set to 31 == 0b11111, which is the right
// value for encoding both sp and zr.
- COMPILE_ASSERT((rxzr & 0x1f) == 0x1f, rzr_register_number_must_be_31);
- COMPILE_ASSERT((rsp & 0x1f) == 0x1f, rsp_register_number_must_be_31);
+ static_assert((rxzr & 0x1f) == 0x1f, "rzr register number must be 31");
+ static_assert((rsp & 0x1f) == 0x1f, "rsp register number must be 31");
}
value = (operand << encoder->field_loc[i].start) &
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 80a1ac4c52..d594196aff 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -768,8 +768,8 @@ void Mir2Lir::CreateNativeGcMap() {
int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
offset = AssignLiteralOffsetCommon(literal_list_, offset);
constexpr unsigned int ptr_size = sizeof(uint32_t);
- COMPILE_ASSERT(ptr_size >= sizeof(mirror::HeapReference<mirror::Object>),
- ptr_size_cannot_hold_a_heap_reference);
+ static_assert(ptr_size >= sizeof(mirror::HeapReference<mirror::Object>),
+ "Pointer size cannot hold a heap reference");
offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset, ptr_size);
offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset, ptr_size);
offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset, ptr_size);
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 0f1d765d8c..0c83a0ad75 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -68,38 +68,39 @@ static constexpr bool kIntrinsicIsStatic[] = {
false, // kIntrinsicUnsafePut
true, // kIntrinsicSystemArrayCopyCharArray
};
-COMPILE_ASSERT(arraysize(kIntrinsicIsStatic) == kInlineOpNop, check_arraysize_kIntrinsicIsStatic);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicDoubleCvt], DoubleCvt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloatCvt], FloatCvt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicReverseBits], ReverseBits_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicReverseBytes], ReverseBytes_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsInt], AbsInt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsLong], AbsLong_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsFloat], AbsFloat_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsDouble], AbsDouble_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxInt], MinMaxInt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxLong], MinMaxLong_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCeil], Ceil_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloor], Floor_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRint], Rint_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundFloat], RoundFloat_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundDouble], RoundDouble_must_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], Get_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], IsEmptyOrLength_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIndexOf], IndexOf_must_not_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCurrentThread], CurrentThread_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicPeek], Peek_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicPoke], Poke_must_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCas], Cas_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], UnsafeGet_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicUnsafePut], UnsafePut_must_not_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
- SystemArrayCopyCharArray_must_be_static);
+static_assert(arraysize(kIntrinsicIsStatic) == kInlineOpNop,
+ "arraysize of kIntrinsicIsStatic unexpected");
+static_assert(kIntrinsicIsStatic[kIntrinsicDoubleCvt], "DoubleCvt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloatCvt], "FloatCvt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicReverseBits], "ReverseBits must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicReverseBytes], "ReverseBytes must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsInt], "AbsInt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsLong], "AbsLong must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsFloat], "AbsFloat must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsDouble], "AbsDouble must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxInt], "MinMaxInt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxLong], "MinMaxLong must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], "MinMaxFloat must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], "MinMaxDouble must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicSqrt], "Sqrt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicCeil], "Ceil must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloor], "Floor must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRint], "Rint must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRoundFloat], "RoundFloat must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRoundDouble], "RoundDouble must be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], "Get must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicCharAt], "CharAt must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicCompareTo], "CompareTo must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], "IsEmptyOrLength must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicIndexOf], "IndexOf must not be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicCurrentThread], "CurrentThread must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicPeek], "Peek must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicPoke], "Poke must be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicCas], "Cas must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], "UnsafeGet must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafePut], "UnsafePut must not be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
+ "SystemArrayCopyCharArray must be static");
MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
MIR* insn = mir_graph->NewMIR();
@@ -395,12 +396,15 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
DexFileMethodInliner::DexFileMethodInliner()
: lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
dex_file_(NULL) {
- COMPILE_ASSERT(kClassCacheFirst == 0, kClassCacheFirst_not_0);
- COMPILE_ASSERT(arraysize(kClassCacheNames) == kClassCacheLast, bad_arraysize_kClassCacheNames);
- COMPILE_ASSERT(kNameCacheFirst == 0, kNameCacheFirst_not_0);
- COMPILE_ASSERT(arraysize(kNameCacheNames) == kNameCacheLast, bad_arraysize_kNameCacheNames);
- COMPILE_ASSERT(kProtoCacheFirst == 0, kProtoCacheFirst_not_0);
- COMPILE_ASSERT(arraysize(kProtoCacheDefs) == kProtoCacheLast, bad_arraysize_kProtoCacheNames);
+ static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
+ static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
+ "bad arraysize for kClassCacheNames");
+ static_assert(kNameCacheFirst == 0, "kNameCacheFirst not 0");
+ static_assert(arraysize(kNameCacheNames) == kNameCacheLast,
+ "bad arraysize for kNameCacheNames");
+ static_assert(kProtoCacheFirst == 0, "kProtoCacheFirst not 0");
+ static_assert(arraysize(kProtoCacheDefs) == kProtoCacheLast,
+ "bad arraysize for kProtoCacheDefs");
}
DexFileMethodInliner::~DexFileMethodInliner() {
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f4e6dfead2..98dfaac590 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1192,17 +1192,17 @@ class Mir2Lir : public Backend {
virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
if (wide_kind == kWide) {
DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
- COMPILE_ASSERT((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
- (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
- (kArg7 == kArg6 + 1), kargs_range_unexpected);
- COMPILE_ASSERT((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
- (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
- (kFArg7 == kFArg6 + 1) && (kFArg8 == kFArg7 + 1) && (kFArg9 == kFArg8 + 1) &&
- (kFArg10 == kFArg9 + 1) && (kFArg11 == kFArg10 + 1) &&
- (kFArg12 == kFArg11 + 1) && (kFArg13 == kFArg12 + 1) &&
- (kFArg14 == kFArg13 + 1) && (kFArg15 == kFArg14 + 1),
- kfargs_range_unexpected);
- COMPILE_ASSERT(kRet1 == kRet0 + 1, kret_range_unexpected);
+ static_assert((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
+ (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
+ (kArg7 == kArg6 + 1), "kargs range unexpected");
+ static_assert((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
+ (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
+ (kFArg7 == kFArg6 + 1) && (kFArg8 == kFArg7 + 1) && (kFArg9 == kFArg8 + 1) &&
+ (kFArg10 == kFArg9 + 1) && (kFArg11 == kFArg10 + 1) &&
+ (kFArg12 == kFArg11 + 1) && (kFArg13 == kFArg12 + 1) &&
+ (kFArg14 == kFArg13 + 1) && (kFArg15 == kFArg14 + 1),
+ "kfargs range unexpected");
+ static_assert(kRet1 == kRet0 + 1, "kret range unexpected");
return RegStorage::MakeRegPair(TargetReg(reg),
TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
} else {
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 8f7bd3033a..426a2d56ce 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -78,14 +78,14 @@ class QuickCompiler : public Compiler {
DISALLOW_COPY_AND_ASSIGN(QuickCompiler);
};
-COMPILE_ASSERT(0U == static_cast<size_t>(kNone), kNone_not_0);
-COMPILE_ASSERT(1U == static_cast<size_t>(kArm), kArm_not_1);
-COMPILE_ASSERT(2U == static_cast<size_t>(kArm64), kArm64_not_2);
-COMPILE_ASSERT(3U == static_cast<size_t>(kThumb2), kThumb2_not_3);
-COMPILE_ASSERT(4U == static_cast<size_t>(kX86), kX86_not_4);
-COMPILE_ASSERT(5U == static_cast<size_t>(kX86_64), kX86_64_not_5);
-COMPILE_ASSERT(6U == static_cast<size_t>(kMips), kMips_not_6);
-COMPILE_ASSERT(7U == static_cast<size_t>(kMips64), kMips64_not_7);
+static_assert(0U == static_cast<size_t>(kNone), "kNone not 0");
+static_assert(1U == static_cast<size_t>(kArm), "kArm not 1");
+static_assert(2U == static_cast<size_t>(kArm64), "kArm64 not 2");
+static_assert(3U == static_cast<size_t>(kThumb2), "kThumb2 not 3");
+static_assert(4U == static_cast<size_t>(kX86), "kX86 not 4");
+static_assert(5U == static_cast<size_t>(kX86_64), "kX86_64 not 5");
+static_assert(6U == static_cast<size_t>(kMips), "kMips not 6");
+static_assert(7U == static_cast<size_t>(kMips64), "kMips64 not 7");
// Additional disabled optimizations (over generally disabled) per instruction set.
static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
@@ -118,7 +118,8 @@ static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
// 7 = kMips64.
~0U
};
-COMPILE_ASSERT(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t), kDisabledOpts_unexp);
+static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
+ "kDisabledOpts unexpected");
// Supported shorty types per instruction set. nullptr means that all are available.
// Z : boolean
@@ -149,7 +150,7 @@ static const char* kSupportedTypes[] = {
// 7 = kMips64.
""
};
-COMPILE_ASSERT(sizeof(kSupportedTypes) == 8 * sizeof(char*), kSupportedTypes_unexp);
+static_assert(sizeof(kSupportedTypes) == 8 * sizeof(char*), "kSupportedTypes unexpected");
static int kAllOpcodes[] = {
Instruction::NOP,
@@ -460,7 +461,7 @@ static const int* kUnsupportedOpcodes[] = {
// 7 = kMips64.
kAllOpcodes
};
-COMPILE_ASSERT(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), kUnsupportedOpcodes_unexp);
+static_assert(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), "kUnsupportedOpcodes unexpected");
// Size of the arrays stored above.
static const size_t kUnsupportedOpcodesSize[] = {
@@ -481,8 +482,8 @@ static const size_t kUnsupportedOpcodesSize[] = {
// 7 = kMips64.
arraysize(kAllOpcodes),
};
-COMPILE_ASSERT(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
- kUnsupportedOpcodesSize_unexp);
+static_assert(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
+ "kUnsupportedOpcodesSize unexpected");
// The maximum amount of Dalvik register in a method for which we will start compiling. Tries to
// avoid an abort when we need to manage more SSA registers than we can.
diff --git a/compiler/dex/quick/resource_mask.cc b/compiler/dex/quick/resource_mask.cc
index 17995fbf79..088bec870e 100644
--- a/compiler/dex/quick/resource_mask.cc
+++ b/compiler/dex/quick/resource_mask.cc
@@ -33,16 +33,16 @@ constexpr ResourceMask kNoRegMasks[] = {
ResourceMask::Bit(ResourceMask::kCCode),
};
// The 127-bit is the same as CLZ(masks_[1]) for a ResourceMask with only that bit set.
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kHeapRef].Equals(
- kEncodeHeapRef), check_kNoRegMasks_heap_ref_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kLiteral].Equals(
- kEncodeLiteral), check_kNoRegMasks_literal_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kDalvikReg].Equals(
- kEncodeDalvikReg), check_kNoRegMasks_dalvik_reg_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kFPStatus].Equals(
- ResourceMask::Bit(ResourceMask::kFPStatus)), check_kNoRegMasks_fp_status_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kCCode].Equals(
- ResourceMask::Bit(ResourceMask::kCCode)), check_kNoRegMasks_ccode_index);
+static_assert(kNoRegMasks[127-ResourceMask::kHeapRef].Equals(
+ kEncodeHeapRef), "kNoRegMasks heap ref index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kLiteral].Equals(
+ kEncodeLiteral), "kNoRegMasks literal index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kDalvikReg].Equals(
+ kEncodeDalvikReg), "kNoRegMasks dalvik reg index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kFPStatus].Equals(
+ ResourceMask::Bit(ResourceMask::kFPStatus)), "kNoRegMasks fp status index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kCCode].Equals(
+ ResourceMask::Bit(ResourceMask::kCCode)), "kNoRegMasks ccode index unexpected");
template <size_t special_bit>
constexpr ResourceMask OneRegOneSpecial(size_t reg) {
@@ -74,19 +74,19 @@ constexpr size_t SingleRegMaskIndex(size_t main_index, size_t sub_index) {
}
// The 127-bit is the same as CLZ(masks_[1]) for a ResourceMask with only that bit set.
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kHeapRef, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kHeapRef>(0)), check_kSingleRegMasks_heap_ref_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kLiteral, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kLiteral>(0)), check_kSingleRegMasks_literal_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kDalvikReg, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kDalvikReg>(0)), check_kSingleRegMasks_dalvik_reg_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kFPStatus, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kFPStatus>(0)), check_kSingleRegMasks_fp_status_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kCCode, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kCCode>(0)), check_kSingleRegMasks_ccode_index);
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kHeapRef, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kHeapRef>(0)), "kSingleRegMasks heap ref index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kLiteral, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kLiteral>(0)), "kSingleRegMasks literal index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kDalvikReg, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kDalvikReg>(0)), "kSingleRegMasks dalvik reg index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kFPStatus, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kFPStatus>(0)), "kSingleRegMasks fp status index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kCCode, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kCCode>(0)), "kSingleRegMasks ccode index unexpected");
// NOTE: arraysize(kNoRegMasks) multiplied by 32 due to the gcc bug workaround, see above.
-COMPILE_ASSERT(arraysize(kSingleRegMasks) == arraysize(kNoRegMasks) * 32, check_arraysizes);
+static_assert(arraysize(kSingleRegMasks) == arraysize(kNoRegMasks) * 32, "arraysizes unexpected");
constexpr ResourceMask kTwoRegsMasks[] = {
#define TWO(a, b) ResourceMask::Bit(a).Union(ResourceMask::Bit(b))
@@ -115,7 +115,7 @@ constexpr ResourceMask kTwoRegsMasks[] = {
TWO(8, 15), TWO(9, 15), TWO(10, 15), TWO(11, 15), TWO(12, 15), TWO(13, 15), TWO(14, 15),
#undef TWO
};
-COMPILE_ASSERT(arraysize(kTwoRegsMasks) == 16 * 15 / 2, check_arraysize_kTwoRegsMasks);
+static_assert(arraysize(kTwoRegsMasks) == 16 * 15 / 2, "arraysize of kTwoRegsMasks unexpected");
constexpr size_t TwoRegsIndex(size_t higher, size_t lower) {
return (higher * (higher - 1)) / 2u + lower;
@@ -136,7 +136,7 @@ constexpr bool CheckTwoRegsMaskTable(size_t lines) {
(CheckTwoRegsMaskLine(lines - 1) && CheckTwoRegsMaskTable(lines - 1u));
}
-COMPILE_ASSERT(CheckTwoRegsMaskTable(16), check_two_regs_masks_table);
+static_assert(CheckTwoRegsMaskTable(16), "two regs masks table check failed");
} // anonymous namespace
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 9545896f64..769cd4c83d 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -34,8 +34,8 @@ static const DRegister kHFDArgumentRegisters[] = {
D0, D1, D2, D3, D4, D5, D6, D7
};
-COMPILE_ASSERT(arraysize(kHFDArgumentRegisters) * 2 == arraysize(kHFSArgumentRegisters),
- ks_d_argument_registers_mismatch);
+static_assert(arraysize(kHFDArgumentRegisters) * 2 == arraysize(kHFSArgumentRegisters),
+ "S and D argument registers mismatch");
// Calling convention
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index a1e61b936a..5b61f2113d 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -231,10 +231,10 @@ class OatWriter {
// data to write
- COMPILE_ASSERT(mirror::Class::Status::kStatusMax < (2 ^ 16), class_status_wont_fit_in_16bits);
+ static_assert(mirror::Class::Status::kStatusMax < (1 << 16), "class status won't fit in 16 bits");
int16_t status_;
- COMPILE_ASSERT(OatClassType::kOatClassMax < (2 ^ 16), oat_class_type_wont_fit_in_16bits);
+ static_assert(OatClassType::kOatClassMax < (1 << 16), "oat class type won't fit in 16 bits");
uint16_t type_;
uint32_t method_bitmap_size_;
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index d7295aa112..7e45ffc3ec 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -71,16 +71,16 @@ class Location : public ValueObject {
Location() : value_(kInvalid) {
// Verify that non-constant location kinds do not interfere with kConstant.
- COMPILE_ASSERT((kInvalid & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kUnallocated & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kStackSlot & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kDoubleStackSlot & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kRegister & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kQuickParameter & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kFpuRegister & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kRegisterPair & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kFpuRegisterPair & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kConstant & kLocationConstantMask) == kConstant, TagError);
+ static_assert((kInvalid & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kUnallocated & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kStackSlot & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kDoubleStackSlot & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kRegister & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kQuickParameter & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kFpuRegister & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kRegisterPair & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kFpuRegisterPair & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kConstant & kLocationConstantMask) == kConstant, "TagError");
DCHECK(!IsValid());
}
diff --git a/compiler/utils/arena_allocator.cc b/compiler/utils/arena_allocator.cc
index 0c93f0a71b..004af98852 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/compiler/utils/arena_allocator.cc
@@ -114,7 +114,7 @@ void ArenaAllocatorStatsImpl<kCount>::Dump(std::ostream& os, const Arena* first,
<< num_allocations << ", avg size: " << bytes_allocated / num_allocations << "\n";
}
os << "===== Allocation by kind\n";
- COMPILE_ASSERT(arraysize(kAllocNames) == kNumArenaAllocKinds, check_arraysize_kAllocNames);
+ static_assert(arraysize(kAllocNames) == kNumArenaAllocKinds, "arraysize of kAllocNames");
for (int i = 0; i < kNumArenaAllocKinds; i++) {
os << kAllocNames[i] << std::setw(10) << alloc_stats_[i] << "\n";
}