commit    ccc60264229ac96d798528d2cb7dbbdd0deca993 (patch)
author    Andreas Gampe <agampe@google.com>  2014-07-04 18:02:38 -0700
committer Andreas Gampe <agampe@google.com>  2014-07-12 13:33:12 -0700
tree      998378a38ca4d510090c7b4e7832379989354680 /compiler/dex/quick/x86
parent    bc9127a5d451058aede5562e2b015caec618d008 (diff)
ART: Rework TargetReg(symbolic_reg, wide)
Make the standard implementation in Mir2Lir and the specialized one
in the x86 backend return a register pair when a wide value is
requested on a 32-bit target. Introduce a WideKind enumeration to
improve code readability. Simplify generic code based on this
implementation.
Change-Id: I670d45aa2572eedfdc77ac763e6486c83f8e26b4
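
The shape of the new contract is easiest to see outside the diff. Below is a
minimal, self-contained sketch of the dispatch that the reworked
TargetReg(symbolic_reg, WideKind) overload performs: kWide yields a single
64-bit register on a 64-bit target but a pair of consecutive registers on
32-bit x86, and kRef widens only on 64-bit targets. The Reg struct and the
target64 flag here are simplified stand-ins for ART's RegStorage and
cu_->target64, not the actual types.

// A minimal sketch of the WideKind dispatch (simplified stand-ins for
// ART's RegStorage/SpecialTargetRegister, not the actual types).
#include <cassert>
#include <cstdint>
#include <cstdio>

enum WideKind { kNotWide, kWide, kRef };

struct Reg {
  uint8_t low;      // first (or only) physical register
  uint8_t high;     // second register of a pair, if any
  bool is_pair;     // true: (low, high) register pair on 32-bit x86
  bool is_64bit;    // true: single 64-bit register on a 64-bit target
};

// kWide on a 64-bit target widens one register; on 32-bit x86 it pairs
// two consecutive registers. kRef widens only when the target is 64-bit.
Reg TargetReg(uint8_t symbolic_reg, WideKind kind, bool target64) {
  if (kind == kWide) {
    if (target64) {
      return Reg{symbolic_reg, 0, false, true};
    }
    return Reg{symbolic_reg, static_cast<uint8_t>(symbolic_reg + 1), true, false};
  }
  if (kind == kRef && target64) {
    return Reg{symbolic_reg, 0, false, true};
  }
  return Reg{symbolic_reg, 0, false, false};  // kNotWide, or kRef on 32-bit
}

int main() {
  Reg pair = TargetReg(/*kArg0=*/0, kWide, /*target64=*/false);
  assert(pair.is_pair && pair.high == 1);    // 32-bit: (kArg0, kArg1) pair
  Reg wide = TargetReg(0, kWide, /*target64=*/true);
  assert(!wide.is_pair && wide.is_64bit);    // 64-bit: one wide register
  printf("pair=%d wide64=%d\n", pair.is_pair ? 1 : 0, wide.is_64bit ? 1 : 0);
  return 0;
}

This also mirrors why TargetPtrReg in the diff maps to
cu_->target64 ? kWide : kNotWide — a pointer-sized request is wide exactly
when the target is 64-bit.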
Diffstat (limited to 'compiler/dex/quick/x86')
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc   |  2
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h | 25
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc    | 60
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc | 49
4 files changed, 79 insertions, 57 deletions
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 8e2a1e3532..b7441d7649 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -151,7 +151,7 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
   // Making a call - use explicit registers
   FlushAllRegs();   /* Everything to home location */
-  RegStorage array_ptr = TargetRefReg(kArg0);
+  RegStorage array_ptr = TargetReg(kArg0, kRef);
   RegStorage payload = TargetPtrReg(kArg1);
   RegStorage method_start = TargetPtrReg(kArg2);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index b0c54e86e9..dd1d2dc1c5 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -89,19 +89,26 @@ class X86Mir2Lir : public Mir2Lir {
   // Required for target - register utilities.
   RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
   RegStorage TargetReg32(SpecialTargetRegister reg);
-  RegStorage TargetReg(SpecialTargetRegister symbolic_reg, bool is_wide) OVERRIDE {
-    RegStorage reg = TargetReg32(symbolic_reg);
-    if (is_wide) {
-      return (reg.Is64Bit()) ? reg : As64BitReg(reg);
+  RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
+    if (wide_kind == kWide) {
+      if (cu_->target64) {
+        return As64BitReg(TargetReg32(symbolic_reg));
+      } else {
+        // x86: construct a pair.
+        DCHECK((kArg0 <= symbolic_reg && symbolic_reg < kArg3) ||
+               (kFArg0 <= symbolic_reg && symbolic_reg < kFArg3) ||
+               (kRet0 == symbolic_reg));
+        return RegStorage::MakeRegPair(TargetReg32(symbolic_reg),
+            TargetReg32(static_cast<SpecialTargetRegister>(symbolic_reg + 1)));
+      }
+    } else if (wide_kind == kRef && cu_->target64) {
+      return As64BitReg(TargetReg32(symbolic_reg));
     } else {
-      return (reg.Is32Bit()) ? reg : As32BitReg(reg);
+      return TargetReg32(symbolic_reg);
     }
   }
-  RegStorage TargetRefReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
-    return TargetReg(symbolic_reg, cu_->target64);
-  }
   RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
-    return TargetReg(symbolic_reg, cu_->target64);
+    return TargetReg(symbolic_reg, cu_->target64 ? kWide : kNotWide);
   }
   RegStorage GetArgMappingToPhysicalReg(int arg_num);
   RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 1c63da40d3..2f27482e55 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -858,7 +858,8 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
     RegLocation rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
     RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
     LoadValueDirectWide(rl_src_expected, rs_r0q);
-    NewLIR5(kX86LockCmpxchg64AR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
+    NewLIR5(kX86LockCmpxchg64AR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0,
+            rl_new_value.reg.GetReg());

     // After a store we need to insert barrier in case of potential load. Since the
     // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
@@ -954,7 +955,8 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
       rl_offset = LoadValue(rl_src_offset, kCoreReg);
     }
     LoadValueDirect(rl_src_expected, rs_r0);
-    NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
+    NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0,
+            rl_new_value.reg.GetReg());

     // After a store we need to insert barrier to prevent reordering with either
     // earlier or later memory accesses.  Since
@@ -1069,23 +1071,23 @@ void X86Mir2Lir::GenArrayBoundsCheck(RegStorage index,
       RegStorage new_index = index_;
       // Move index out of kArg1, either directly to kArg0, or to kArg2.
       // TODO: clean-up to check not a number but with type
-      if (index_ == m2l_->TargetReg(kArg1, false)) {
-        if (array_base_ == m2l_->TargetRefReg(kArg0)) {
-          m2l_->OpRegCopy(m2l_->TargetReg(kArg2, false), index_);
-          new_index = m2l_->TargetReg(kArg2, false);
+      if (index_ == m2l_->TargetReg(kArg1, kNotWide)) {
+        if (array_base_ == m2l_->TargetReg(kArg0, kRef)) {
+          m2l_->OpRegCopy(m2l_->TargetReg(kArg2, kNotWide), index_);
+          new_index = m2l_->TargetReg(kArg2, kNotWide);
         } else {
-          m2l_->OpRegCopy(m2l_->TargetReg(kArg0, false), index_);
-          new_index = m2l_->TargetReg(kArg0, false);
+          m2l_->OpRegCopy(m2l_->TargetReg(kArg0, kNotWide), index_);
+          new_index = m2l_->TargetReg(kArg0, kNotWide);
         }
       }
       // Load array length to kArg1.
-      m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, false), array_base_, len_offset_);
+      m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
       if (cu_->target64) {
         m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
-                                      new_index, m2l_->TargetReg(kArg1, false), true);
+                                      new_index, m2l_->TargetReg(kArg1, kNotWide), true);
       } else {
         m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
-                                      new_index, m2l_->TargetReg(kArg1, false), true);
+                                      new_index, m2l_->TargetReg(kArg1, kNotWide), true);
       }
     }
@@ -1118,14 +1120,16 @@ void X86Mir2Lir::GenArrayBoundsCheck(int32_t index,
       GenerateTargetLabel(kPseudoThrowTarget);

       // Load array length to kArg1.
-      m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, false), array_base_, len_offset_);
-      m2l_->LoadConstant(m2l_->TargetReg(kArg0, false), index_);
+      m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
+      m2l_->LoadConstant(m2l_->TargetReg(kArg0, kNotWide), index_);
       if (cu_->target64) {
         m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
-                                      m2l_->TargetReg(kArg0, false), m2l_->TargetReg(kArg1, false), true);
+                                      m2l_->TargetReg(kArg0, kNotWide),
+                                      m2l_->TargetReg(kArg1, kNotWide), true);
       } else {
         m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
-                                      m2l_->TargetReg(kArg0, false), m2l_->TargetReg(kArg1, false), true);
+                                      m2l_->TargetReg(kArg0, kNotWide),
+                                      m2l_->TargetReg(kArg1, kNotWide), true);
       }
     }
@@ -1471,7 +1475,8 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
     int displacement = SRegOffset(rl_src.s_reg_low);

     ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    LIR *lir = NewLIR3(x86op, cu_->target64 ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(), r_base, displacement + LOWORD_OFFSET);
+    LIR *lir = NewLIR3(x86op, cu_->target64 ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(),
+                       r_base, displacement + LOWORD_OFFSET);
     AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                             true /* is_load */, true /* is64bit */);
     if (!cu_->target64) {
@@ -2350,8 +2355,9 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,

   // If Method* is already in a register, we can save a copy.
   RegLocation rl_method = mir_graph_->GetMethodLoc();
-  int32_t offset_of_type = mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
-    (sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
+  int32_t offset_of_type = mirror::Array::DataOffset(
+      sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
+      (sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);

   if (rl_method.location == kLocPhysReg) {
     if (use_declaring_class) {
@@ -2399,10 +2405,10 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
   FlushAllRegs();
   // May generate a call - use explicit registers.
   LockCallTemps();
-  RegStorage method_reg = TargetRefReg(kArg1);  // kArg1 gets current Method*.
+  RegStorage method_reg = TargetReg(kArg1, kRef);  // kArg1 gets current Method*.
   LoadCurrMethodDirect(method_reg);
-  RegStorage class_reg = TargetRefReg(kArg2);  // kArg2 will hold the Class*.
-  RegStorage ref_reg = TargetRefReg(kArg0);  // kArg2 will hold the ref.
+  RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*.
+  RegStorage ref_reg = TargetReg(kArg0, kRef);  // kArg2 will hold the ref.
   // Reference must end up in kArg0.
   if (needs_access_check) {
     // Check we have access to type_idx and if not throw IllegalAccessError,
@@ -2414,7 +2420,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
       CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, true);
     }
-    OpRegCopy(class_reg, TargetRefReg(kRet0));
+    OpRegCopy(class_reg, TargetReg(kRet0, kRef));
     LoadValueDirectFixed(rl_src, ref_reg);
   } else if (use_declaring_class) {
     LoadValueDirectFixed(rl_src, ref_reg);
@@ -2438,7 +2444,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
       } else {
         CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
       }
-      OpRegCopy(class_reg, TargetRefReg(kRet0));  // Align usage with fast path.
+      OpRegCopy(class_reg, TargetReg(kRet0, kRef));  // Align usage with fast path.
       LoadValueDirectFixed(rl_src, ref_reg);  /* Reload Ref. */
       // Rejoin code paths
       LIR* hop_target = NewLIR0(kPseudoTargetLabel);
@@ -2459,7 +2465,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k

   // Is the class NULL?
   LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
-  RegStorage ref_class_reg = TargetRefReg(kArg1);  // kArg2 will hold the Class*.
+  RegStorage ref_class_reg = TargetReg(kArg1, kRef);  // kArg2 will hold the Class*.
   /* Load object->klass_. */
   DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
   LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(), ref_class_reg,
@@ -2477,7 +2483,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
       LoadConstant(rl_result.reg, 1);     // Assume result succeeds.
       branchover = OpCmpBranch(kCondEq, ref_class_reg, class_reg, NULL);
     }
-    OpRegCopy(TargetRefReg(kArg0), class_reg);
+    OpRegCopy(TargetReg(kArg0, kRef), class_reg);
     if (cu_->target64) {
       OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial));
     } else {
@@ -2617,7 +2623,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
     } else {
       if (shift_op) {
         // X86 doesn't require masking and must use ECX.
-        RegStorage t_reg = TargetReg(kCount, false);  // rCX
+        RegStorage t_reg = TargetReg(kCount, kNotWide);  // rCX
         LoadValueDirectFixed(rl_rhs, t_reg);
         if (is_two_addr) {
           // Can we do this directly into memory?
@@ -2805,7 +2811,7 @@ void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
   }

   // X86 doesn't require masking and must use ECX.
-  RegStorage t_reg = TargetReg(kCount, false);  // rCX
+  RegStorage t_reg = TargetReg(kCount, kNotWide);  // rCX
  LoadValueDirectFixed(rl_shift, t_reg);
   if (is_two_addr) {
     // Can we do this directly into memory?
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 0083128501..bb1f379fe5 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -204,7 +204,8 @@ RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
     case kSuspend: res_reg =  RegStorage::InvalidReg(); break;
     case kLr: res_reg =  RegStorage::InvalidReg(); break;
     case kPc: res_reg =  RegStorage::InvalidReg(); break;
-    case kSp: res_reg =  rs_rX86_SP; break;
+    case kSp: res_reg =  rs_rX86_SP_32; break;  // This must be the concrete one, as _SP is target-
+                                                // specific size.
     case kArg0: res_reg = rs_rX86_ARG0; break;
     case kArg1: res_reg = rs_rX86_ARG1; break;
     case kArg2: res_reg = rs_rX86_ARG2; break;
@@ -968,7 +969,8 @@ void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeT
   uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

   // Generate the move instruction with the unique pointer and save index, dex_file, and type.
-  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
+  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
+                     TargetReg(symbolic_reg, kNotWide).GetReg(),
                      static_cast<int>(target_method_id_ptr), target_method_idx,
                      WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
   AppendLIR(move);
@@ -985,7 +987,8 @@ void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic
   uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

   // Generate the move instruction with the unique pointer and save index and type.
-  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
+  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
+                     TargetReg(symbolic_reg, kNotWide).GetReg(),
                      static_cast<int>(ptr), type_idx);
   AppendLIR(move);
   class_type_address_insns_.Insert(move);
@@ -1321,7 +1324,8 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
     }
   } else {
     // Load the start index from stack, remembering that we pushed EDI.
-    int displacement = SRegOffset(rl_start.s_reg_low) + (cu_->target64 ? 2 : 1) * sizeof(uint32_t);
+    int displacement = SRegOffset(rl_start.s_reg_low) +
+                       (cu_->target64 ? 2 : 1) * sizeof(uint32_t);
     {
       ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
       Load32Disp(rs_rX86_SP, displacement, tmpReg);
@@ -2241,21 +2245,24 @@ LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
 }

 // ------------ ABI support: mapping of args to physical registers -------------
-RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) {
+RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide,
+                                                              bool is_ref) {
   const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
-  const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
+  const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) /
+      sizeof(SpecialTargetRegister);
   const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
-                                                       kFArg4, kFArg5, kFArg6, kFArg7};
-  const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
+                                                             kFArg4, kFArg5, kFArg6, kFArg7};
+  const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) /
+      sizeof(SpecialTargetRegister);

   if (is_double_or_float) {
     if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
-      return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide);
+      return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide ? kWide : kNotWide);
     }
   } else {
     if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-      return is_ref ? ml_->TargetRefReg(coreArgMappingToPhysicalReg[cur_core_reg_++]) :
-                      ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
+      return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
+                            is_ref ? kRef : (is_wide ? kWide : kNotWide));
     }
   }
   return RegStorage::InvalidReg();
@@ -2267,7 +2274,8 @@ RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
   return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
 }

-void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) {
+void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
+                                                   InToRegStorageMapper* mapper) {
   DCHECK(mapper != nullptr);
   max_mapped_in_ = -1;
   is_there_stack_mapped_ = false;
@@ -2338,13 +2346,13 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {

   RegLocation rl_src = rl_method;
   rl_src.location = kLocPhysReg;
-  rl_src.reg = TargetRefReg(kArg0);
+  rl_src.reg = TargetReg(kArg0, kRef);
   rl_src.home = false;
   MarkLive(rl_src);
   StoreValue(rl_method, rl_src);
   // If Method* has been promoted, explicitly flush
   if (rl_method.location == kLocPhysReg) {
-    StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile);
+    StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
   }

   if (cu_->num_ins == 0) {
@@ -2502,7 +2510,8 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,

       // The rest can be copied together
       int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
-      int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, cu_->instruction_set);
+      int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
+                                                     cu_->instruction_set);

       int current_src_offset = start_offset;
       int current_dest_offset = outs_offset;
@@ -2598,7 +2607,7 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,

           // Instead of allocating a new temp, simply reuse one of the registers being used
           // for argument passing.
-          RegStorage temp = TargetReg(kArg3, false);
+          RegStorage temp = TargetReg(kArg3, kNotWide);

           // Now load the argument VR and store to the outs.
           Load32Disp(rs_rX86_SP, current_src_offset, temp);
@@ -2614,8 +2623,8 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,

   // Now handle rest not registers if they are
   if (in_to_reg_storage_mapping.IsThereStackMapped()) {
-    RegStorage regSingle = TargetReg(kArg2, false);
-    RegStorage regWide = TargetReg(kArg3, true);
+    RegStorage regSingle = TargetReg(kArg2, kNotWide);
+    RegStorage regWide = TargetReg(kArg3, kWide);
     for (int i = start_index;
          i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
       RegLocation rl_arg = info->args[i];
@@ -2674,13 +2683,13 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                            direct_code, direct_method, type);
   if (pcrLabel) {
     if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
-      *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
+      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
     } else {
       *pcrLabel = nullptr;
       // In lieu of generating a check for kArg1 being null, we need to
       // perform a load when doing implicit checks.
       RegStorage tmp = AllocTemp();
-      Load32Disp(TargetRefReg(kArg1), 0, tmp);
+      Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
       MarkPossibleNullPointerException(info->opt_flags);
       FreeTemp(tmp);
     }