Memory allocation flow for object creation in the Android ART virtual machine

Preface

This article describes how ART allocates heap memory when we create an object with new in everyday Java development. Since allocation is closely tied to GC, the next article will analyze ART's GC flow. It can be read together with the earlier article on the object-creation memory allocation flow in the Dalvik virtual machine, which will deepen your understanding of the ART VM.

Interpreted execution

The NEW_INSTANCE and NEW_ARRAY instructions are handled as follows.

art/runtime/interpreter/interpreter_switch_impl.cc#ExecuteSwitchImpl

template<bool do_access_check, bool transaction_active>
JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
                         ShadowFrame& shadow_frame, JValue result_register,
                         bool interpret_one_instruction) {
    
    
  constexpr bool do_assignability_check = do_access_check;
  self->VerifyStack();

  uint32_t dex_pc = shadow_frame.GetDexPC();
  const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
  const uint16_t* const insns = code_item->insns_;
  const Instruction* inst = Instruction::At(insns + dex_pc);
  uint16_t inst_data;
  ArtMethod* method = shadow_frame.GetMethod();
  jit::Jit* jit = Runtime::Current()->GetJit();

  std::unique_ptr<lambda::ClosureBuilder> lambda_closure_builder;
  size_t lambda_captured_variable_index = 0;
  do {
    
    
    dex_pc = inst->GetDexPc(insns);
    shadow_frame.SetDexPC(dex_pc);
    TraceExecution(shadow_frame, inst, dex_pc);
    inst_data = inst->Fetch16(0);
    switch (inst->Opcode(inst_data)) {
    
    
      case Instruction::NEW_INSTANCE: {
    
    
        PREAMBLE();
        Object* obj = nullptr;
        Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(), self, false, do_access_check);
        if (LIKELY(c != nullptr)) {
    
    
          if (UNLIKELY(c->IsStringClass())) {
    
    
            // It is a String: allocate via String::Alloc
            gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
            mirror::SetStringCountVisitor visitor(0);
            obj = String::Alloc<true>(self, 0, allocator_type, visitor);
          } else {
    
    
            // Not a String: call AllocObjectFromCode to allocate the memory
            obj = AllocObjectFromCode<do_access_check, true>(inst->VRegB_21c(), shadow_frame.GetMethod(), self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
          }
        }
        // Null check / pending-exception handling is elided here; on success
        // the reference is stored in the destination register.
        ...
        inst = inst->Next_2xx();
        break;
      }
      case Instruction::NEW_ARRAY: {
    
    
        int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
        // Call AllocArrayFromCode to allocate the array
        Object* obj = AllocArrayFromCode<do_access_check, true>(inst->VRegC_22c(), length, shadow_frame.GetMethod(), self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
        shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
        inst = inst->Next_2xx();
        break;
      }
      // ... other opcodes elided ...
    }
  } while (!interpret_one_instruction);
  // Record where we stopped.
  shadow_frame.SetDexPC(inst->GetDexPc(insns));
  return result_register;
}

String::Alloc

It ultimately calls AllocObjectWithAllocator.

art/runtime/mirror/string.h
art/runtime/mirror/string.cc
art/runtime/mirror/string-inl.h

// C++ mirror of java.lang.String
class MANAGED String FINAL : public Object {
    
    
  // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
  int32_t count_;
  uint32_t hash_code_;
  // The UTF-16 character data is actually stored here (a flexible array member)
  uint16_t value_[0];
};

template <bool kIsInstrumented, typename PreFenceVisitor>
inline String* String::Alloc(Thread* self, int32_t utf16_length, gc::AllocatorType allocator_type, const PreFenceVisitor& pre_fence_visitor) {
    
    
  constexpr size_t header_size = sizeof(String);
  static_assert(sizeof(utf16_length) <= sizeof(size_t), "static_cast<size_t>(utf16_length) must not lose bits.");
  size_t length = static_cast<size_t>(utf16_length);
  size_t data_size = sizeof(uint16_t) * length; // size of the character data
  size_t size = header_size + data_size; // total size of the String object

  // String.equals() intrinsics assume zero-padding up to kObjectAlignment, so make sure the allocator clears the padding as well.
  size_t alloc_size = RoundUp(size, kObjectAlignment);
  Class* string_class = GetJavaLangString();
  ...
  gc::Heap* heap = Runtime::Current()->GetHeap();
  return down_cast<String*>(
      heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, string_class, alloc_size,
                                                            allocator_type, pre_fence_visitor));
}
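
To make the size computation concrete, here is a worked example with hypothetical numbers (the exact figures depend on the ART version and pointer size; this assumes 4-byte heap references, an 8-byte Object header and kObjectAlignment == 8):

// Worked example (assumed layout): allocating the Java string "hello".
//   utf16_length = 5
//   header_size  = 8 (Object header) + 4 (count_) + 4 (hash_code_) = 16 bytes
//   data_size    = sizeof(uint16_t) * 5                            = 10 bytes
//   size         = header_size + data_size                         = 26 bytes
//   alloc_size   = RoundUp(26, kObjectAlignment)                   = 32 bytes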

AllocObjectFromCode

It ultimately calls AllocObjectWithAllocator as well.

art/runtime/entrypoints/entrypoint_utils-inl.h
art/runtime/mirror/class-inl.h

// Given the context of a calling Method, use its DexCache to resolve a type to a Class. 
// If it cannot be resolved, throw an error. If it can, use it to create an instance.
// When verification/compiler hasn't been able to verify access, optionally perform an access check.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) {
    
    
  bool slow_path = false;
  mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path);
  // Slow-path handling (unresolved class, pending exception, etc.) is elided here.
  return klass->Alloc<kInstrumented>(self, allocator_type);
}

template<bool kIsInstrumented, bool kCheckAddFinalizer>
inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
    
    
  CheckObjectAlloc();
  gc::Heap* heap = Runtime::Current()->GetHeap();
  mirror::Object* obj =
      heap->AllocObjectWithAllocator<kIsInstrumented, false>(self, this, this->object_size_,
                                                             allocator_type, VoidFunctor());
  return obj;
}

AllocArrayFromCode

It ultimately calls AllocObjectWithAllocator as well.

art/runtime/entrypoints/entrypoint_utils-inl.h
art/runtime/mirror/array.h
art/runtime/mirror/array-inl.h

// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. 
// If it cannot be resolved, throw an error. If it can, use it to create an array.
// When verification/compiler hasn't been able to verify access, optionally perform an access check.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
                                         int32_t component_count,
                                         ArtMethod* method,
                                         Thread* self,
                                         gc::AllocatorType allocator_type) {
    
    

  bool slow_path = false;
  mirror::Class* klass = CheckArrayAlloc<kAccessCheck>(type_idx, component_count, method,
                                                       &slow_path);
  // Slow-path handling (unresolved class, negative count, etc.) is elided here.
  return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
                                             klass->GetComponentSizeShift(), allocator_type);
}

class MANAGED Array : public Object {
    
    
  // The number of array elements.
  int32_t length_;
  // Marker for the data (used by generated code)
  uint32_t first_element_[0];
};

template <bool kIsInstrumented, bool kFillUsable>
inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count, size_t component_size_shift, gc::AllocatorType allocator_type) {
    
    
  // Compute the total number of bytes the array needs
  size_t size = ComputeArraySize(component_count, component_size_shift);
  gc::Heap* heap = Runtime::Current()->GetHeap();
  Array* result;
  if (!kFillUsable) {
    
    
    SetLengthVisitor visitor(component_count);
    result = down_cast<Array*>(heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size, allocator_type, visitor));
  } else {
    
    
    SetLengthToUsableSizeVisitor visitor(component_count, DataOffset(1U << component_size_shift).SizeValue(), component_size_shift);
    result = down_cast<Array*>(heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size, allocator_type, visitor));
  }

  return result;
}
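
As a rough, hypothetical illustration of ComputeArraySize (assuming the element data starts right after the 8-byte Object header plus the 4-byte length_ field shown above):

// Worked example (assumed layout): new int[10], component_size_shift == 2.
//   header_size = 8 (Object header) + 4 (length_) = 12 bytes (offset of first_element_)
//   data_size   = 10 << 2                          = 40 bytes
//   size        = header_size + data_size          = 52 bytes requested from the heap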

Machine code execution

Under compiled (machine) code execution the allocation entrypoints ultimately also call AllocObjectWithAllocator.
The detailed flow is omitted here.

AllocObjectWithAllocator

art/runtime/gc/heap-inl.h

// kInstrumented: whether instrumentation/tooling hooks are in effect
// kCheckLargeObject: whether to check if the requested size falls into the large-object range
// PreFenceVisitor: a function object invoked as a callback once the allocation succeeds
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
                                                      mirror::Class* klass,
                                                      size_t byte_count,
                                                      AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
    
    
  mirror::Object* obj;
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    
    
    // AllocLargeObject calls AllocObjectWithAllocator again with kAllocatorTypeLOS and kCheckLargeObject set to false
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count, pre_fence_visitor);
    if (obj != nullptr) {
    
    
      return obj;
    } else {
    
    
      // There should be an OOM exception, since we are retrying, clear it.
      self->ClearException();
    }
    // If the large object allocation failed, try to use the normal spaces (main space, non moving space). 
    // This can happen if there is significant virtual address space fragmentation.
    // If the LOS allocator failed, fall through and retry with the other allocators
  }

  // bytes allocated for the (individual) object.
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
  if (allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) {
    
    
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
  // If we have a thread local allocation we don't need to update bytes allocated.
  // When using a thread-local allocator, allocate inside the TLAB if it has enough room
  if ((allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) &&
      byte_count <= self->TlabSize()) {
    
    
    obj = self->AllocTlab(byte_count);
    obj->SetClass(klass);
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size); // pre-fence callback
    QuasiAtomic::ThreadFenceForConstructor();
  } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
             (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
             LIKELY(obj != nullptr)) {
    
    
    // kAllocatorTypeRosAlloc: try a thread-local allocation from rosalloc_space_
    obj->SetClass(klass);
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    
    
    // bytes allocated that takes bulk thread-local buffer allocations into account.
    // The fast paths above did not apply (or the allocator ran short), so call TryToAllocate
    size_t bytes_tl_bulk_allocated = 0;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);

    if (UNLIKELY(obj == nullptr)) {
    
    
      // AllocateInternalWithGc can cause thread suspension, if someone instruments the entrypoints or changes the allocator in a suspend point here, we need to retry the allocation.
      // If TryToAllocate failed, fall back to AllocateInternalWithGc, which may trigger GC
      obj = AllocateInternalWithGc(self, allocator, kInstrumented, byte_count, &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated, &klass);

      if (obj == nullptr) {
    
    
        // The only way that we can get a null return if there is no pending exception is if the allocator or instrumentation changed.
        // With no pending exception, the only way to get null here is that the allocator or instrumentation changed
        if (!self->IsExceptionPending()) {
    
    
          // AllocObject will pick up the new allocator type, and instrumented as true is the safe default.
          // No pending exception: retry the allocation once more with the new allocator
          return AllocObject</*kInstrumented*/true>(self, klass, byte_count, pre_fence_visitor);
        }

        // A pending exception with a null result means memory is truly exhausted: let the OOM propagate
        return nullptr;
      }
    }

    obj->SetClass(klass);
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
    
    
      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
    new_num_bytes_allocated = static_cast<size_t>( num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated)) + bytes_tl_bulk_allocated;
  }


  // Unless the allocator is kAllocatorTypeBumpPointer, kAllocatorTypeTLAB, kAllocatorTypeRegion or kAllocatorTypeRegionTLAB, PushOnAllocationStack records obj in the allocation stack associated with this thread
  if (AllocatorHasAllocationStack(allocator)) {
    
    
    PushOnAllocationStack(self, &obj);
  }

  // check gc
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    
    
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}

// Large-object path: the allocation is at least large_object_threshold_ (12 KB by default) and the class is a primitive array or String
inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
    
    
  // We need to have a zygote space or else our newly allocated large object can end up in the Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}
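
For intuition, the same predicate can be sketched as a standalone function; the 12 KB threshold and the helper name below are assumptions for illustration, not ART source:

#include <cstddef>

// Minimal sketch of the large-object check, with the class tests reduced to booleans.
bool WouldUseLargeObjectSpace(size_t byte_count, bool is_primitive_array, bool is_string) {
  constexpr size_t kLargeObjectThreshold = 12 * 1024;  // assumed default threshold
  return byte_count >= kLargeObjectThreshold && (is_primitive_array || is_string);
}

For example, new byte[100 * 1024] (a roughly 100 KB primitive array) would take the LOS path, while new Object[100 * 1024] is an object array and stays in the regular spaces.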

template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count, const PreFenceVisitor& pre_fence_visitor) {
    
    
  // Save and restore the class in case it moves.
  StackHandleScope<1> hs(self);
  auto klass_wrapper = hs.NewHandleWrapper(klass);
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count, kAllocatorTypeLOS, pre_fence_visitor);
}

static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    
    
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB &&
        allocator_type != kAllocatorTypeRegion &&
        allocator_type != kAllocatorTypeRegionTLAB;
}
static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
    
    
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB;
}
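
The TLAB fast path in AllocObjectWithAllocator works because each thread owns its buffer exclusively, so allocation is just a pointer bump with no locking. A minimal standalone sketch of the idea (the struct and field names are illustrative, not ART's Thread::AllocTlab):

#include <cstddef>
#include <cstdint>

// Simplified sketch of thread-local bump-pointer (TLAB) allocation.
struct TlabSketch {
  uint8_t* pos;  // next free byte in this thread's buffer
  uint8_t* end;  // one past the last byte of the buffer

  void* Alloc(size_t aligned_size) {
    if (pos + aligned_size > end) {
      return nullptr;  // TLAB exhausted: caller falls back to the shared heap path
    }
    void* result = pos;
    pos += aligned_size;  // no atomics or locks needed, the buffer is thread-private
    return result;
  }
};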

TryToAllocate

template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
                                           AllocatorType allocator_type,
                                           size_t alloc_size,
                                           size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
    
    
  if (allocator_type != kAllocatorTypeTLAB &&
      allocator_type != kAllocatorTypeRegionTLAB &&
      allocator_type != kAllocatorTypeRosAlloc &&
      UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    
    
    return nullptr;
  }

  mirror::Object* ret;
  // Dispatch on the allocator type
  switch (allocator_type) {
    
    
    case kAllocatorTypeBumpPointer: {
    
    
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
    
    
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
        *bytes_tl_bulk_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
    
    
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
    
    
        ...
      } else {
    
    
        // MaxBytesBulkAllocatedForNonvirtual returns how much memory the run holding a slot of alloc_size would require
        size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
        // Check whether that would push the heap past its footprint limit
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, max_bytes_tl_bulk_allocated))) {
    
    
          return nullptr;
        }
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
    
    
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
    
    
        ...
      } else {
    
    
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
    
    
      // non_moving_space_ is a MallocSpace, i.e. either a DlMallocSpace or a RosAllocSpace
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      break;
    }
    // Other allocators
    case kAllocatorTypeLOS:  ...
    case kAllocatorTypeTLAB: ...
    case kAllocatorTypeRegion: ...
    case kAllocatorTypeRegionTLAB: ...
  }
  return ret;
}
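
The IsOutOfMemoryOnAllocation<kGrow> guard used above compares the would-be footprint against the heap limits. A simplified standalone sketch of that check (the real code also special-cases concurrent GC; the function below is illustrative, not ART source, though the names mirror the heap fields):

#include <cstddef>

// Simplified footprint check: the soft limit (max_allowed_footprint) may only
// be grown toward the hard limit (growth_limit) when kGrow is true.
template <bool kGrow>
bool IsOutOfMemorySketch(size_t num_bytes_allocated, size_t alloc_size,
                         size_t max_allowed_footprint, size_t growth_limit) {
  const size_t new_footprint = num_bytes_allocated + alloc_size;
  if (new_footprint <= max_allowed_footprint) {
    return false;  // fits under the current soft limit
  }
  if (new_footprint > growth_limit) {
    return true;   // beyond the hard limit: genuinely out of memory
  }
  // Between the two limits: only the kGrow retry may enlarge the footprint;
  // a normal attempt reports failure so that the caller can GC first.
  return !kGrow;
}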

AllocateInternalWithGc

AllocateInternalWithGc tries as hard as it can to allocate the memory:

  • If an attempt fails, it escalates the garbage-collection effort and retries, until it runs out of options.
  • As soon as a collection frees enough memory, the allocation succeeds.
enum GcType {
    
    
  // Placeholder for when no GC has been performed.
  kGcTypeNone,
  // Sticky mark bits GC that attempts to only free objects allocated since the last GC.
  kGcTypeSticky,
  // Partial GC that marks the application heap but not the Zygote.
  kGcTypePartial,
  // Full GC that marks and frees in both the application and Zygote heap.
  kGcTypeFull,
  // Number of different GC types.
  kGcTypeMax,
};

mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
                                             AllocatorType allocator,
                                             bool instrumented,
                                             size_t alloc_size,
                                             size_t* bytes_allocated,
                                             size_t* usable_size,
                                             size_t* bytes_tl_bulk_allocated,
                                             mirror::Class** klass) {
    
    

  // If the GC is running, block until it completes, and then retry the allocation.
  collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);

  if (last_gc != collector::kGcTypeNone) {
    
    
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
    if (ptr != nullptr) {
    
    
      return ptr;
    }
  }
  
  // next_gc_type_ is initialized to kGcTypePartial in the Heap constructor
  collector::GcType tried_type = next_gc_type_;
  const bool gc_ran = CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
  if (gc_ran) {
    
    
    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
    if (ptr != nullptr) {
    
    
      return ptr;
    }
  }

  // Loop through our different Gc types and try to Gc until we get enough free memory.
  // gc_plan_ is set up in ChangeCollector; for kCollectorTypeCMS it is [kGcTypeSticky, kGcTypePartial, kGcTypeFull]
  for (collector::GcType gc_type : gc_plan_) {
    
    
    if (gc_type == tried_type) {
    
    
      continue;
    }
    // Attempt to run the collector, if we succeed, re-try the allocation.
    const bool plan_gc_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
    if (plan_gc_ran) {
    
    
      // Did we free sufficient memory for the allocation to succeed?
      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      if (ptr != nullptr) {
    
    
        return ptr;
      }
    }
  }
  // Allocations have failed after GCs;  this is an exceptional state.
  // Try harder, growing the heap if necessary.
  // kGrow is true this time: allow the heap footprint to grow and try the allocation once more
  mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
  if (ptr != nullptr) {
    
    
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing
  // OOME.
  // Run one more GC using the strongest type in gc_plan_ (its last entry), this time also clearing SoftReferences
  CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
  ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);

  // Depending on the allocator type, attempt a heap compaction; if it succeeds, try the allocation again
  if (ptr == nullptr) {
    
    
    const uint64_t current_time = NanoTime();
    switch (allocator) {
    
    
      case kAllocatorTypeRosAlloc:
      case kAllocatorTypeDlMalloc: {
    
    
        if (use_homogeneous_space_compaction_for_oom_ &&
            current_time - last_time_homogeneous_space_compaction_by_oom_ >
            min_interval_homogeneous_space_compaction_by_oom_) {
    
    
          last_time_homogeneous_space_compaction_by_oom_ = current_time;
          // Homogeneous space compaction
          HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
          switch (result) {
    
    
            case HomogeneousSpaceCompactResult::kSuccess:
              // If the allocation succeeded, we delayed an oom.
              ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
              if (ptr != nullptr) {
    
    
                count_delayed_oom_++;
              }
              break;
          }
        }
        break;
      }
      case kAllocatorTypeNonMoving: {
    
    
        // Try to transition the heap if the allocation failure was due to the space being full.
        if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
    
    
          DisableMovingGc();
          // If we are still a moving GC then something must have caused the transition to fail.
          if (IsMovingGc(collector_type_)) {
    
    
          } else {
    
    
            ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
                                            usable_size, bytes_tl_bulk_allocated);
          }
        }
        break;
      }
      default: {
    
    
        // Do nothing for others allocators.
      }
    }
  }
  // If the allocation hasn't succeeded by this point, throw an OOM error.
  if (ptr == nullptr) {
    
    
    ThrowOutOfMemoryError(self, alloc_size, allocator);
  }
  return ptr;
}
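
Putting the whole function together, the escalation ladder can be summarized as follows (a condensed reading of the code above, not additional ART source):

// 1. Wait for any in-progress GC to finish, then TryToAllocate again.
// 2. Run next_gc_type_ (kGcTypePartial by default) and retry.
// 3. Walk gc_plan_ (e.g. sticky -> partial -> full for CMS), retrying after each GC.
// 4. TryToAllocate with kGrow == true, allowing the heap footprint to grow.
// 5. Run the strongest GC in gc_plan_, clearing SoftReferences, then retry with kGrow.
// 6. Allocator-specific last resorts: homogeneous space compaction (RosAlloc/DlMalloc)
//    or disabling the moving GC (non-moving space), then retry.
// 7. If everything still failed, ThrowOutOfMemoryError.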

PushOnAllocationStack

The allocation stack is closely tied to the kGcTypeSticky collection type in the Heap's GC strategy.
kGcTypeSticky scans and processes only the objects created between the end of the previous GC and the current one, so ART needs to remember which objects were created in that window.
The allocation stack is exactly where ART records these newly created objects, which is why Heap::AllocObjectWithAllocator calls PushOnAllocationStack.

art/runtime/gc/heap-inl.h
art/runtime/thread-inl.h

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
    
    
  if (kUseThreadLocalAllocationStack) {
    
    
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
    
    
      // If this returns false, the thread-local allocation stack is full; the function below may trigger a GC and then pushes the object again
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  }
}

inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) {
    
    
  if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
    
    
    tlsPtr_.thread_local_alloc_stack_top->Assign(obj);
    ++tlsPtr_.thread_local_alloc_stack_top;
    return true;
  }
  return false;
}
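
When the thread-local segment (kThreadLocalAllocationStackSize == 128 references) is full, PushOnThreadLocalAllocationStackWithInternalGC obtains a fresh segment from the heap's shared allocation stack, running a GC if the shared stack itself is full. A simplified sketch of that slow path based on the AOSP sources; exact types and signatures vary across versions:

// Simplified sketch (types and names abbreviated); see
// Heap::PushOnThreadLocalAllocationStackWithInternalGC in art/runtime/gc/heap.cc.
void PushWithInternalGcSketch(Thread* self, mirror::Object** obj) {
  StackReference<mirror::Object>* start;
  StackReference<mirror::Object>* end;
  // Carve a fresh 128-slot segment out of the heap's shared allocation stack;
  // if the shared stack itself is full, run a sticky GC to drain it and retry.
  while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start, &end)) {
    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
  }
  self->SetThreadLocalAllocationStack(start, end);
  self->PushOnThreadLocalAllocationStack(*obj);  // now guaranteed to succeed
}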

Reposted from blog.csdn.net/u014099894/article/details/129942499