Android ART VM: GC Flow Analysis

Foreword

This article analyzes the GC flow of the ART virtual machine, starting from the core function CollectGarbageInternal.

CollectGarbageInternal

When CMS is configured as the default collector:

  • collector_type_ is kCollectorTypeCMS.
  • foreground_collector_type_ is kCollectorTypeCMS, the collector type used while the app is in the foreground.
  • background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact, the collector type used while the app is in the background.
  • garbage_collectors_ holds four collector objects, of type StickyMarkSweep, PartialMarkSweep, MarkSweep, and SemiSpace (the latter's collector type is kCollectorTypeSS).
  • gc_plan_ stores the GC plan; its elements are, in order, kGcTypeSticky, kGcTypePartial, and kGcTypeFull. A sketch of how this plan is built follows the list.
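Roughly how this plan gets populated can be seen in Heap::ChangeCollector. The sketch below is simplified from the ART sources and omits the allocator changes and the other collector types; treat it as an outline rather than the verbatim code.

// Simplified sketch of Heap::ChangeCollector for the CMS case (not verbatim ART code).
void Heap::ChangeCollector(CollectorType collector_type) {
  if (collector_type != collector_type_) {
    collector_type_ = collector_type;
    gc_plan_.clear();
    switch (collector_type_) {
      case kCollectorTypeCMS: {
        // Cheapest plan first: sticky, then partial, then full.
        gc_plan_.push_back(collector::kGcTypeSticky);
        gc_plan_.push_back(collector::kGcTypePartial);
        gc_plan_.push_back(collector::kGcTypeFull);
        break;
      }
      ...
    }
  }
}
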
// Returns the GC type actually used for this collection; kGcTypeNone means no GC was performed.
collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
                                               GcCause gc_cause,
                                               bool clear_soft_references) {
  Thread* self = Thread::Current();
  Runtime* runtime = Runtime::Current();

  // If the heap can't run the GC, silently fail and return that no GC was run.
  switch (gc_type) {
    case collector::kGcTypePartial: {
      if (!HasZygoteSpace()) {
        // No ZygoteSpace means a kGcTypePartial collection cannot be performed.
        return collector::kGcTypeNone;
      }
      break;
    }
  }

  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);

  bool compacting_gc;
  {
    ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
    MutexLock mu(self, *gc_complete_lock_);
    // Ensure there is only one GC at a time.
    WaitForGcToCompleteLocked(gc_cause, self);

    compacting_gc = IsMovingGc(collector_type_);  // false for CMS.
    collector_type_running_ = collector_type_;
  }

  if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
    ++runtime->GetStats()->gc_for_alloc_count;
    ++self->GetStats()->gc_for_alloc_count;
  }
  const uint64_t bytes_allocated_before_gc = GetBytesAllocated();

  collector::GarbageCollector* collector = nullptr;

  if (compacting_gc) {
    ...
  } else if (current_allocator_ == kAllocatorTypeRosAlloc || current_allocator_ == kAllocatorTypeDlMalloc) {
    // Pick the collector matching the requested GC type; under CMS, kGcTypeSticky maps to StickyMarkSweep.
    collector = FindCollectorByGcType(gc_type);
  } else {
    ...
  }
  if (IsGcConcurrent()) {
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }

  // It's time to clear all inline caches, in case some classes can be unloaded.
  if ((gc_type == collector::kGcTypeFull) && (runtime->GetJit() != nullptr)) {
    runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
  }

  // Call the collector's Run() to perform the actual collection.
  collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());

  // Request a trim: posts a HeapTrimTask to task_processor_, which later performs the actual heap trimming.
  RequestTrim(self);

  // Enqueue cleared references.
  reference_processor_->EnqueueClearedReferences(self);

  // Grow the heap so that we know when to perform the next GC.
  GrowForUtilization(collector, bytes_allocated_before_gc);

  LogGC(gc_cause, collector);
  FinishGC(self, gc_type);

  // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
  // deadlocks in case the JNI_OnUnload function does allocations.
  {
    ScopedObjectAccess soa(self);
    soa.Vm()->UnloadNativeLibraries();
  }
  return gc_type;
}
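For context, the explicit-GC path (Runtime.gc()) lands here through Heap::CollectGarbage, roughly as sketched below; under CMS, gc_plan_.back() is kGcTypeFull, so an explicit GC requests a full collection. The sketch is simplified from the ART sources.

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not have necessarily been cleared.
  CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
}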

  static bool IsMovingGc(CollectorType collector_type) {
    return
        collector_type == kCollectorTypeSS ||
        collector_type == kCollectorTypeGSS ||
        collector_type == kCollectorTypeCC ||
        collector_type == kCollectorTypeMC ||
        collector_type == kCollectorTypeHomogeneousSpaceCompact;
  }

  bool IsGcConcurrent() const ALWAYS_INLINE {
    return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
  }

collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
  for (const auto& collector : garbage_collectors_) {
    if (collector->GetCollectorType() == collector_type_ &&
        collector->GetGcType() == gc_type) {
      return collector;
    }
  }
  return nullptr;
}
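
Given the collectors listed at the top of this article, the lookup under CMS is expected to resolve as follows (illustrative usage, not verbatim ART code):

// With collector_type_ == kCollectorTypeCMS:
//   FindCollectorByGcType(kGcTypeSticky)  -> the StickyMarkSweep instance
//   FindCollectorByGcType(kGcTypePartial) -> the PartialMarkSweep instance
//   FindCollectorByGcType(kGcTypeFull)    -> the MarkSweep instance
collector::GarbageCollector* collector = FindCollectorByGcType(collector::kGcTypeSticky);
CHECK(collector != nullptr);  // Expected to be the StickyMarkSweep created for CMS.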

GarbageCollector

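The three mark-sweep collectors used by CMS form an inheritance chain under GarbageCollector. The outline below is simplified from the ART headers (member lists and qualifiers omitted); the GetGcType overrides are what FindCollectorByGcType matches against.

// MarkSweep collects the whole heap; PartialMarkSweep skips the zygote space;
// StickyMarkSweep only scans objects allocated since the last GC.
class MarkSweep : public GarbageCollector {
 public:
  GcType GetGcType() const override { return kGcTypeFull; }
  CollectorType GetCollectorType() const override {
    return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
  }
};

class PartialMarkSweep : public MarkSweep {
 public:
  GcType GetGcType() const override { return kGcTypePartial; }
};

class StickyMarkSweep : public PartialMarkSweep {
 public:
  GcType GetGcType() const override { return kGcTypeSticky; }
};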

namespace collector {
  class ConcurrentCopying;
  class GarbageCollector;
  class MarkCompact;
  class SemiSpace;

  class MarkSweep;
  class PartialMarkSweep;
  class StickyMarkSweep;
}  // namespace collector

class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public MarkObjectVisitor {
 public:
  virtual GcType GetGcType() const = 0;
  virtual CollectorType GetCollectorType() const = 0;
  // Run the garbage collector.
  void Run(GcCause gc_cause, bool clear_soft_references) REQUIRES(!pause_histogram_lock_);
  Heap* GetHeap() const {
    return heap_;
  }

 protected:
  // Run all of the GC phases.
  virtual void RunPhases() = 0;

  Heap* const heap_;
  std::string name_;
  uint64_t total_time_ns_;
  uint64_t total_freed_objects_;
  int64_t total_freed_bytes_;
};

Run

void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
  Thread* self = Thread::Current();
  // Record the GC start time.
  uint64_t start_time = NanoTime();

  Iteration* current_iteration = GetCurrentIteration();
  current_iteration->Reset(gc_cause, clear_soft_references);
  RunPhases();  // Run all the GC phases.

  // Add the current timings to the cumulative timings.
  cumulative_timings_.AddLogger(*GetTimings());
  // Update cumulative statistics with how many bytes the GC iteration freed.
  total_freed_objects_ += current_iteration->GetFreedObjects() + current_iteration->GetFreedLargeObjects();
  total_freed_bytes_ += current_iteration->GetFreedBytes() + current_iteration->GetFreedLargeObjectBytes();

  uint64_t end_time = NanoTime();
  current_iteration->SetDurationNs(end_time - start_time);

  total_time_ns_ += current_iteration->GetDurationNs();
  for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
    MutexLock mu(self, pause_histogram_lock_);
    pause_histogram_.AdjustAndAddValue(pause_time);
  }
}
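
RunPhases is the hook each concrete collector implements. For the concurrent MarkSweep used by CMS, the phase structure looks roughly like the sketch below (simplified from MarkSweep::RunPhases; verification hooks and the non-concurrent branch are omitted):

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();                  // Reset per-iteration state, bind mark bitmaps.
  if (IsConcurrent()) {
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();                 // Mark roots and trace reachable objects while mutators run.
    }
    ScopedPause pause(this);          // Short stop-the-world window.
    PausePhase();                     // Re-mark objects dirtied during concurrent marking.
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is done concurrently, with the mutator lock held for reading.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();                   // Sweep unmarked objects and reclaim their memory.
  }
  FinishPhase();                      // Update statistics and unbind bitmaps.
}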

GrowForUtilization

void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
                              uint64_t bytes_allocated_before_gc) {
  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
  const uint64_t bytes_allocated = GetBytesAllocated();
  uint64_t target_size;
  collector::GcType gc_type = collector_ran->GetGcType();
  const double multiplier = HeapGrowthMultiplier();  // Use the multiplier to grow more for foreground apps.
  const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
  const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);

  // Adjust the target heap size according to the type of GC that just ran.
  if (gc_type != collector::kGcTypeSticky) {
    // Grow the heap for non sticky GC.
    ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
    CHECK_GE(delta, 0);
    target_size = bytes_allocated + delta * multiplier;
    target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
    target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
    native_need_to_run_finalization_ = true;
    next_gc_type_ = collector::kGcTypeSticky;
  } else {
    collector::GcType non_sticky_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
    // Find what the next non sticky collector will be.
    collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
    // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
    // do another sticky collection next.
    if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
        non_sticky_collector->GetEstimatedMeanThroughput() &&
        non_sticky_collector->NumberOfIterations() > 0 &&
        bytes_allocated <= max_allowed_footprint_) {
      next_gc_type_ = collector::kGcTypeSticky;
    } else {
      next_gc_type_ = non_sticky_gc_type;
    }
    // If we have freed enough memory, shrink the heap back down.
    if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
      target_size = bytes_allocated + adjusted_max_free;
    } else {
      target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
    }
  }

  if (!ignore_max_footprint_) {
    // Apply the new target heap size.
    SetIdealFootprint(target_size);

    // Set concurrent_start_bytes_, the allocation threshold that triggers the next concurrent GC.
    if (IsGcConcurrent()) {
      const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() + current_gc_iteration_.GetFreedLargeObjectBytes() + current_gc_iteration_.GetFreedRevokeBytes();
      const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes - bytes_allocated_before_gc;
      // Calculate when to perform the next ConcurrentGC.
      // Calculate the estimated GC duration.
      const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
      // Estimate how many remaining bytes we will have when we need to start the next GC.
      size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
      remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
      remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
      if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
        // A never going to happen situation that from the estimated allocation rate we will exceed
        // the applications entire footprint with the given estimated allocation rate. Schedule
        // another GC nearly straight away.
        remaining_bytes = kMinConcurrentRemainingBytes;
      }

      // Start a concurrent GC when we get close to the estimated remaining bytes. When the
      // allocation rate is very high, remaining_bytes could tell us that we should start a GC
      // right away.
      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
                                         static_cast<size_t>(bytes_allocated));
    }
  }
}
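
A worked example of the non-sticky branch, with illustrative numbers (not the defaults on every device or ART version):

// bytes_allocated = 40 MB, GetTargetHeapUtilization() = 0.75,
// multiplier = 1.0 (foreground), adjusted_min_free = 512 KB, adjusted_max_free = 8 MB.
//
// delta       = 40 MB / 0.75 - 40 MB            ≈ 13.3 MB
// target_size = 40 MB + 13.3 MB * 1.0           ≈ 53.3 MB
// target_size = min(53.3 MB, 40 MB + 8 MB)      = 48 MB
// target_size = max(48 MB, 40 MB + 512 KB)      = 48 MB   -> SetIdealFootprint(48 MB)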

LogGC

void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
  const size_t duration = GetCurrentGcIteration()->GetDurationNs();
  const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
  // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
  // (mutator time blocked >= long_pause_log_threshold_).
  bool log_gc = gc_cause == kGcCauseExplicit;
  if (!log_gc && CareAboutPauseTimes()) {
    // GC for alloc pauses the allocating thread, so consider it as a pause.
    log_gc = duration > long_gc_log_threshold_ ||
        (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
    for (uint64_t pause : pause_times) {
      log_gc = log_gc || pause >= long_pause_log_threshold_;
    }
  }
  if (log_gc) {
    const size_t percent_free = GetPercentFree();
    const size_t current_heap_size = GetBytesAllocated();
    const size_t total_memory = GetTotalMemory();
    std::ostringstream pause_string;
    for (size_t i = 0; i < pause_times.size(); ++i) {
      pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
                   << ((i != pause_times.size() - 1) ? "," : "");
    }
    LOG(INFO) << gc_cause << " " << collector->GetName()
              << " GC freed "  << current_gc_iteration_.GetFreedObjects() << "("
              << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
              << current_gc_iteration_.GetFreedLargeObjects() << "("
              << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
              << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
              << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
              << " total " << PrettyDuration((duration / 1000) * 1000);
    VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
  }
}
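
Putting the stream pieces together, a line logged by this path looks roughly like the following (values invented for illustration; the shape follows the LOG(INFO) statement above):

Explicit concurrent mark sweep GC freed 104710(7MB) AllocSpace objects, 21(416KB) LOS objects, 33% free, 25MB/38MB, paused 1.230ms total 67.216ms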


Reprinted from blog.csdn.net/u014099894/article/details/129969915