在g1VMOperations.cpp中可以看到不同的gc操作
g1中的gc不論如何觸發(fā),最后就只有兩種,do_full_collection或者 do_collection_pause_at_safepoint
盡量避免full gc,可以看這兩個(gè)方法的參數(shù),full gc沒(méi)有傳入時(shí)間,是不可控的
按照理論:
gc分為兩個(gè)階段,一個(gè)young only一個(gè)mixed,實(shí)在不行就會(huì)進(jìn)行full gc
Young-only phase和Space-reclamation phase,整個(gè)過(guò)程是一個(gè)交替的環(huán),稱之為Garbage Collection Cycle

圖中:
大圈表示循環(huán),每個(gè)實(shí)心的圈表示gc暫停
藍(lán)色實(shí)心球:
young-only引發(fā)的暫停,老年代超過(guò)了閾值InitiatingHeapOccupancyPercent會(huì)觸發(fā)并發(fā)標(biāo)記
橙色實(shí)心球:
第一個(gè)表示并發(fā)標(biāo)記結(jié)束后的 Remark(重新標(biāo)記)暫停,第二個(gè)表示 Cleanup(清理)暫停;兩者之間可能額外發(fā)生若干次 young-only gc。在 Cleanup 之后完成最后一次 young gc,就開(kāi)啟了 space-reclamation phase
紅色實(shí)心球:
mixed 混合回收:每次暫停在回收整個(gè)年輕代的同時(shí),如果距離目標(biāo)暫停時(shí)間還有富余,再順帶回收一部分老年代 region
After space-reclamation, the collection cycle restarts with another young-only phase. As backup, if the application runs out of memory while gathering liveness information, G1 performs an in-place stop-the-world full heap compaction (Full GC) like other collectors.
空間回收階段結(jié)束后,整個(gè)收集循環(huán)重新開(kāi)始,進(jìn)入新的 young-only 階段。作為兜底:如果應(yīng)用在收集存活信息(并發(fā)標(biāo)記)期間內(nèi)存耗盡,G1 會(huì)像其他收集器一樣,執(zhí)行一次原地(in-place)、stop-the-world 的整堆壓縮,也就是 Full GC
結(jié)合g1VMOperations假設(shè)do_full_collection就是full gc無(wú)法控制時(shí)間在cycle之外(跳出循環(huán)后面有方法force_concurrent_start_if_outside_cycle能夠再次開(kāi)啟cycle),do_collection_pause_at_safepoint能控制就理解成cycle的回收方法
詳細(xì)的英文請(qǐng)參考o(jì)racle官方文件:
https://docs.oracle.com/en/java/javase/17/gctuning/garbage-first-g1-garbage-collector1.html#GUID-F1BE86FA-3EDC-4D4F-BDB4-4B044AD83180
do_collection_pause_at_safepoint_helper
// Performs one young-generation ("minor") collection pause at a safepoint.
// target_pause_time_ms is the pause-time goal used when sizing the collection
// set — this parameter is what makes this pause time-bounded, unlike a full GC.
// If the policy decides this pause is a "concurrent start" pause, a concurrent
// mark cycle is kicked off at the very end, after all pause-local logging.
void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
GCIdMark gc_id_mark;
SvcGCMarker sgcm(SvcGCMarker::MINOR);
ResourceMark rm;
policy()->note_gc_start();
gc_tracer_report_gc_start();
// Wait for the concurrent root region scan (left over from a previous
// concurrent start pause) to finish before this pause moves any objects.
wait_for_root_region_scanning();
print_heap_before_gc();
print_heap_regions();
trace_heap_before_gc(_gc_tracer_stw);
_verifier->verify_region_sets_optional();
_verifier->verify_dirty_young_regions();
// We should not be doing concurrent start unless the concurrent mark thread is running
if (!_cm_thread->should_terminate()) {
// This call will decide whether this pause is a concurrent start
// pause. If it is, in_concurrent_start_gc() will return true
// for the duration of this pause.
// i.e. decide whether to piggy-back the start of concurrent marking
// on this young pause.
policy()->decide_on_conc_mark_initiation();
}
// We do not allow concurrent start to be piggy-backed on a mixed GC.
assert(!collector_state()->in_concurrent_start_gc() ||
collector_state()->in_young_only_phase(), "sanity");
// We also do not allow mixed GCs during marking.
assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
// Record whether this pause may need to trigger a concurrent operation. Later,
// when we signal the G1ConcurrentMarkThread, the collector state has already
// been reset for the next pause.
bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc();
bool concurrent_operation_is_full_mark = false;
// Inner scope for scope based logging, timers, and stats collection
{
G1EvacuationInfo evacuation_info;
GCTraceCPUTime tcpu;
char young_gc_name[MaxYoungGCNameLength];
set_young_gc_name(young_gc_name);
GCTraceTime(Info, gc) tm(young_gc_name, NULL, gc_cause(), true);
uint active_workers = WorkerPolicy::calc_active_workers(workers()->total_workers(),
workers()->active_workers(),
Threads::number_of_non_daemon_threads());
active_workers = workers()->update_active_workers(active_workers);
log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
G1MonitoringScope ms(g1mm(),
false /* full_gc */,
collector_state()->in_mixed_phase() /* all_memory_pools_affected */);
G1HeapTransition heap_transition(this);
{
IsGCActiveMark x;
gc_prologue(false);
// Pick the verification flavor for this pause depending on whether it
// is young-only or mixed.
G1HeapVerifier::G1VerifyType verify_type = young_collection_verify_type();
// Optional pre-GC verification (NOTE(review): reportedly walks regions
// checking remembered sets / card table — confirm in G1HeapVerifier).
verify_before_young_collection(verify_type);
{
// The elapsed time induced by the start time below deliberately elides
// the possible verification above.
double sample_start_time_sec = os::elapsedTime();
// Please see comment in g1CollectedHeap.hpp and
// G1CollectedHeap::ref_processing_init() to see how
// reference processing currently works in G1.
// Turn on reference discovery by the STW reference processor.
_ref_processor_stw->enable_discovery();
// We want to temporarily turn off discovery by the
// CM ref processor, if necessary, and turn it back on
// on again later if we do. Using a scoped
// NoRefDiscovery object will do this.
NoRefDiscovery no_cm_discovery(_ref_processor_cm);
// Record pause start (timestamps, collector state; per the original
// notes this also resets per-pause young-gen stats such as the
// survivor age table — verify against G1Policy).
policy()->record_collection_pause_start(sample_start_time_sec);
// Forget the current allocation region (we might even choose it to be part
// of the collection set!).
// NOTE(review): these are the regions mutators allocate into —
// presumably eden young regions; confirm against G1Allocator.
_allocator->release_mutator_alloc_regions();
// Choose the collection set (eden/survivor, plus optional old regions
// in a mixed pause), sized against target_pause_time_ms.
calculate_collection_set(evacuation_info, target_pause_time_ms);
G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
G1ParScanThreadStateSet per_thread_states(this,
&rdcqs,
workers()->active_workers(),
collection_set()->young_region_length(),
collection_set()->optional_region_length());
pre_evacuate_collection_set(evacuation_info, &per_thread_states);
bool may_do_optional_evacuation = _collection_set.optional_region_length() != 0;
// Actually do the work...
evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);
if (may_do_optional_evacuation) {
evacuate_optional_collection_set(&per_thread_states);
}
// Finish up after evacuation (per-thread state flush, redirtied
// cards, freeing the collection set, ...).
post_evacuate_collection_set(evacuation_info, &rdcqs, &per_thread_states);
// Prepare a fresh (empty) collection set for the next pause.
start_new_collection_set();
_survivor_evac_stats.adjust_desired_plab_sz();
_old_evac_stats.adjust_desired_plab_sz();
allocate_dummy_regions();
// Re-establish mutator allocation regions released above.
_allocator->init_mutator_alloc_regions();
expand_heap_after_young_collection();
// Refine the type of a concurrent mark operation now that we did the
// evacuation, eventually aborting it.
concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");
// Need to report the collection pause now since record_collection_pause_end()
// modifies it to the next state.
_gc_tracer_stw->report_young_gc_pause(collector_state()->young_gc_pause_type(concurrent_operation_is_full_mark));
double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
policy()->record_collection_pause_end(pause_time_ms, concurrent_operation_is_full_mark);
}
verify_after_young_collection(verify_type);
gc_epilogue(false);
}
// Print the remainder of the GC log output.
if (evacuation_failed()) {
log_info(gc)("To-space exhausted");
}
policy()->print_phases();
heap_transition.print();
_hrm.verify_optional();
_verifier->verify_region_sets_optional();
TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
print_heap_after_gc();
print_heap_regions();
trace_heap_after_gc(_gc_tracer_stw);
// We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm()->update_sizes();
gc_tracer_report_gc_end(concurrent_operation_is_full_mark, evacuation_info);
}
// It should now be safe to tell the concurrent mark thread to start
// without its logging output interfering with the logging output
// that came from the pause.
if (should_start_concurrent_mark_operation) {
// CAUTION: after the start_concurrent_cycle() call below, the concurrent marking
// thread(s) could be running concurrently with us. Make sure that anything
// after this point does not assume that we are the only GC thread running.
// Note: of course, the actual marking work will not start until the safepoint
// itself is released in SuspendibleThreadSet::desynchronize().
start_concurrent_cycle(concurrent_operation_is_full_mark);
ConcurrentGCBreakpoints::notify_idle_to_active();
}
}