diff --git a/eng/pipelines/libraries/helix-queues-setup.yml b/eng/pipelines/libraries/helix-queues-setup.yml index fdebac85bc327a..71713d3bccb3c7 100644 --- a/eng/pipelines/libraries/helix-queues-setup.yml +++ b/eng/pipelines/libraries/helix-queues-setup.yml @@ -37,7 +37,7 @@ jobs: # Linux musl x64 - ${{ if eq(parameters.platform, 'linux_musl_x64') }}: - - ${{ if or(eq(parameters.jobParameters.isExtraPlatformsBuild, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: + - ${{ if or(eq(parameters.jobParameters.isExtraPlatformsBuild, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - (Alpine.edge.Amd64.Open)AzureLinux.3.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-edge-helix-amd64 - ${{ if or(ne(parameters.jobParameters.isExtraPlatformsBuild, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - (Alpine.322.Amd64.Open)AzureLinux.3.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.22-helix-amd64 @@ -75,7 +75,7 @@ jobs: - Ubuntu.2204.Amd64.Open - (AzureLinux.3.0.Amd64.Open)AzureLinux.3.Amd64.open@mcr.microsoft.com/dotnet-buildtools/prereqs:azurelinux-3.0-helix-amd64 - (Centos.10.Amd64.Open)AzureLinux.3.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:centos-stream-10-helix-amd64 - + # OSX arm64 - ${{ if eq(parameters.platform, 'osx_arm64') }}: - osx.13.arm64.open diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp index 1c990c6988463b..cacae581ae019a 100644 --- a/src/coreclr/gc/gc.cpp +++ b/src/coreclr/gc/gc.cpp @@ -428,9 +428,26 @@ size_t gib (size_t num) #ifdef BACKGROUND_GC uint32_t bgc_alloc_spin_count = 140; -uint32_t bgc_alloc_spin_count_uoh = 16; uint32_t bgc_alloc_spin = 2; +// The following 2 ratios dictate how UOH allocations that happen during a BGC should be handled. Because +// UOH is not collected till the very end of a BGC, by default we don't want to allow UOH to grow too large +// during a BGC. So if the size has only increased by less than 10%, we allow allocations to proceed normally. But if it's +// too much (i.e., > bgc_uoh_inc_ratio_alloc_wait), we will make the allocation wait till the BGC is done. +// +// This means threads that allocate heavily on UOH may be paused during a BGC. If you're willing to accept +// larger UOH sizes in exchange for fewer pauses, you can use the UOHWaitBGCSizeIncPercent config to increase +// the wait ratio. Likewise, set it to a smaller value if you observe that UOH grows too large during +// BGCs. +float bgc_uoh_inc_ratio_alloc_normal = 0.1f; +// This ratio is 2x for regions because regions could start with a much smaller size since a lot of +// memory could be in the free pool.
+#ifdef USE_REGIONS +float bgc_uoh_inc_ratio_alloc_wait = 2.0f; +#else +float bgc_uoh_inc_ratio_alloc_wait = 1.0f; +#endif //USE_REGIONS + inline void c_write (uint32_t& place, uint32_t value) { @@ -2720,10 +2737,9 @@ heap_segment* gc_heap::freeable_soh_segment = 0; size_t gc_heap::bgc_overflow_count = 0; -size_t gc_heap::bgc_begin_loh_size = 0; -size_t gc_heap::end_loh_size = 0; -size_t gc_heap::bgc_begin_poh_size = 0; -size_t gc_heap::end_poh_size = 0; +size_t gc_heap::bgc_begin_uoh_size[uoh_generation_count] = {}; +size_t gc_heap::bgc_uoh_current_size[uoh_generation_count] = {}; +size_t gc_heap::end_uoh_size[uoh_generation_count] = {}; size_t gc_heap::uoh_a_no_bgc[uoh_generation_count] = {}; size_t gc_heap::uoh_a_bgc_marking[uoh_generation_count] = {}; @@ -2732,16 +2748,10 @@ size_t gc_heap::uoh_a_bgc_planning[uoh_generation_count] = {}; size_t gc_heap::bgc_maxgen_end_fl_size = 0; #endif //BGC_SERVO_TUNING -size_t gc_heap::bgc_loh_size_increased = 0; - -size_t gc_heap::bgc_poh_size_increased = 0; - size_t gc_heap::background_soh_size_end_mark = 0; size_t gc_heap::background_soh_alloc_count = 0; -size_t gc_heap::background_uoh_alloc_count = 0; - uint8_t** gc_heap::background_mark_stack_tos = 0; uint8_t** gc_heap::background_mark_stack_array = 0; @@ -8166,19 +8176,6 @@ bool gc_heap::new_allocation_allowed (int gen_number) { if (dd_new_allocation (dynamic_data_of (gen_number)) < 0) { - if (gen_number != 0) - { - // For UOH we will give it more budget before we try a GC. - if (settings.concurrent) - { - dynamic_data* dd2 = dynamic_data_of (gen_number); - - if (dd_new_allocation (dd2) <= (ptrdiff_t)(-2 * dd_desired_allocation (dd2))) - { - return TRUE; - } - } - } return FALSE; } #ifndef MULTIPLE_HEAPS @@ -14515,6 +14512,26 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size, #endif //WRITE_WATCH #ifdef BACKGROUND_GC +#ifdef USE_REGIONS + int bgc_uoh_inc_percent_alloc_wait = (int)GCConfig::GetUOHWaitBGCSizeIncPercent(); + if (bgc_uoh_inc_percent_alloc_wait != -1) + { + bgc_uoh_inc_ratio_alloc_wait = (float)bgc_uoh_inc_percent_alloc_wait / 100.0f; + } + else + { + bgc_uoh_inc_percent_alloc_wait = (int)(bgc_uoh_inc_ratio_alloc_wait * 100.0f); + } + + if (bgc_uoh_inc_ratio_alloc_normal > bgc_uoh_inc_ratio_alloc_wait) + { + bgc_uoh_inc_ratio_alloc_normal = bgc_uoh_inc_ratio_alloc_wait; + } + GCConfig::SetUOHWaitBGCSizeIncPercent (bgc_uoh_inc_percent_alloc_wait); + dprintf (1, ("UOH allocs during BGC are allowed normally when inc ratio is < %.3f, will wait when > %.3f", + bgc_uoh_inc_ratio_alloc_normal, bgc_uoh_inc_ratio_alloc_wait)); +#endif + // leave the first page to contain only segment info // because otherwise we could need to revisit the first page frequently in // background GC. 
@@ -15584,10 +15601,11 @@ gc_heap::init_gc_heap (int h_number) bgc_threads_timeout_cs.Initialize(); current_bgc_state = bgc_not_in_process; background_soh_alloc_count = 0; - background_uoh_alloc_count = 0; bgc_overflow_count = 0; - end_loh_size = dd_min_size (dynamic_data_of (loh_generation)); - end_poh_size = dd_min_size (dynamic_data_of (poh_generation)); + for (int i = uoh_start_generation; i < total_generation_count; i++) + { + end_uoh_size[i - uoh_start_generation] = dd_min_size (dynamic_data_of (i)); + } current_sweep_pos = 0; #ifdef DOUBLY_LINKED_FL @@ -18023,6 +18041,7 @@ BOOL gc_heap::a_fit_segment_end_p (int gen_number, #ifdef BACKGROUND_GC if (cookie != -1) { + bgc_record_uoh_end_seg_allocation (gen_number, limit); allocated += limit; bgc_uoh_alloc_clr (old_alloc, limit, acontext, flags, gen_number, align_const, cookie, TRUE, seg); } @@ -18050,6 +18069,10 @@ BOOL gc_heap::a_fit_segment_end_p (int gen_number, limit += Align(min_obj_size, align_const); } +#ifdef BACKGROUND_GC + bgc_record_uoh_end_seg_allocation (gen_number, limit); +#endif + allocated += limit; adjust_limit_clr (old_alloc, limit, size, acontext, flags, seg, align_const, gen_number); } @@ -18562,60 +18585,6 @@ void gc_heap::bgc_untrack_uoh_alloc() dprintf (3, ("h%d: dec lc: %d", heap_number, (int32_t)uoh_alloc_thread_count)); } } - -// We need to throttle the UOH allocations during BGC since we can't -// collect UOH when BGC is in progress (when BGC sweeps UOH allocations on UOH are disallowed) -// We allow the UOH heap size to double during a BGC. And for every -// 10% increase we will have the UOH allocating thread sleep for one more -// ms. So we are already 30% over the original heap size the thread will -// sleep for 3ms. -int bgc_allocate_spin(size_t min_gc_size, size_t bgc_begin_size, size_t bgc_size_increased, size_t end_size) -{ - if ((bgc_begin_size + bgc_size_increased) < (min_gc_size * 10)) - { - // just do it, no spinning - return 0; - } - - if ((bgc_begin_size >= (2 * end_size)) || (bgc_size_increased >= bgc_begin_size)) - { - if (bgc_begin_size >= (2 * end_size)) - { - dprintf (3, ("alloc-ed too much before bgc started")); - } - else - { - dprintf (3, ("alloc-ed too much after bgc started")); - } - - // -1 means wait for bgc - return -1; - } - else - { - return (int)(((float)bgc_size_increased / (float)bgc_begin_size) * 10); - } -} - -int gc_heap::bgc_loh_allocate_spin() -{ - size_t min_gc_size = dd_min_size (dynamic_data_of (loh_generation)); - size_t bgc_begin_size = bgc_begin_loh_size; - size_t bgc_size_increased = bgc_loh_size_increased; - size_t end_size = end_loh_size; - - return bgc_allocate_spin(min_gc_size, bgc_begin_size, bgc_size_increased, end_size); -} - -int gc_heap::bgc_poh_allocate_spin() -{ - size_t min_gc_size = dd_min_size (dynamic_data_of (poh_generation)); - size_t bgc_begin_size = bgc_begin_poh_size; - size_t bgc_size_increased = bgc_poh_size_increased; - size_t end_size = end_poh_size; - - return bgc_allocate_spin(min_gc_size, bgc_begin_size, bgc_size_increased, end_size); -} #endif //BACKGROUND_GC size_t gc_heap::get_uoh_seg_size (size_t size) @@ -18728,19 +18697,6 @@ BOOL gc_heap::uoh_try_fit (int gen_number, acontext, flags, align_const, commit_failed_p, oom_r); -#ifdef BACKGROUND_GC - if (can_allocate && gc_heap::background_running_p()) - { - if (gen_number == poh_generation) - { - bgc_poh_size_increased += size; - } - else - { - bgc_loh_size_increased += size; - } - } -#endif //BACKGROUND_GC } return can_allocate; @@ -18853,26 +18809,83 @@ bool 
gc_heap::should_retry_other_heap (int gen_number, size_t size) } #ifdef BACKGROUND_GC +uoh_allocation_action gc_heap::get_bgc_allocate_action (int gen_number) +{ + int uoh_idx = gen_number - uoh_start_generation; + + // We always allocate normally if the total size is small enough. + if (bgc_uoh_current_size[uoh_idx] < (dd_min_size (dynamic_data_of (gen_number)) * 10)) + { + return uoh_alloc_normal; + } + +#ifndef USE_REGIONS + // This is legacy behavior for segments - segments' sizes are usually very stable. But for regions we could + // have released a bunch of regions into the free pool during the last gen2 GC so checking the last UOH size + // doesn't make sense. + if (bgc_begin_uoh_size[uoh_idx] >= (2 * end_uoh_size[uoh_idx])) + { + dprintf (3, ("h%d alloc-ed too much before bgc started, last end %Id, this start %Id, wait", + heap_number, end_uoh_size[uoh_idx], bgc_begin_uoh_size[uoh_idx])); + return uoh_alloc_wait; + } +#endif //USE_REGIONS + + size_t size_increased = bgc_uoh_current_size[uoh_idx] - bgc_begin_uoh_size[uoh_idx]; + float size_increased_ratio = (float)size_increased / (float)bgc_begin_uoh_size[uoh_idx]; + + if (size_increased_ratio < bgc_uoh_inc_ratio_alloc_normal) + { + return uoh_alloc_normal; + } + else if (size_increased_ratio > bgc_uoh_inc_ratio_alloc_wait) + { + return uoh_alloc_wait; + } + else + { + return uoh_alloc_yield; + } +} + void gc_heap::bgc_record_uoh_allocation(int gen_number, size_t size) { assert((gen_number >= uoh_start_generation) && (gen_number < total_generation_count)); + int uoh_idx = gen_number - uoh_start_generation; + if (gc_heap::background_running_p()) { - background_uoh_alloc_count++; - if (current_c_gc_state == c_gc_state_planning) { - uoh_a_bgc_planning[gen_number - uoh_start_generation] += size; + uoh_a_bgc_planning[uoh_idx] += size; } else { - uoh_a_bgc_marking[gen_number - uoh_start_generation] += size; + uoh_a_bgc_marking[uoh_idx] += size; } } else { - uoh_a_no_bgc[gen_number - uoh_start_generation] += size; + uoh_a_no_bgc[uoh_idx] += size; + } +} + +void gc_heap::bgc_record_uoh_end_seg_allocation (int gen_number, size_t size) +{ + if ((gen_number >= uoh_start_generation) && gc_heap::background_running_p()) + { + int uoh_idx = gen_number - uoh_start_generation; + bgc_uoh_current_size[uoh_idx] += size; + +#ifdef SIMPLE_DPRINTF + dynamic_data* dd_uoh = dynamic_data_of (gen_number); + size_t gen_size = generation_size (gen_number); + dprintf (3, ("h%d g%d size is now %Id (inc-ed %Id), size is %Id (gen size is %Id), budget %.3fmb, new alloc %.3fmb", + heap_number, gen_number, bgc_uoh_current_size[uoh_idx], + (bgc_uoh_current_size[uoh_idx] - bgc_begin_uoh_size[uoh_idx]), size, gen_size, + mb (dd_desired_allocation (dd_uoh)), (dd_new_allocation (dd_uoh) / 1000.0 / 1000.0))); +#endif //SIMPLE_DPRINTF } } #endif //BACKGROUND_GC @@ -18903,31 +18916,33 @@ allocation_state gc_heap::allocate_uoh (int gen_number, if (gc_heap::background_running_p()) { - //if ((background_uoh_alloc_count % bgc_alloc_spin_count_uoh) == 0) + uoh_allocation_action action = get_bgc_allocate_action (gen_number); + + if (action == uoh_alloc_yield) { - int spin_for_allocation = (gen_number == loh_generation) ? 
- bgc_loh_allocate_spin() : - bgc_poh_allocate_spin(); + add_saved_spinlock_info (true, me_release, mt_alloc_large, msl_status); + leave_spin_lock (&more_space_lock_uoh); + bool cooperative_mode = enable_preemptive(); + GCToOSInterface::YieldThread (0); + disable_preemptive (cooperative_mode); - if (spin_for_allocation > 0) - { - add_saved_spinlock_info (true, me_release, mt_alloc_large, msl_status); - leave_spin_lock (&more_space_lock_uoh); - bool cooperative_mode = enable_preemptive(); - GCToOSInterface::YieldThread (spin_for_allocation); - disable_preemptive (cooperative_mode); + msl_status = enter_spin_lock_msl (&more_space_lock_uoh); + if (msl_status == msl_retry_different_heap) return a_state_retry_allocate; - msl_status = enter_spin_lock_msl (&more_space_lock_uoh); - if (msl_status == msl_retry_different_heap) return a_state_retry_allocate; + add_saved_spinlock_info (true, me_acquire, mt_alloc_large, msl_status); + dprintf (SPINLOCK_LOG, ("[%d]spin Emsl uoh", heap_number)); + } + else if (action == uoh_alloc_wait) + { + dynamic_data* dd_uoh = dynamic_data_of (loh_generation); + dprintf (3, ("h%d WAIT loh begin %.3fmb, current size recorded is %.3fmb(begin+%.3fmb), budget %.3fmb, new alloc %.3fmb (alloc-ed %.3fmb)", + heap_number, mb (bgc_begin_uoh_size[0]), mb (bgc_uoh_current_size[0]), + mb (bgc_uoh_current_size[0] - bgc_begin_uoh_size[0]), + mb (dd_desired_allocation (dd_uoh)), (dd_new_allocation (dd_uoh) / 1000.0 / 1000.0), + mb (dd_desired_allocation (dd_uoh) - dd_new_allocation (dd_uoh)))); - add_saved_spinlock_info (true, me_acquire, mt_alloc_large, msl_status); - dprintf (SPINLOCK_LOG, ("[%d]spin Emsl uoh", heap_number)); - } - else if (spin_for_allocation < 0) - { - msl_status = wait_for_background (awr_uoh_alloc_during_bgc, true); - check_msl_status ("uoh a_state_acquire_seg", size); - } + msl_status = wait_for_background (awr_uoh_alloc_during_bgc, true); + check_msl_status ("uoh a_state_acquire_seg", size); } } #endif //BACKGROUND_GC @@ -38792,7 +38807,6 @@ void gc_heap::background_mark_phase () gen0_must_clear_bricks--; background_soh_alloc_count = 0; - background_uoh_alloc_count = 0; bgc_overflow_count = 0; bpromoted_bytes (heap_number) = 0; @@ -38824,8 +38838,6 @@ void gc_heap::background_mark_phase () slow = MAX_PTR; #endif //MULTIPLE_HEAPS - generation* gen = generation_of (max_generation); - dprintf(3,("BGC: stack marking")); sc.concurrent = TRUE; @@ -38836,16 +38848,19 @@ void gc_heap::background_mark_phase () dprintf(3,("BGC: finalization marking")); finalize_queue->GcScanRoots(background_promote_callback, heap_number, 0); - size_t total_soh_size = generation_sizes (generation_of (max_generation)); - size_t total_loh_size = generation_size (loh_generation); - size_t total_poh_size = generation_size (poh_generation); - bgc_begin_loh_size = total_loh_size; - bgc_begin_poh_size = total_poh_size; - bgc_loh_size_increased = 0; - bgc_poh_size_increased = 0; background_soh_size_end_mark = 0; - dprintf (GTC_LOG, ("BM: h%d: loh: %zd, soh: %zd, poh: %zd", heap_number, total_loh_size, total_soh_size, total_poh_size)); + for (int uoh_gen_idx = uoh_start_generation; uoh_gen_idx < total_generation_count; uoh_gen_idx++) + { + size_t uoh_size = generation_size (uoh_gen_idx); + int uoh_idx = uoh_gen_idx - uoh_start_generation; + bgc_begin_uoh_size[uoh_idx] = uoh_size; + bgc_uoh_current_size[uoh_idx] = uoh_size; + } + + dprintf (GTC_LOG, ("BM: h%d: soh: %zd, loh: %zd, poh: %zd", + heap_number, generation_sizes (generation_of (max_generation)), + bgc_uoh_current_size[loh_generation - 
uoh_start_generation], bgc_uoh_current_size[poh_generation - uoh_start_generation])); //concurrent_print_time_delta ("copying stack roots"); concurrent_print_time_delta ("CS"); @@ -39158,11 +39173,10 @@ void gc_heap::background_mark_phase () //marking sc.concurrent = FALSE; - total_soh_size = generation_sizes (generation_of (max_generation)); - total_loh_size = generation_size (loh_generation); - total_poh_size = generation_size (poh_generation); - - dprintf (GTC_LOG, ("FM: h%d: loh: %zd, soh: %zd, poh: %zd", heap_number, total_loh_size, total_soh_size, total_poh_size)); + dprintf (GTC_LOG, ("FM: h%d: soh: %zd, loh: %zd, poh: %zd", heap_number, + generation_sizes (generation_of (max_generation)), + bgc_uoh_current_size[loh_generation - uoh_start_generation], + bgc_uoh_current_size[poh_generation - uoh_start_generation])); #if defined(FEATURE_BASICFREEZE) && !defined(USE_REGIONS) if (ro_segments_in_range) @@ -44886,11 +44900,7 @@ void gc_heap::compute_new_dynamic_data (int gen_number) gen_data->free_obj_space_after = generation_free_obj_space (gen); gen_data->npinned_surv = out; #ifdef BACKGROUND_GC - if (i == loh_generation) - end_loh_size = total_gen_size; - - if (i == poh_generation) - end_poh_size = total_gen_size; + end_uoh_size[i - uoh_start_generation] = total_gen_size; #endif //BACKGROUND_GC dd_promoted_size (dd) = out; } diff --git a/src/coreclr/gc/gcconfig.h b/src/coreclr/gc/gcconfig.h index 0378323b6e96c5..9d5af77fa74fb0 100644 --- a/src/coreclr/gc/gcconfig.h +++ b/src/coreclr/gc/gcconfig.h @@ -128,6 +128,7 @@ class GCConfigStringHolder INT_CONFIG (BGCFLEnableTBH, "BGCFLEnableTBH", NULL, 0, "Enables TBH") \ INT_CONFIG (BGCFLEnableFF, "BGCFLEnableFF", NULL, 0, "Enables FF") \ INT_CONFIG (BGCG2RatioStep, "BGCG2RatioStep", NULL, 5, "Ratio correction factor for ML loop") \ + INT_CONFIG (UOHWaitBGCSizeIncPercent, "UOHWaitBGCSizeIncPercent", "System.GC.UOHWaitBGCSizeIncPercent",-1, "UOH allocation during a BGC waits till end of BGC after UOH increases by this percent") \ INT_CONFIG (GCHeapHardLimitSOH, "GCHeapHardLimitSOH", "System.GC.HeapHardLimitSOH", 0, "Specifies a hard limit for the GC heap SOH") \ INT_CONFIG (GCHeapHardLimitLOH, "GCHeapHardLimitLOH", "System.GC.HeapHardLimitLOH", 0, "Specifies a hard limit for the GC heap LOH") \ INT_CONFIG (GCHeapHardLimitPOH, "GCHeapHardLimitPOH", "System.GC.HeapHardLimitPOH", 0, "Specifies a hard limit for the GC heap POH") \ diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h index 334601327f0a63..5c8988b7ac8a92 100644 --- a/src/coreclr/gc/gcpriv.h +++ b/src/coreclr/gc/gcpriv.h @@ -561,6 +561,15 @@ enum allocation_state a_state_max }; +#ifdef BACKGROUND_GC +enum uoh_allocation_action +{ + uoh_alloc_normal, + uoh_alloc_yield, + uoh_alloc_wait +}; +#endif //BACKGROUND_GC + enum enter_msl_status { msl_entered, @@ -2308,18 +2317,16 @@ class gc_heap int lock_index, BOOL check_used_p, heap_segment* seg); -#endif //BACKGROUND_GC -#ifdef BACKGROUND_GC PER_HEAP_METHOD void bgc_track_uoh_alloc(); PER_HEAP_METHOD void bgc_untrack_uoh_alloc(); - PER_HEAP_METHOD BOOL bgc_loh_allocate_spin(); - - PER_HEAP_METHOD BOOL bgc_poh_allocate_spin(); + PER_HEAP_METHOD uoh_allocation_action get_bgc_allocate_action (int gen_number); PER_HEAP_METHOD void bgc_record_uoh_allocation(int gen_number, size_t size); + + PER_HEAP_METHOD void bgc_record_uoh_end_seg_allocation (int gen_number, size_t size); #endif //BACKGROUND_GC PER_HEAP_METHOD void add_saved_spinlock_info ( @@ -3551,11 +3558,6 @@ class gc_heap #ifdef BACKGROUND_GC PER_HEAP_FIELD_SINGLE_GC 
VOLATILE(bgc_state) current_bgc_state; - PER_HEAP_FIELD_SINGLE_GC size_t bgc_begin_loh_size; - PER_HEAP_FIELD_SINGLE_GC size_t bgc_begin_poh_size; - PER_HEAP_FIELD_SINGLE_GC size_t end_loh_size; - PER_HEAP_FIELD_SINGLE_GC size_t end_poh_size; - // We can't process the ephemeral range concurrently so we // wait till final mark to process it. PER_HEAP_FIELD_SINGLE_GC BOOL processed_eph_overflow_p; @@ -3567,6 +3569,9 @@ class gc_heap PER_HEAP_FIELD_SINGLE_GC uint8_t* next_sweep_obj; PER_HEAP_FIELD_SINGLE_GC uint8_t* current_sweep_pos; + PER_HEAP_FIELD_SINGLE_GC size_t bgc_begin_uoh_size[uoh_generation_count]; + PER_HEAP_FIELD_SINGLE_GC size_t end_uoh_size[uoh_generation_count]; + PER_HEAP_FIELD_SINGLE_GC size_t uoh_a_no_bgc[uoh_generation_count]; PER_HEAP_FIELD_SINGLE_GC size_t uoh_a_bgc_marking[uoh_generation_count]; PER_HEAP_FIELD_SINGLE_GC size_t uoh_a_bgc_planning[uoh_generation_count]; @@ -3777,14 +3782,10 @@ class gc_heap #endif //MULTIPLE_HEAPS #ifdef BACKGROUND_GC - // This includes what we allocate at the end of segment - allocating - // in free list doesn't increase the heap size. - PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t bgc_loh_size_increased; - PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t bgc_poh_size_increased; - // Updated by the allocator and reinit-ed in each BGC - PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t background_soh_alloc_count; - PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t background_uoh_alloc_count; + PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t bgc_uoh_current_size[uoh_generation_count]; + + PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t background_soh_alloc_count; PER_HEAP_FIELD_SINGLE_GC_ALLOC VOLATILE(int32_t) uoh_alloc_thread_count; #endif //BACKGROUND_GC @@ -5366,11 +5367,6 @@ class gc_heap #ifdef BACKGROUND_GC PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool gc_can_use_concurrent; - -#ifdef BGC_SERVO_TUNING - // This tells us why we chose to do a bgc in tuning. - PER_HEAP_ISOLATED_FIELD_DIAG_ONLY int saved_bgc_tuning_reason; -#endif //BGC_SERVO_TUNING #endif //BACKGROUND_GC PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint8_t* bookkeeping_start; @@ -5483,6 +5479,11 @@ class gc_heap // This can only go from false to true concurrently so if it is true, // it means the bgc info is ready. PER_HEAP_ISOLATED_FIELD_DIAG_ONLY VOLATILE(bool) is_last_recorded_bgc; + +#ifdef BGC_SERVO_TUNING + // This tells us why we chose to do a bgc in tuning. + PER_HEAP_ISOLATED_FIELD_DIAG_ONLY int saved_bgc_tuning_reason; +#endif //BGC_SERVO_TUNING #endif //BACKGROUND_GC #ifdef DYNAMIC_HEAP_COUNT @@ -6233,10 +6234,13 @@ class heap_segment int plan_gen_num; int old_card_survived; int pinned_survived; - // at the end of each GC, we increase each region in the region free list - // by 1. So we can observe if a region stays in the free list over many - // GCs. We stop at 99. It's initialized to 0 when a region is added to - // the region's free list. + // at the end of each GC, we increase the age of each region in the relevant region + // free list(s) by 1. So we can observe if a region stays in the free list over many + // GCs. We stop at 99. It's initialized to 0 when a region is added to the region's free list. + // + // "Relevant" means we only age basic regions during ephemeral GCs and age all regions + // during gen2 GCs. The only exception is we do age all regions during an ephemeral GC + // done at the beginning of a BGC. 
#define MAX_AGE_IN_FREE 99 #define AGE_IN_FREE_TO_DECOMMIT_BASIC 20 #define AGE_IN_FREE_TO_DECOMMIT_LARGE 5 diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Dataflow/HandleCallAction.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Dataflow/HandleCallAction.cs index 9c5927a7532c03..6f67e769dd3a1d 100644 --- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Dataflow/HandleCallAction.cs +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/Dataflow/HandleCallAction.cs @@ -90,15 +90,24 @@ private partial bool TryHandleIntrinsic( } } } - else if (typeInstantiated.Instantiation.IsConstrainedToBeReferenceTypes()) - { - // This will always succeed thanks to the runtime type loader - } else { - triggersWarning = true; - } + { + if (typeInstantiated.Instantiation.IsConstrainedToBeReferenceTypes()) + { + // This will always succeed thanks to the runtime type loader + } + else + { + triggersWarning = true; + } + // This should technically be in the IsConstrainedToBeReferenceTypes branch above + // but we have trim warning suppressions in dotnet/runtime and elsewhere that rely on the implementation + // detail that reference type instantiations will work, even if the generic is not + // constrained to be a reference type. + // MarkType will try to come up with a reference type type loader template. + _reflectionMarker.MarkType(_diagnosticContext.Origin, typeInstantiated, "MakeGenericType"); + } } else if (value == NullValue.Instance) { diff --git a/src/libraries/Microsoft.Extensions.FileSystemGlobbing/src/FilePatternMatch.cs b/src/libraries/Microsoft.Extensions.FileSystemGlobbing/src/FilePatternMatch.cs index e19649700e322c..87d3954ef41292 100644 --- a/src/libraries/Microsoft.Extensions.FileSystemGlobbing/src/FilePatternMatch.cs +++ b/src/libraries/Microsoft.Extensions.FileSystemGlobbing/src/FilePatternMatch.cs @@ -28,16 +28,17 @@ public struct FilePatternMatch : IEquatable<FilePatternMatch> /// If the matcher searched for "src/Project/**/*.cs" and the pattern matcher found "src/Project/Interfaces/IFile.cs", /// then <see cref="Stem" /> = "Interfaces/IFile.cs" and <see cref="Path" /> = "src/Project/Interfaces/IFile.cs". /// - public string? Stem { get; } + public string Stem { get; } /// /// Initializes new instance of <see cref="FilePatternMatch" /> /// /// The path to the file matched, relative to the beginning of the matching search pattern. /// The subpath to the file matched, relative to the first wildcard in the matching search pattern. - public FilePatternMatch(string path, string? stem) + public FilePatternMatch(string path, string stem) { ArgumentNullException.ThrowIfNull(path); + ArgumentNullException.ThrowIfNull(stem); Path = path; Stem = stem; diff --git a/src/libraries/Microsoft.Extensions.FileSystemGlobbing/src/Internal/PatternTestResult.cs b/src/libraries/Microsoft.Extensions.FileSystemGlobbing/src/Internal/PatternTestResult.cs index d1067b28574e41..4005fdc13ffa81 100644 --- a/src/libraries/Microsoft.Extensions.FileSystemGlobbing/src/Internal/PatternTestResult.cs +++ b/src/libraries/Microsoft.Extensions.FileSystemGlobbing/src/Internal/PatternTestResult.cs @@ -1,6 +1,8 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +using System.Diagnostics.CodeAnalysis; + namespace Microsoft.Extensions.FileSystemGlobbing.Internal { /// /// public struct PatternTestResult { public static readonly PatternTestResult Failed = new(isSuccessful: false, stem: null); + [MemberNotNullWhen(returnValue: true, nameof(Stem))] public bool IsSuccessful { get; } public string?
Stem { get; } diff --git a/src/libraries/Microsoft.Extensions.FileSystemGlobbing/tests/FilePatternMatchTests.cs b/src/libraries/Microsoft.Extensions.FileSystemGlobbing/tests/FilePatternMatchTests.cs index c16e991e3b111c..f352c27013b2fe 100644 --- a/src/libraries/Microsoft.Extensions.FileSystemGlobbing/tests/FilePatternMatchTests.cs +++ b/src/libraries/Microsoft.Extensions.FileSystemGlobbing/tests/FilePatternMatchTests.cs @@ -1,6 +1,7 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +using System; using Xunit; namespace Microsoft.Extensions.FileSystemGlobbing.Tests @@ -24,5 +25,17 @@ public void TestGetHashCode() FilePatternMatch matchCase2 = new FilePatternMatch("sub/sub2/bar/baz/three.txt", "Sub2/bar/baz/thrEE.txt"); Assert.Equal(matchCase1.GetHashCode(), matchCase2.GetHashCode()); } + + [Fact] + public void TestPathArgumentNullExceptions() + { + Assert.Throws<ArgumentNullException>(() => new FilePatternMatch(null, "sub2/bar/baz/three.txt")); + } + + [Fact] + public void TestStemArgumentNullExceptions() + { + Assert.Throws<ArgumentNullException>(() => new FilePatternMatch("sub2/bar/baz/three.txt", null)); + } } } diff --git a/src/libraries/sendtohelix-mobile.targets b/src/libraries/sendtohelix-mobile.targets index 1475f03ad4df4a..3d128ce22a3aa0 100644 --- a/src/libraries/sendtohelix-mobile.targets +++ b/src/libraries/sendtohelix-mobile.targets @@ -90,7 +90,6 @@ Include="$([System.IO.Directory]::GetDirectories('$(TestArchiveTestsRoot)', '*.app', System.IO.SearchOption.AllDirectories))"> $(AppleTestTarget) $(_workItemTimeout) - $(_workItemTimeout) $(_XHarnessAppleCustomCommand) @@ -100,7 +99,6 @@ Exclude="$([System.IO.Directory]::GetFiles('$(TestArchiveRoot)', 'xharness-app-payload*', System.IO.SearchOption.AllDirectories))"> $(AppleTestTarget) $(_workItemTimeout) - $(_workItemTimeout) $(_XHarnessAppleCustomCommand) diff --git a/src/tests/Common/helixpublishwitharcade.proj b/src/tests/Common/helixpublishwitharcade.proj index b9daf4afbbe59e..28fc40a768f954 100644 --- a/src/tests/Common/helixpublishwitharcade.proj +++ b/src/tests/Common/helixpublishwitharcade.proj @@ -1010,7 +1010,6 @@ $(AppleTestTarget) $([System.TimeSpan]::FromMinutes($(TimeoutPerTestCollectionInMinutes))) - $([System.TimeSpan]::FromMinutes($(TimeoutPerTestCollectionInMinutes))) $(SigningCommand) dotnet $(XUnitRunnerDll) %(XUnitWrapperDlls) $(XUnitRunnerArgs) @@ -1018,7 +1017,6 @@ --set-env=TestExclusionListPath=TestExclusionList.txt $(AppleTestTarget) $([System.TimeSpan]::FromMinutes($(TimeoutPerTestCollectionInMinutes))) - $([System.TimeSpan]::FromMinutes($(TimeoutPerTestCollectionInMinutes))) diff --git a/src/tests/nativeaot/SmokeTests/DynamicGenerics/Github118072.cs b/src/tests/nativeaot/SmokeTests/DynamicGenerics/Github118072.cs index 3e5f12b666c062..e490ffbfd02024 100644 --- a/src/tests/nativeaot/SmokeTests/DynamicGenerics/Github118072.cs +++ b/src/tests/nativeaot/SmokeTests/DynamicGenerics/Github118072.cs @@ -21,26 +21,22 @@ class GitHub118072 [TestMethod] public static void RunTest() { - Type current = typeof(object); - GetMI1().MakeGenericMethod(typeof(object)).Invoke(null, []); - current = FillCache(current); + FlushCache(); GetMI2().MakeGenericMethod(typeof(object)).Invoke(null, []); - current = FillCache(current); + FlushCache(); GetMI3().MakeGenericMethod(typeof(object)).Invoke(null, []); - current = FillCache(current); + FlushCache(); GetMI4().MakeGenericMethod(typeof(object)).Invoke(null, []); - static Type FillCache(Type current) + static void FlushCache() { - for
(int i = 0; i < 400; i++) + // Make sure the cached type loader contexts are flushed + for (int j = 0; j < 10; j++) { - Type next = typeof(MyClass<>).MakeGenericType(current); - Activator.CreateInstance(next); - current = next; + GC.Collect(); + GC.WaitForPendingFinalizers(); } - - return current; } } diff --git a/src/tests/nativeaot/SmokeTests/Reflection/Reflection.cs b/src/tests/nativeaot/SmokeTests/Reflection/Reflection.cs index a0ee00a5e7ac21..10a21b4d59e236 100644 --- a/src/tests/nativeaot/SmokeTests/Reflection/Reflection.cs +++ b/src/tests/nativeaot/SmokeTests/Reflection/Reflection.cs @@ -76,6 +76,8 @@ private static int Main() Test105034Regression.Run(); TestMethodsNeededFromNativeLayout.Run(); TestFieldAndParamMetadata.Run(); + TestActivationWithoutConstructor.Run(); + TestNestedMakeGeneric.Run(); // // Mostly functionality tests @@ -859,6 +861,60 @@ public static void Run() } } + class TestActivationWithoutConstructor + { + public static void Run() + { + { + object o = Activator.CreateInstance(typeof(StructForCreateInstanceDirect<>).MakeGenericType(GetTheType())); + if (!o.ToString().Contains(nameof(StructForCreateInstanceDirect<>))) + throw new Exception(); + } + + { + object o = CreateInstance(typeof(StructForCreateInstanceIndirect<>).MakeGenericType(GetTheType())); + if (!o.ToString().Contains(nameof(StructForCreateInstanceIndirect<>))) + throw new Exception(); + + static object CreateInstance([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] Type t) + => Activator.CreateInstance(t); + } + + { + object o = RuntimeHelpers.GetUninitializedObject(typeof(StructForGetUninitializedObject<>).MakeGenericType(GetTheType())); + if (!o.ToString().Contains(nameof(StructForGetUninitializedObject<>))) + throw new Exception(); + } + + [MethodImpl(MethodImplOptions.NoInlining)] + static Type GetTheType() => typeof(Atom); + } + + class Atom; + + struct StructForCreateInstanceDirect<T> where T : class; + struct StructForCreateInstanceIndirect<T> where T : class; + struct StructForGetUninitializedObject<T> where T : class; + } + + class TestNestedMakeGeneric + { + class Outie<T> where T : class; + class Innie<T> where T : class; + class Atom; + + public static void Run() + { + Type inner = typeof(Innie<>).MakeGenericType(GetAtom()); + Type outer = typeof(Outie<>).MakeGenericType(inner); + + Console.WriteLine(Activator.CreateInstance(outer)); + + [MethodImpl(MethodImplOptions.NoInlining)] + static Type GetAtom() => typeof(Atom); + } + } + class TestCreateDelegate { internal class Greeter