Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 1 addition & 4 deletions bench-cargo-miri/big-allocs/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,7 @@ fn main() {
// We can't use too big of an allocation or this code will encounter an allocation failure in
// CI. Since the allocation can't be huge, we need to do a few iterations so that the effect
// we're trying to measure is clearly visible above the interpreter's startup time.
// FIXME (https://github.com/rust-lang/miri/issues/4253): On 32bit targets, we can run out of
// usable addresses if we don't reuse, leading to random test failures.
let count = if cfg!(target_pointer_width = "32") { 8 } else { 12 };
for _ in 0..count {
for _ in 0..20 {
drop(Vec::<u8>::with_capacity(512 * 1024 * 1024));
}
}
5 changes: 5 additions & 0 deletions src/alloc_addresses/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,11 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
if global_state.next_base_addr > this.target_usize_max() {
throw_exhaust!(AddressSpaceFull);
}
// If we filled up more than half the address space, start aggressively reusing
// addresses to avoid running out.
if global_state.next_base_addr > u64::try_from(this.target_isize_max()).unwrap() {
global_state.reuse.address_space_shortage();
}

interp_ok(base_addr)
}
Expand Down
15 changes: 11 additions & 4 deletions src/alloc_addresses/reuse_pool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ pub struct ReusePool {
/// allocations as address-size pairs, the list must be sorted by the size and then the thread ID.
///
/// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to
/// less than 64 different possible value, that bounds the overall size of the pool.
/// less than 64 different possible values, that bounds the overall size of the pool.
///
/// We also store the ID and the data-race clock of the thread that donated this pool element,
/// to ensure synchronization with the thread that picks up this address.
Expand All @@ -36,6 +36,15 @@ impl ReusePool {
}
}

/// Call this when we are using up a lot of the address space: if memory reuse is enabled at all,
/// this will bump the intra-thread reuse rate to 100% so that we can keep running this program as
/// long as possible.
pub fn address_space_shortage(&mut self) {
// NOTE(review): a rate of 0.0 presumably means address reuse was disabled by the user;
// we deliberately respect that and do nothing rather than force-enable reuse — TODO confirm
// against where `address_reuse_rate` is configured.
if self.address_reuse_rate > 0.0 {
// Otherwise, remember (and hand back) every eligible freed address from now on.
self.address_reuse_rate = 1.0;
}
}

fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, ThreadId, VClock)> {
let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
if self.pool.len() <= pool_idx {
Expand All @@ -55,9 +64,7 @@ impl ReusePool {
clock: impl FnOnce() -> VClock,
) {
// Let's see if we even want to remember this address.
// We don't remember stack addresses: there's a lot of them (so the perf impact is big),
// and we only want to reuse stack slots within the same thread or else we'll add a lot of
// undesired synchronization.
// We don't remember stack addresses since there's so many of them (so the perf impact is big).
if kind == MemoryKind::Stack || !rng.random_bool(self.address_reuse_rate) {
return;
}
Expand Down