
Commit 08b677b

Authored and committed by Thomas Schatzl
8071277: G1: Merge commits and uncommits of contiguous memory
Reviewed-by: iwalulya, ayang
1 parent 75269fd commit 08b677b

File tree

3 files changed: +67 −45 lines

src/hotspot/share/gc/g1/g1NUMA.cpp

Lines changed: 1 addition & 3 deletions
@@ -203,9 +203,7 @@ uint G1NUMA::index_for_region(G1HeapRegion* hr) const {
 // * G1HeapRegion #: |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
 // * NUMA node #:    |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
 void G1NUMA::request_memory_on_node(void* aligned_address, size_t size_in_bytes, uint region_index) {
-  if (!is_enabled()) {
-    return;
-  }
+  assert(is_enabled(), "must be, check before");

   if (size_in_bytes == 0) {
     return;
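The hunk above replaces a silent early return with an assert: callers are now expected to check NUMA availability first, through the new should_distribute_across_numa_nodes() helper added in g1RegionToSpaceMapper.cpp below. A minimal standalone sketch of that guard-hoisting pattern, using illustrative stand-ins rather than the actual JDK types:

    #include <cassert>
    #include <cstddef>

    // Stand-in for G1NUMA::is_enabled(); illustrative only.
    static bool g_numa_enabled = false;

    // Callee keeps a debug-build precondition instead of a runtime check.
    void request_memory_on_node_sketch(void* addr, size_t size_in_bytes, unsigned region_index) {
      assert(g_numa_enabled && "must be, check before");
      if (size_in_bytes == 0) {
        return;
      }
      (void)addr; (void)region_index;  // the real code binds the range to the region's node
    }

    // Caller hoists the guard, so NUMA-disabled runs skip the call entirely.
    void commit_path_sketch(void* addr, size_t size_in_bytes, unsigned region_index) {
      if (g_numa_enabled) {  // what should_distribute_across_numa_nodes() tests
        request_memory_on_node_sketch(addr, size_in_bytes, region_index);
      }
    }

    int main() {
      g_numa_enabled = true;
      int dummy = 0;
      commit_path_sketch(&dummy, sizeof(dummy), 0);
      return 0;
    }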

src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp

Lines changed: 64 additions & 42 deletions
@@ -96,13 +96,15 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
     const size_t start_page = (size_t)start_idx * _pages_per_region;
     const size_t size_in_pages = num_regions * _pages_per_region;
     bool zero_filled = _storage.commit(start_page, size_in_pages);
-    if (_memory_tag == mtJavaHeap) {
+
+    if (should_distribute_across_numa_nodes()) {
       for (uint region_index = start_idx; region_index < start_idx + num_regions; region_index++ ) {
         void* address = _storage.page_start(region_index * _pages_per_region);
         size_t size_in_bytes = _storage.page_size() * _pages_per_region;
         G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region_index);
       }
     }
+
     if (AlwaysPreTouch) {
       _storage.pretouch(start_page, size_in_pages, pretouch_workers);
     }
@@ -122,7 +124,7 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {

 // G1RegionToSpaceMapper implementation where the region granularity is smaller
 // than the commit granularity.
-// Basically, the contents of one OS page span several regions.
+// Basically, the contents of one OS page spans several regions.
 class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
   size_t _regions_per_page;
   // Lock to prevent bitmap updates and the actual underlying
@@ -148,13 +150,18 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
     return _region_commit_map.find_first_set_bit(region, region_limit) != region_limit;
   }

-  void numa_request_on_node(size_t page_idx) {
-    if (_memory_tag == mtJavaHeap) {
-      uint region = (uint)(page_idx * _regions_per_page);
-      void* address = _storage.page_start(page_idx);
-      size_t size_in_bytes = _storage.page_size();
-      G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region);
+  bool commit_pages(size_t start_page, size_t size_in_pages) {
+    bool result = _storage.commit(start_page, size_in_pages);
+
+    if (should_distribute_across_numa_nodes()) {
+      for (size_t page = start_page; page < start_page + size_in_pages; page++) {
+        uint region = checked_cast<uint>(page * _regions_per_page);
+        void* address = _storage.page_start(page);
+        size_t size_in_bytes = _storage.page_size();
+        G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region);
+      }
     }
+    return result;
   }

  public:
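The new commit_pages() helper folds the former per-page NUMA request into a loop that follows a single ranged _storage.commit() call. The index arithmetic is the only subtle part: with _regions_per_page regions on each OS page, page p starts at region p * _regions_per_page, and that first region is the one whose NUMA placement is requested for the whole page. A small self-contained illustration of the mapping (the ratio of four regions per page is an assumption for the example, not taken from the patch):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t regions_per_page = 4;  // assumed ratio, illustration only
      const size_t start_page = 2, size_in_pages = 3;
      for (size_t page = start_page; page < start_page + size_in_pages; page++) {
        // First region on this page; its NUMA node is requested for the whole page.
        unsigned region = (unsigned)(page * regions_per_page);
        printf("page %zu -> request node placement of region %u\n", page, region);
      }
      return 0;
    }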
@@ -171,6 +178,21 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
     guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
   }

+  size_t find_first_uncommitted(size_t page, size_t end) {
+    assert(page < end, "must be");
+    while (page < end && is_page_committed(page)) {
+      page++;
+    }
+    return page;
+  }
+
+  size_t find_first_committed(size_t page, size_t end) {
+    while (page < end && !is_page_committed(page)) {
+      page++;
+    }
+    return MIN2(page, end);
+  }
+
   virtual void commit_regions(uint start_idx, size_t num_regions, WorkerThreads* pretouch_workers) {
     uint region_limit = (uint)(start_idx + num_regions);
     assert(num_regions > 0, "Must commit at least one region");
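Taken together, the two helpers above carve the first run of uncommitted pages out of [page, end): find_first_uncommitted() skips the committed prefix, and find_first_committed() finds where that uncommitted run ends. A self-contained sketch against a toy committed[] map (illustrative, not JDK code) showing how one call pair yields a single contiguous range:

    #include <cstddef>
    #include <cstdio>

    static const bool committed[8] = {true, true, false, false, false, true, true, true};

    static size_t find_first_uncommitted_sketch(size_t page, size_t end) {
      while (page < end && committed[page]) {
        page++;
      }
      return page;
    }

    static size_t find_first_committed_sketch(size_t page, size_t end) {
      while (page < end && !committed[page]) {
        page++;
      }
      return page < end ? page : end;  // MIN2(page, end) in HotSpot terms
    }

    int main() {
      const size_t start_page = 0, end_page = 8;
      size_t uncommitted_l = find_first_uncommitted_sketch(start_page, end_page);      // -> 2
      size_t uncommitted_r = find_first_committed_sketch(uncommitted_l + 1, end_page); // -> 5
      printf("commit pages [%zu, %zu): %zu page(s) in one call\n",
             uncommitted_l, uncommitted_r, uncommitted_r - uncommitted_l);
      return 0;
    }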
@@ -179,46 +201,39 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {

     size_t const NoPage = SIZE_MAX;

-    size_t first_committed = NoPage;
-    size_t num_committed = 0;
+    size_t first_newly_committed = NoPage;
+    size_t num_committed_pages = 0;

-    size_t start_page = region_idx_to_page_idx(start_idx);
-    size_t end_page = region_idx_to_page_idx(region_limit - 1);
+    size_t const start_page = region_idx_to_page_idx(start_idx);
+    size_t const end_page = region_idx_to_page_idx(region_limit - 1) + 1;

     bool all_zero_filled = true;

     // Concurrent operations might operate on regions sharing the same
     // underlying OS page. See lock declaration for more details.
     {
       MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
-      for (size_t page = start_page; page <= end_page; page++) {
-        if (!is_page_committed(page)) {
-          // Page not committed.
-          if (num_committed == 0) {
-            first_committed = page;
-          }
-          num_committed++;
-
-          if (!_storage.commit(page, 1)) {
-            // Found dirty region during commit.
-            all_zero_filled = false;
-          }
-
-          // Move memory to correct NUMA node for the heap.
-          numa_request_on_node(page);
-        } else {
-          // Page already committed.
-          all_zero_filled = false;
-        }
+
+      size_t uncommitted_l = find_first_uncommitted(start_page, end_page);
+      size_t uncommitted_r = find_first_committed(uncommitted_l + 1, end_page);
+
+      first_newly_committed = uncommitted_l;
+      num_committed_pages = uncommitted_r - uncommitted_l;
+
+      if (num_committed_pages > 0 &&
+          !commit_pages(first_newly_committed, num_committed_pages)) {
+        all_zero_filled = false;
       }

+      all_zero_filled &= (uncommitted_l == start_page) && (uncommitted_r == end_page);
+
       // Update the commit map for the given range. Not using the par_set_range
       // since updates to _region_commit_map for this mapper is protected by _lock.
       _region_commit_map.set_range(start_idx, region_limit, BitMap::unknown_range);
     }

-    if (AlwaysPreTouch && num_committed > 0) {
-      _storage.pretouch(first_committed, num_committed, pretouch_workers);
+    if (AlwaysPreTouch && num_committed_pages > 0) {
+      _storage.pretouch(first_newly_committed, num_committed_pages, pretouch_workers);
     }

     fire_on_commit(start_idx, num_regions, all_zero_filled);
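The rewritten commit_regions() now commits the whole uncommitted run [uncommitted_l, uncommitted_r) with one commit_pages() call instead of page by page, and the zero-fill accounting is condensed accordingly: the result is only known to be zero-filled when the entire [start_page, end_page) span was newly committed and the commit itself reported fresh memory. A compact restatement of that logic (illustrative, not JDK code):

    #include <cstddef>
    #include <cstdio>

    static bool zero_filled_after_commit(size_t start_page, size_t end_page,
                                         size_t uncommitted_l, size_t uncommitted_r,
                                         bool commit_reported_zeroed) {
      bool all_zero_filled = true;
      if (uncommitted_r - uncommitted_l > 0 && !commit_reported_zeroed) {
        all_zero_filled = false;  // commit found dirty (reused) pages
      }
      // Any pre-committed page inside the range may hold old data.
      all_zero_filled &= (uncommitted_l == start_page) && (uncommitted_r == end_page);
      return all_zero_filled;
    }

    int main() {
      printf("%d\n", zero_filled_after_commit(0, 4, 0, 4, true));  // 1: whole range newly committed
      printf("%d\n", zero_filled_after_commit(0, 4, 1, 4, true));  // 0: page 0 was already committed
      return 0;
    }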
@@ -230,8 +245,8 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
     assert(_region_commit_map.find_first_clear_bit(start_idx, region_limit) == region_limit,
            "Should only be committed regions in the range [%u, %u)", start_idx, region_limit);

-    size_t start_page = region_idx_to_page_idx(start_idx);
-    size_t end_page = region_idx_to_page_idx(region_limit - 1);
+    size_t const start_page = region_idx_to_page_idx(start_idx);
+    size_t const end_page = region_idx_to_page_idx(region_limit - 1) + 1;

     // Concurrent operations might operate on regions sharing the same
     // underlying OS page. See lock declaration for more details.
@@ -240,13 +255,16 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
     // updates to _region_commit_map for this mapper is protected by _lock.
     _region_commit_map.clear_range(start_idx, region_limit, BitMap::unknown_range);

-    for (size_t page = start_page; page <= end_page; page++) {
-      // We know all pages were committed before clearing the map. If the
-      // the page is still marked as committed after the clear we should
-      // not uncommit it.
-      if (!is_page_committed(page)) {
-        _storage.uncommit(page, 1);
-      }
+    // We know all pages were committed before clearing the map. If the
+    // the page is still marked as committed after the clear we should
+    // not uncommit it.
+    size_t uncommitted_l = find_first_uncommitted(start_page, end_page);
+    size_t uncommitted_r = find_first_committed(uncommitted_l + 1, end_page);
+
+    size_t num_uncommitted_pages_found = uncommitted_r - uncommitted_l;
+
+    if (num_uncommitted_pages_found > 0) {
+      _storage.uncommit(uncommitted_l, num_uncommitted_pages_found);
     }
   }
 };
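uncommit_regions() gets the mirror-image treatment: after the commit map is cleared, one scan pair finds the single run of pages that no longer back any committed region, and a lone _storage.uncommit() releases it. Both rewrites appear to rest on the same structural fact: within [start_page, end_page), only the first and the last page can be shared with regions outside the requested range, so the pages needing a commit or an uncommit always form one contiguous run. A schematic in the style of the g1NUMA.cpp diagram above, assuming four regions per page:

    * OS page #:       |----------#0----------||----------#1----------||----------#2----------|
    * G1HeapRegion #:  |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-|
    * Requested range:             [#2 ...................................... #9]

Pages #0 and #2 may already be committed (or need to stay committed) on behalf of regions #0..#1 and #10..#11 outside the range; page #1 cannot be. Hence a single find_first_uncommitted()/find_first_committed() pass is enough.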
@@ -257,6 +275,10 @@ void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, b
   }
 }

+bool G1RegionToSpaceMapper::should_distribute_across_numa_nodes() const {
+  return _memory_tag == mtJavaHeap && G1NUMA::numa()->is_enabled();
+}
+
 G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                             size_t actual_size,
                                                             size_t page_size,

src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp

Lines changed: 2 additions & 0 deletions
@@ -58,6 +58,8 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
   G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemTag mem_tag);

   void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
+
+  bool should_distribute_across_numa_nodes() const;
  public:
   MemRegion reserved() { return _storage.reserved(); }
