@@ -96,13 +96,15 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
96
96
const size_t start_page = (size_t )start_idx * _pages_per_region;
97
97
const size_t size_in_pages = num_regions * _pages_per_region;
98
98
bool zero_filled = _storage.commit (start_page, size_in_pages);
99
- if (_memory_tag == mtJavaHeap) {
99
+
100
+ if (should_distribute_across_numa_nodes ()) {
100
101
for (uint region_index = start_idx; region_index < start_idx + num_regions; region_index++ ) {
101
102
void * address = _storage.page_start (region_index * _pages_per_region);
102
103
size_t size_in_bytes = _storage.page_size () * _pages_per_region;
103
104
G1NUMA::numa ()->request_memory_on_node (address, size_in_bytes, region_index);
104
105
}
105
106
}
107
+
106
108
if (AlwaysPreTouch) {
107
109
_storage.pretouch (start_page, size_in_pages, pretouch_workers);
108
110
}
@@ -122,7 +124,7 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
122
124
123
125
// G1RegionToSpaceMapper implementation where the region granularity is smaller
124
126
// than the commit granularity.
125
- // Basically, the contents of one OS page span several regions.
127
+ // Basically, the contents of one OS page span several regions.
126
128
class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
127
129
size_t _regions_per_page;
128
130
// Lock to prevent bitmap updates and the actual underlying
@@ -148,13 +150,18 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
148
150
return _region_commit_map.find_first_set_bit (region, region_limit) != region_limit;
149
151
}
150
152
151
- void numa_request_on_node (size_t page_idx) {
152
- if (_memory_tag == mtJavaHeap) {
153
- uint region = (uint)(page_idx * _regions_per_page);
154
- void * address = _storage.page_start (page_idx);
155
- size_t size_in_bytes = _storage.page_size ();
156
- G1NUMA::numa ()->request_memory_on_node (address, size_in_bytes, region);
153
+ bool commit_pages (size_t start_page, size_t size_in_pages) {
154
+ bool result = _storage.commit (start_page, size_in_pages);
155
+
156
+ if (should_distribute_across_numa_nodes ()) {
157
+ for (size_t page = start_page; page < start_page + size_in_pages; page++) {
158
+ uint region = checked_cast<uint>(page * _regions_per_page);
159
+ void * address = _storage.page_start (page);
160
+ size_t size_in_bytes = _storage.page_size ();
161
+ G1NUMA::numa ()->request_memory_on_node (address, size_in_bytes, region);
162
+ }
157
163
}
164
+ return result;
158
165
}
159
166
160
167
public:
@@ -171,6 +178,21 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
171
178
guarantee ((page_size * commit_factor) >= alloc_granularity, " allocation granularity smaller than commit granularity" );
172
179
}
173
180
181
+ size_t find_first_uncommitted (size_t page, size_t end) {
182
+ assert (page < end, " must be" );
183
+ while (page < end && is_page_committed (page)) {
184
+ page++;
185
+ }
186
+ return page;
187
+ }
188
+
189
+ size_t find_first_committed (size_t page, size_t end) {
190
+ while (page < end && !is_page_committed (page)) {
191
+ page++;
192
+ }
193
+ return MIN2 (page, end);
194
+ }
195
+
174
196
virtual void commit_regions (uint start_idx, size_t num_regions, WorkerThreads* pretouch_workers) {
175
197
uint region_limit = (uint)(start_idx + num_regions);
176
198
assert (num_regions > 0 , " Must commit at least one region" );
@@ -179,46 +201,39 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
179
201
180
202
size_t const NoPage = SIZE_MAX;
181
203
182
- size_t first_committed = NoPage;
183
- size_t num_committed = 0 ;
204
+ size_t first_newly_committed = NoPage;
205
+ size_t num_committed_pages = 0 ;
184
206
185
- size_t start_page = region_idx_to_page_idx (start_idx);
186
- size_t end_page = region_idx_to_page_idx (region_limit - 1 );
207
+ size_t const start_page = region_idx_to_page_idx (start_idx);
208
+ size_t const end_page = region_idx_to_page_idx (region_limit - 1 ) + 1 ;
187
209
188
210
bool all_zero_filled = true ;
189
211
190
212
// Concurrent operations might operate on regions sharing the same
191
213
// underlying OS page. See lock declaration for more details.
192
214
{
193
215
MutexLocker ml (&_lock, Mutex::_no_safepoint_check_flag);
194
- for (size_t page = start_page; page <= end_page; page++) {
195
- if (!is_page_committed (page)) {
196
- // Page not committed.
197
- if (num_committed == 0 ) {
198
- first_committed = page;
199
- }
200
- num_committed++;
201
-
202
- if (!_storage.commit (page, 1 )) {
203
- // Found dirty region during commit.
204
- all_zero_filled = false ;
205
- }
206
-
207
- // Move memory to correct NUMA node for the heap.
208
- numa_request_on_node (page);
209
- } else {
210
- // Page already committed.
211
- all_zero_filled = false ;
212
- }
216
+
217
+ size_t uncommitted_l = find_first_uncommitted (start_page, end_page);
218
+ size_t uncommitted_r = find_first_committed (uncommitted_l + 1 , end_page);
219
+
220
+ first_newly_committed = uncommitted_l;
221
+ num_committed_pages = uncommitted_r - uncommitted_l;
222
+
223
+ if (num_committed_pages > 0 &&
224
+ !commit_pages (first_newly_committed, num_committed_pages)) {
225
+ all_zero_filled = false ;
213
226
}
214
227
228
+ all_zero_filled &= (uncommitted_l == start_page) && (uncommitted_r == end_page);
229
+
215
230
// Update the commit map for the given range. Not using the par_set_range
216
231
// since updates to _region_commit_map for this mapper is protected by _lock.
217
232
_region_commit_map.set_range (start_idx, region_limit, BitMap::unknown_range);
218
233
}
219
234
220
- if (AlwaysPreTouch && num_committed > 0 ) {
221
- _storage.pretouch (first_committed, num_committed , pretouch_workers);
235
+ if (AlwaysPreTouch && num_committed_pages > 0 ) {
236
+ _storage.pretouch (first_newly_committed, num_committed_pages , pretouch_workers);
222
237
}
223
238
224
239
fire_on_commit (start_idx, num_regions, all_zero_filled);
@@ -230,8 +245,8 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
230
245
assert (_region_commit_map.find_first_clear_bit (start_idx, region_limit) == region_limit,
231
246
" Should only be committed regions in the range [%u, %u)" , start_idx, region_limit);
232
247
233
- size_t start_page = region_idx_to_page_idx (start_idx);
234
- size_t end_page = region_idx_to_page_idx (region_limit - 1 );
248
+ size_t const start_page = region_idx_to_page_idx (start_idx);
249
+ size_t const end_page = region_idx_to_page_idx (region_limit - 1 ) + 1 ;
235
250
236
251
// Concurrent operations might operate on regions sharing the same
237
252
// underlying OS page. See lock declaration for more details.
@@ -240,13 +255,16 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
240
255
// updates to _region_commit_map for this mapper is protected by _lock.
241
256
_region_commit_map.clear_range (start_idx, region_limit, BitMap::unknown_range);
242
257
243
- for (size_t page = start_page; page <= end_page; page++) {
244
- // We know all pages were committed before clearing the map. If the
245
- // the page is still marked as committed after the clear we should
246
- // not uncommit it.
247
- if (!is_page_committed (page)) {
248
- _storage.uncommit (page, 1 );
249
- }
258
+ // We know all pages were committed before clearing the map. If the
259
+ // the page is still marked as committed after the clear we should
260
+ // not uncommit it.
261
+ size_t uncommitted_l = find_first_uncommitted (start_page, end_page);
262
+ size_t uncommitted_r = find_first_committed (uncommitted_l + 1 , end_page);
263
+
264
+ size_t num_uncommitted_pages_found = uncommitted_r - uncommitted_l;
265
+
266
+ if (num_uncommitted_pages_found > 0 ) {
267
+ _storage.uncommit (uncommitted_l, num_uncommitted_pages_found);
250
268
}
251
269
}
252
270
};
@@ -257,6 +275,10 @@ void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, b
257
275
}
258
276
}
259
277
278
+ bool G1RegionToSpaceMapper::should_distribute_across_numa_nodes () const {
279
+ return _memory_tag == mtJavaHeap && G1NUMA::numa ()->is_enabled ();
280
+ }
281
+
260
282
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper (ReservedSpace rs,
261
283
size_t actual_size,
262
284
size_t page_size,
0 commit comments