Similar to G1's regions, ZGC divides the heap into pages for management. The difference is that ZGC pages are not all one fixed size: they come in three types, Small, Medium, and Large. This article walks through ZGC's paging mechanism in detail.
ZGC's three page types are: small (2MB), medium (32MB), and large (a multiple of 2MB).
zGlobals_linux_x86.hpp
const size_t ZPlatformPageSizeSmallShift = 21; // 2M
const size_t ZPlatformAddressOffsetBits = 42; // 4TB
zGlobals.hpp
// Page types
const uint8_t ZPageTypeSmall = 0;
const uint8_t ZPageTypeMedium = 1;
const uint8_t ZPageTypeLarge = 2;
// Page size shifts
const size_t ZPageSizeSmallShift = ZPlatformPageSizeSmallShift;
const size_t ZPageSizeMediumShift = ZPageSizeSmallShift + 4;
const size_t ZPageSizeMinShift = ZPageSizeSmallShift;
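Putting the shifts together: a small page is 1 << 21 = 2M and a medium page is 1 << 25 = 32M. A minimal standalone sketch (not ZGC code; the constant names just mirror zGlobals.hpp) that derives these sizes:
#include <cstdio>
#include <cstddef>

int main() {
  const size_t ZPageSizeSmallShift  = 21;
  const size_t ZPageSizeMediumShift = ZPageSizeSmallShift + 4;  // 25
  const size_t small_page  = size_t(1) << ZPageSizeSmallShift;  // 2M
  const size_t medium_page = size_t(1) << ZPageSizeMediumShift; // 32M
  printf("small page:  %zu MB\n", small_page  >> 20);  // prints 2
  printf("medium page: %zu MB\n", medium_page >> 20);  // prints 32
  return 0;
}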
A large page is only allowed to contain a single object.
zPage.inline.hpp
inline uint32_t ZPage::object_max_count() const {
  switch (type()) {
  case ZPageTypeLarge:
    // A large page can only contain a single
    // object aligned to the start of the page.
    return 1;

  default:
    return (uint32_t)(size() >> object_alignment_shift());
  }
}
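For small and medium pages the maximum object count is simply the page size divided by the object alignment. A quick standalone check, assuming the default 8-byte object alignment for small pages (an alignment shift of 3; the real value comes from ZObjectAlignmentSmallShift, so treat this as an assumption):
#include <cassert>
#include <cstdint>
#include <cstddef>

int main() {
  const size_t small_page_size        = size_t(1) << 21;  // 2M small page
  const int    object_alignment_shift = 3;                // assumed 8-byte alignment
  const uint32_t max_count = (uint32_t)(small_page_size >> object_alignment_shift);
  assert(max_count == 262144);  // up to 256K objects per small page
  return 0;
}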
zObjectAllocator.cpp
uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java(), "Should be a Java thread");

  uintptr_t addr = 0;

  // Allocate new large page
  const size_t page_size = align_up(size, ZPageSizeMin);
  ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags);
  if (page != NULL) {
    // Allocate the object
    addr = page->alloc_object(size);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) {
  if (size <= ZObjectSizeLimitSmall) {
    // Small
    return alloc_small_object(size, flags);
  } else if (size <= ZObjectSizeLimitMedium) {
    // Medium
    return alloc_medium_object(size, flags);
  } else {
    // Large
    return alloc_large_object(size, flags);
  }
}
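alloc_object dispatches purely on object size. In the OpenJDK 12 zGlobals.hpp, ZObjectSizeLimitSmall and ZObjectSizeLimitMedium are defined as one-eighth of the corresponding page size (capping per-page waste at 12.5%), i.e. 256K and 4M. A standalone sketch of the resulting classification (the classify helper is illustrative, not part of ZGC):
#include <cstdio>
#include <cstddef>

// Mirrors the zGlobals.hpp definitions (12.5% max waste per page)
const size_t ZPageSizeSmall         = size_t(1) << 21;      // 2M
const size_t ZPageSizeMedium        = size_t(1) << 25;      // 32M
const size_t ZObjectSizeLimitSmall  = ZPageSizeSmall / 8;   // 256K
const size_t ZObjectSizeLimitMedium = ZPageSizeMedium / 8;  // 4M

const char* classify(size_t size) {
  if (size <= ZObjectSizeLimitSmall)  return "small";
  if (size <= ZObjectSizeLimitMedium) return "medium";
  return "large";
}

int main() {
  printf("%s\n", classify(64));              // small
  printf("%s\n", classify(1 * 1024 * 1024)); // medium
  printf("%s\n", classify(8 * 1024 * 1024)); // large
  return 0;
}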
When allocating an object from an existing page fails, a new page has to be requested. The code below uses small-object allocation as an example:
zObjectAllocator.cpp
uintptr_t ZObjectAllocator::alloc_small_object_from_worker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_worker(), "Should be a worker thread");

  ZPage* page = _worker_small_page.get();
  uintptr_t addr = 0;

  if (page != NULL) {
    addr = page->alloc_object(size);
  }

  if (addr == 0) {
    // Allocate new page
    page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
    if (page != NULL) {
      addr = page->alloc_object(size);
    }
    _worker_small_page.set(page);
  }

  return addr;
}
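The shape here is a classic fast path/slow path: try the worker's cached page first, and only on failure allocate and publish a fresh page. A simplified single-threaded sketch of the same structure (Page, alloc_new_page and alloc_small are hypothetical stand-ins; the real ZPage::alloc_object bump-allocates with an atomic CAS so shared pages work across threads):
#include <cstdint>
#include <cstddef>

// Hypothetical bump-pointer page standing in for ZPage
struct Page {
  uintptr_t top;
  uintptr_t end;

  uintptr_t alloc_object(size_t size) {
    if (end - top < size) {
      return 0;  // page exhausted
    }
    const uintptr_t addr = top;
    top += size;
    return addr;
  }
};

static Page* cached_page = nullptr;  // stands in for _worker_small_page

static Page* alloc_new_page() {
  // In ZGC this goes through ZHeap/ZPageAllocator; here we just
  // hand out a page backed by a static 2M buffer.
  static char backing[1 << 21];
  return new Page{(uintptr_t)backing, (uintptr_t)backing + sizeof(backing)};
}

uintptr_t alloc_small(size_t size) {
  uintptr_t addr = 0;
  if (cached_page != nullptr) {
    addr = cached_page->alloc_object(size);  // fast path: current page
  }
  if (addr == 0) {
    cached_page = alloc_new_page();          // slow path: get a fresh page
    if (cached_page != nullptr) {
      addr = cached_page->alloc_object(size);
    }
  }
  return addr;
}

int main() {
  return alloc_small(64) != 0 ? 0 : 1;
}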
zObjectAllocator.cpp
ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
  if (page != NULL) {
    // Increment used bytes
    Atomic::add(size, _used.addr());
  }

  return page;
}
zHeap.cpp
ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = _page_allocator.alloc_page(type, size, flags);
  if (page != NULL) {
    // Update pagetable
    _pagetable.insert(page);
  }

  return page;
}
zPageAllocator.cpp
ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = flags.non_blocking()
                      ? alloc_page_nonblocking(type, size, flags)
                      : alloc_page_blocking(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Map page if needed
  if (!page->is_mapped()) {
    map_page(page);
  }

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}
alloc_page_blocking and alloc_page_nonblocking follow similar allocation logic; the difference is that when page allocation fails, alloc_page_blocking initiates a GC and waits until a page can be allocated. The relevant code is also in zPageAllocator.cpp:
ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  // Prepare to block
  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());

  _lock.lock();

  // Try non-blocking allocation
  ZPage* page = alloc_page_common(type, size, flags);
  if (page == NULL) {
    // Allocation failed, enqueue request
    _queue.insert_last(&request);
  }

  _lock.unlock();

  if (page == NULL) {
    // Allocation failed
    ZStatTimer timer(ZCriticalPhaseAllocationStall);

    // We can only block if VM is fully initialized
    check_out_of_memory_during_initialization();

    do {
      // Start asynchronous GC
      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

      // Wait for allocation to complete or fail
      page = request.wait();
    } while (page == gc_marker);

    {
      // Guard deletion of underlying semaphore. This is a workaround for a
      // bug in sem_post() in glibc < 2.21, where it's not safe to destroy
      // the semaphore immediately after returning from sem_wait(). The
      // reason is that sem_post() can touch the semaphore after a waiting
      // thread have returned from sem_wait(). To avoid this race we are
      // forcing the waiting thread to acquire/release the lock held by the
      // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
      ZLocker locker(&_lock);
    }
  }

  return page;
}
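Stripped of ZGC specifics, the blocking protocol is: try to allocate under the lock, enqueue a request on failure, kick off a GC, and sleep until the request is satisfied or definitively fails. A minimal sketch of that handshake using standard C++ primitives (AllocRequest is a hypothetical stand-in; the real ZPageAllocRequest wraps a semaphore-based future, with the allocator's satisfy path handing pages to queued requests):
#include <condition_variable>
#include <mutex>
#include <thread>

struct Page;

// Hypothetical stand-in for ZPageAllocRequest: the allocating thread
// parks in wait(); the GC/free path hands over a page via satisfy().
class AllocRequest {
 public:
  Page* wait() {
    std::unique_lock<std::mutex> lock(_mutex);
    _cv.wait(lock, [this] { return _done; });
    return _result;
  }

  void satisfy(Page* page) {
    {
      std::lock_guard<std::mutex> guard(_mutex);
      _result = page;
      _done = true;
    }
    _cv.notify_one();
  }

 private:
  std::mutex              _mutex;
  std::condition_variable _cv;
  Page*                   _result = nullptr;
  bool                    _done   = false;
};

int main() {
  AllocRequest request;
  std::thread gc([&] { request.satisfy(nullptr); });  // GC side: NULL means OOM
  Page* page = request.wait();                        // allocating thread blocks here
  gc.join();
  return page == nullptr ? 0 : 1;
}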
zPageAllocator.cpp, the alloc_page_common function
ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = alloc_page_common_inner(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send trace event
  ZTracer::tracer()->report_page_alloc(size, used(), max_available(flags.no_reserve()), _cache.available(), flags);

  return page;
}
zPageAllocator.cpp, the alloc_page_common_inner function
ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags) {
  const size_t max = max_available(flags.no_reserve());
  if (max < size) {
    // Not enough free memory
    return NULL;
  }

  // Try allocating from the page cache
  ZPage* const cached_page = _cache.alloc_page(type, size);
  if (cached_page != NULL) {
    return cached_page;
  }

  // Try allocate from the pre-mapped memory
  ZPage* const pre_mapped_page = _pre_mapped.alloc_page(type, size);
  if (pre_mapped_page != NULL) {
    return pre_mapped_page;
  }

  // Flush any remaining pre-mapped memory so that
  // subsequent allocations can use the physical memory.
  flush_pre_mapped();

  // Try ensure that physical memory is available
  const size_t unused = try_ensure_unused(size, flags.no_reserve());
  if (unused < size) {
    // Flush cache to free up more physical memory
    flush_cache(size - unused);
  }

  // Create new page and allocate physical memory
  return create_page(type, size);
}
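In short, alloc_page_common_inner works through progressively more expensive sources: the page cache, pre-mapped memory, and finally a freshly created page after flushing enough physical memory. A condensed standalone sketch of that fallback order (all helpers are illustrative stubs, not the ZGC API):
#include <cstddef>
#include <cstdint>

struct Page { uint8_t type; size_t size; };

// Illustrative stubs; in ZGC these correspond to _cache, _pre_mapped,
// flush_pre_mapped, try_ensure_unused, flush_cache and create_page.
static Page*  cache_alloc(uint8_t, size_t)      { return nullptr; }  // cache miss
static Page*  pre_mapped_alloc(uint8_t, size_t) { return nullptr; }  // nothing pre-mapped
static void   flush_pre_mapped()                {}
static void   flush_cache(size_t)               {}
static size_t unused_physical()                 { return SIZE_MAX; }
static Page*  create_page(uint8_t type, size_t size) { return new Page{type, size}; }

// Condensed view of alloc_page_common_inner's fallback order
Page* alloc_fallback_chain(uint8_t type, size_t size) {
  if (Page* p = cache_alloc(type, size))      return p;  // 1. reuse a cached page
  if (Page* p = pre_mapped_alloc(type, size)) return p;  // 2. carve from pre-mapped memory
  flush_pre_mapped();                                    // 3. free remaining pre-mapped memory
  const size_t unused = unused_physical();
  if (unused < size) flush_cache(size - unused);         // 4. evict cached pages if short
  return create_page(type, size);                        // 5. map a brand-new page
}

int main() {
  Page* page = alloc_fallback_chain(0, size_t(1) << 21);
  delete page;
  return 0;
}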
Source: OpenJDK 12 source code