Add support for allocating different sizes of storage in StorageProvider (facebook#1504)

Summary:

A large segment needs to be backed by a correspondingly large storage.
StorageProvider currently always allocates a fixed storage size,
determined by HERMESVM_LOG_HEAP_SEGMENT_SIZE.

This diff adds support for allocating larger storages with the following
changes:
1. `newStorage()` and `deleteStorage()` take an additional `sz` parameter
   (see the usage sketch below).
2. For `MallocStorageProvider` and `VMAllocateStorageProvider`, simply
   replace the previously fixed storage size with the passed-in `sz`.
3. For `ContiguousVAStorageProvider`, use a BitVector to manage
   allocations and deallocations. This can be improved later if we observe
   fragmentation.

Support for heap segments of different sizes will be added later.
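
As a minimal usage sketch of the updated interface (hypothetical caller code, not part of this diff; `provider`, the storage name, and the 4x multiplier are illustrative assumptions):

```cpp
// Hypothetical caller of the new API; not part of this commit.
// Assumes `provider` points to a StorageProvider and that the requested
// size is a non-zero multiple of AlignedHeapSegmentBase::kSegmentUnitSize.
const size_t sz = 4 * AlignedHeapSegmentBase::kSegmentUnitSize;
llvh::ErrorOr<void *> ptrOrError = provider->newStorage("large-segment", sz);
if (!ptrOrError)
  hermes_fatal("Cannot allocate storage.", ptrOrError.getError());
void *storage = *ptrOrError;
// ... use the storage ...
// The same size must be passed back when releasing it.
provider->deleteStorage(storage, sz);
```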

Differential Revision: D61676721
lavenzg authored and facebook-github-bot committed Nov 8, 2024
1 parent eaa2808 commit 51eb787
Showing 8 changed files with 241 additions and 126 deletions.
9 changes: 5 additions & 4 deletions include/hermes/VM/HeapRuntime.h
@@ -22,7 +22,7 @@ class HeapRuntime {
public:
~HeapRuntime() {
runtime_->~RT();
sp_->deleteStorage(runtime_);
sp_->deleteStorage(runtime_, kHeapRuntimeStorageSize);
}

/// Allocate a segment and create an aliased shared_ptr that points to the
@@ -36,16 +36,17 @@

private:
HeapRuntime(std::shared_ptr<StorageProvider> sp) : sp_{std::move(sp)} {
auto ptrOrError = sp_->newStorage("hermes-rt");
auto ptrOrError = sp_->newStorage("hermes-rt", kHeapRuntimeStorageSize);
if (!ptrOrError)
hermes_fatal("Cannot initialize Runtime storage.", ptrOrError.getError());
static_assert(
sizeof(RT) < AlignedHeapSegment::storageSize(), "Segments too small.");
static_assert(sizeof(RT) < kHeapRuntimeStorageSize, "Segments too small.");
runtime_ = static_cast<RT *>(*ptrOrError);
}

std::shared_ptr<StorageProvider> sp_;
RT *runtime_;
static constexpr size_t kHeapRuntimeStorageSize =
AlignedHeapSegment::storageSize();
};
} // namespace vm
} // namespace hermes
4 changes: 2 additions & 2 deletions include/hermes/VM/LimitedStorageProvider.h
@@ -29,9 +29,9 @@ class LimitedStorageProvider final : public StorageProvider {
: delegate_(std::move(provider)), limit_(limit) {}

protected:
llvh::ErrorOr<void *> newStorageImpl(const char *name) override;
llvh::ErrorOr<void *> newStorageImpl(const char *name, size_t sz) override;

void deleteStorageImpl(void *storage) override;
void deleteStorageImpl(void *storage, size_t sz) override;
};

} // namespace vm
27 changes: 14 additions & 13 deletions include/hermes/VM/StorageProvider.h
@@ -37,20 +37,21 @@ class StorageProvider {

/// @}

/// Create a new segment memory space.
llvh::ErrorOr<void *> newStorage() {
return newStorage(nullptr);
/// Create a new segment memory space with given size \p sz.
llvh::ErrorOr<void *> newStorage(size_t sz) {
return newStorage(nullptr, sz);
}
/// Create a new segment memory space and give this memory the name \p name.
/// \return A pointer to a block of memory that has
/// AlignedHeapSegment::storageSize() bytes, and is aligned on
/// AlignedHeapSegment::storageSize().
llvh::ErrorOr<void *> newStorage(const char *name);
/// \return A pointer to a block of memory that has \p sz bytes, and is
/// aligned on AlignedHeapSegmentBase::kSegmentUnitSize. Note that \p sz must
/// be non-zero and a multiple of
/// AlignedHeapSegmentBase::kSegmentUnitSize.
llvh::ErrorOr<void *> newStorage(const char *name, size_t sz);

/// Delete the given segment's memory space, and make it available for re-use.
/// \post Nothing in the range [storage, storage +
/// AlignedHeapSegment::storageSize()) is valid memory to be read or written.
void deleteStorage(void *storage);
/// Note that \p sz must be the same size that was used to allocate \p storage.
/// \post Nothing in the range [storage, storage + sz) is valid memory to be
/// read or written.
void deleteStorage(void *storage, size_t sz);

/// The number of storages this provider has allocated in its lifetime.
size_t numSucceededAllocs() const;
@@ -67,8 +68,8 @@
size_t numLiveAllocs() const;

protected:
virtual llvh::ErrorOr<void *> newStorageImpl(const char *name) = 0;
virtual void deleteStorageImpl(void *storage) = 0;
virtual llvh::ErrorOr<void *> newStorageImpl(const char *name, size_t sz) = 0;
virtual void deleteStorageImpl(void *storage, size_t sz) = 0;

private:
size_t numSucceededAllocs_{0};
14 changes: 8 additions & 6 deletions lib/VM/LimitedStorageProvider.cpp
@@ -13,20 +13,22 @@
namespace hermes {
namespace vm {

llvh::ErrorOr<void *> LimitedStorageProvider::newStorageImpl(const char *name) {
llvh::ErrorOr<void *> LimitedStorageProvider::newStorageImpl(
const char *name,
size_t sz) {
if (limit_ < AlignedHeapSegment::storageSize()) {
return make_error_code(OOMError::TestVMLimitReached);
}
limit_ -= AlignedHeapSegment::storageSize();
return delegate_->newStorage(name);
limit_ -= sz;
return delegate_->newStorage(name, sz);
}

void LimitedStorageProvider::deleteStorageImpl(void *storage) {
void LimitedStorageProvider::deleteStorageImpl(void *storage, size_t sz) {
if (!storage) {
return;
}
delegate_->deleteStorage(storage);
limit_ += AlignedHeapSegment::storageSize();
delegate_->deleteStorage(storage, sz);
limit_ += sz;
}

} // namespace vm
146 changes: 97 additions & 49 deletions lib/VM/StorageProvider.cpp
Expand Up @@ -7,11 +7,13 @@

#include "hermes/VM/StorageProvider.h"

#include "hermes/ADT/BitArray.h"
#include "hermes/Support/CheckedMalloc.h"
#include "hermes/Support/Compiler.h"
#include "hermes/Support/OSCompat.h"
#include "hermes/VM/AlignedHeapSegment.h"

#include "llvh/ADT/BitVector.h"
#include "llvh/ADT/DenseMap.h"
#include "llvh/Support/ErrorHandling.h"
#include "llvh/Support/MathExtras.h"
@@ -55,14 +57,18 @@ namespace vm {

namespace {

/// Minimum segment storage size. Any larger segment size should be a multiple
/// of it.
static constexpr size_t kSegmentUnitSize =
AlignedHeapSegmentBase::kSegmentUnitSize;

bool isAligned(void *p) {
return (reinterpret_cast<uintptr_t>(p) &
(AlignedHeapSegment::storageSize() - 1)) == 0;
return (reinterpret_cast<uintptr_t>(p) & (kSegmentUnitSize - 1)) == 0;
}

char *alignAlloc(void *p) {
return reinterpret_cast<char *>(llvh::alignTo(
reinterpret_cast<uintptr_t>(p), AlignedHeapSegment::storageSize()));
return reinterpret_cast<char *>(
llvh::alignTo(reinterpret_cast<uintptr_t>(p), kSegmentUnitSize));
}

void *getMmapHint() {
@@ -78,67 +84,103 @@ void *getMmapHint() {

class VMAllocateStorageProvider final : public StorageProvider {
public:
llvh::ErrorOr<void *> newStorageImpl(const char *name) override;
void deleteStorageImpl(void *storage) override;
llvh::ErrorOr<void *> newStorageImpl(const char *name, size_t sz) override;
void deleteStorageImpl(void *storage, size_t sz) override;
};

class ContiguousVAStorageProvider final : public StorageProvider {
public:
ContiguousVAStorageProvider(size_t size)
: size_(llvh::alignTo<AlignedHeapSegment::storageSize()>(size)) {
auto result = oscompat::vm_reserve_aligned(
size_, AlignedHeapSegment::storageSize(), getMmapHint());
: size_(llvh::alignTo<kSegmentUnitSize>(size)),
statusBits_(size_ / kSegmentUnitSize) {
auto result =
oscompat::vm_reserve_aligned(size_, kSegmentUnitSize, getMmapHint());
if (!result)
hermes_fatal("Contiguous storage allocation failed.", result.getError());
level_ = start_ = static_cast<char *>(*result);
start_ = static_cast<char *>(*result);
oscompat::vm_name(start_, size_, kFreeRegionName);
}
~ContiguousVAStorageProvider() override {
oscompat::vm_release_aligned(start_, size_);
}

llvh::ErrorOr<void *> newStorageImpl(const char *name) override {
llvh::ErrorOr<void *> newStorageImpl(const char *name, size_t sz) override {
// No available space to use.
if (LLVM_UNLIKELY(firstFreeBit_ == -1)) {
return make_error_code(OOMError::MaxStorageReached);
}

assert(
statusBits_.find_first_unset() == firstFreeBit_ &&
"firstFreeBit_ should always be the first unset bit");

void *storage;
if (!freelist_.empty()) {
storage = freelist_.back();
freelist_.pop_back();
} else if (level_ < start_ + size_) {
storage =
std::exchange(level_, level_ + AlignedHeapSegment::storageSize());
} else {
int numUnits = sz / kSegmentUnitSize;
int nextUsedBit = statusBits_.find_next(firstFreeBit_);
int curFreeBit = firstFreeBit_;
// Search for a large enough continuous bit range.
while (nextUsedBit != -1 && (nextUsedBit - curFreeBit < numUnits)) {
curFreeBit = statusBits_.find_next_unset(nextUsedBit);
if (curFreeBit == -1) {
return make_error_code(OOMError::MaxStorageReached);
}
nextUsedBit = statusBits_.find_next(curFreeBit);
}
// nextUsedBit could be -1, so check if there is enough space left.
if (nextUsedBit == -1 && curFreeBit + numUnits > (int)statusBits_.size()) {
return make_error_code(OOMError::MaxStorageReached);
}
auto res = oscompat::vm_commit(storage, AlignedHeapSegment::storageSize());

storage = start_ + curFreeBit * kSegmentUnitSize;
statusBits_.set(curFreeBit, curFreeBit + numUnits);
// Reset it to the new leftmost free bit.
firstFreeBit_ = statusBits_.find_first_unset();

auto res = oscompat::vm_commit(storage, sz);
if (res) {
oscompat::vm_name(storage, AlignedHeapSegment::storageSize(), name);
oscompat::vm_name(storage, sz, name);
}
return res;
}

void deleteStorageImpl(void *storage) override {
void deleteStorageImpl(void *storage, size_t sz) override {
assert(
!llvh::alignmentAdjustment(
storage, AlignedHeapSegment::storageSize()) &&
!llvh::alignmentAdjustment(storage, kSegmentUnitSize) &&
"Storage not aligned");
assert(storage >= start_ && storage < level_ && "Storage not in region");
oscompat::vm_name(
storage, AlignedHeapSegment::storageSize(), kFreeRegionName);
oscompat::vm_uncommit(storage, AlignedHeapSegment::storageSize());
freelist_.push_back(storage);
assert(
storage >= start_ && storage < start_ + size_ &&
"Storage not in region");
oscompat::vm_name(storage, sz, kFreeRegionName);
oscompat::vm_uncommit(storage, sz);
size_t numUnits = sz / kSegmentUnitSize;
// Reset all bits for this storage.
int startIndex = (static_cast<char *>(storage) - start_) / kSegmentUnitSize;
statusBits_.reset(startIndex, startIndex + numUnits);
if (startIndex < firstFreeBit_)
firstFreeBit_ = startIndex;
}

private:
static constexpr const char *kFreeRegionName = "hermes-free-heap";
size_t size_;
char *start_;
char *level_;
llvh::SmallVector<void *, 0> freelist_;
/// First free bit in \c statusBits_. We always make new allocation from the
/// leftmost free bit, based on heuristics:
/// 1. Usually the reserved address space is not full.
/// 2. Storage with size kSegmentUnitSize is allocated and deleted more
/// frequently than larger storage.
/// 3. Likely small storage will find space available from leftmost free bit,
/// leaving enough space at the right side for large storage.
int firstFreeBit_{0};
/// One bit for each kSegmentUnitSize space in the entire reserved virtual
/// address space. A bit is set if the corresponding space is used.
llvh::BitVector statusBits_;
};

class MallocStorageProvider final : public StorageProvider {
public:
llvh::ErrorOr<void *> newStorageImpl(const char *name) override;
void deleteStorageImpl(void *storage) override;
llvh::ErrorOr<void *> newStorageImpl(const char *name, size_t sz) override;
void deleteStorageImpl(void *storage, size_t sz) override;

private:
/// Map aligned starts to actual starts for freeing.
@@ -148,46 +190,49 @@ class MallocStorageProvider final : public StorageProvider {
};

llvh::ErrorOr<void *> VMAllocateStorageProvider::newStorageImpl(
const char *name) {
assert(AlignedHeapSegment::storageSize() % oscompat::page_size() == 0);
const char *name,
size_t sz) {
assert(kSegmentUnitSize % oscompat::page_size() == 0);
// Allocate the space, hoping it will be the correct alignment.
auto result = oscompat::vm_allocate_aligned(
AlignedHeapSegment::storageSize(),
AlignedHeapSegment::storageSize(),
getMmapHint());
auto result =
oscompat::vm_allocate_aligned(sz, kSegmentUnitSize, getMmapHint());
if (!result) {
return result;
}
void *mem = *result;
assert(isAligned(mem));
(void)&isAligned;
#ifdef HERMESVM_ALLOW_HUGE_PAGES
oscompat::vm_hugepage(mem, AlignedHeapSegment::storageSize());
oscompat::vm_hugepage(mem, sz);
#endif

// Name the memory region on platforms that support naming.
oscompat::vm_name(mem, AlignedHeapSegment::storageSize(), name);
oscompat::vm_name(mem, sz, name);
return mem;
}

void VMAllocateStorageProvider::deleteStorageImpl(void *storage) {
void VMAllocateStorageProvider::deleteStorageImpl(void *storage, size_t sz) {
if (!storage) {
return;
}
oscompat::vm_free_aligned(storage, AlignedHeapSegment::storageSize());
oscompat::vm_free_aligned(storage, sz);
}

llvh::ErrorOr<void *> MallocStorageProvider::newStorageImpl(const char *name) {
llvh::ErrorOr<void *> MallocStorageProvider::newStorageImpl(
const char *name,
size_t sz) {
// name is unused, can't name malloc memory.
(void)name;
void *mem = checkedMalloc2(AlignedHeapSegment::storageSize(), 2u);
void *mem = checkedMalloc2(sz, 2u);
void *lowLim = alignAlloc(mem);
assert(isAligned(lowLim) && "New storage should be aligned");
lowLimToAllocHandle_[lowLim] = mem;
return lowLim;
}

void MallocStorageProvider::deleteStorageImpl(void *storage) {
void MallocStorageProvider::deleteStorageImpl(void *storage, size_t sz) {
// free() does not need the memory size.
(void)sz;
if (!storage) {
return;
}
@@ -217,8 +262,11 @@ std::unique_ptr<StorageProvider> StorageProvider::mallocProvider() {
return std::unique_ptr<StorageProvider>(new MallocStorageProvider);
}

llvh::ErrorOr<void *> StorageProvider::newStorage(const char *name) {
auto res = newStorageImpl(name);
llvh::ErrorOr<void *> StorageProvider::newStorage(const char *name, size_t sz) {
assert(
sz && (sz % kSegmentUnitSize == 0) &&
"Allocated storage size must be multiples of kSegmentUnitSize");
auto res = newStorageImpl(name, sz);

if (res) {
numSucceededAllocs_++;
@@ -229,13 +277,13 @@ llvh::ErrorOr<void *> StorageProvider::newStorage(const char *name) {
return res;
}

void StorageProvider::deleteStorage(void *storage) {
void StorageProvider::deleteStorage(void *storage, size_t sz) {
if (!storage) {
return;
}

numDeletedAllocs_++;
deleteStorageImpl(storage);
return deleteStorageImpl(storage, sz);
}

llvh::ErrorOr<std::pair<void *, size_t>>
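
For reference, below is a simplified, self-contained model of the first-fit search that `ContiguousVAStorageProvider::newStorageImpl` performs over `statusBits_`. It uses `std::vector<bool>` instead of `llvh::BitVector` and works on unit indices instead of pointers; it is an illustrative sketch, not the committed implementation.

```cpp
#include <cstddef>
#include <vector>

// Find the first run of `numUnits` consecutive free (false) bits, mark them
// used (true), and return the start index, or -1 if no such run exists.
// `numUnits` must be >= 1. This mirrors the first-fit scan in
// ContiguousVAStorageProvider::newStorageImpl.
int allocateUnits(std::vector<bool> &statusBits, size_t numUnits) {
  size_t run = 0;
  for (size_t i = 0; i < statusBits.size(); ++i) {
    run = statusBits[i] ? 0 : run + 1;
    if (run == numUnits) {
      size_t start = i + 1 - numUnits;
      for (size_t j = start; j <= i; ++j)
        statusBits[j] = true; // mark the units as used
      return static_cast<int>(start);
    }
  }
  return -1; // no contiguous free range is large enough
}

// Freeing clears the bits for [startIndex, startIndex + numUnits), making the
// units available for reuse.
void freeUnits(std::vector<bool> &statusBits, size_t startIndex, size_t numUnits) {
  for (size_t j = startIndex; j < startIndex + numUnits; ++j)
    statusBits[j] = false;
}
```

The committed code additionally caches `firstFreeBit_` so the scan starts at the leftmost free unit, which the inline comments in the diff justify as the common case for single-unit allocations.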
4 changes: 2 additions & 2 deletions lib/VM/gcs/AlignedHeapSegment.cpp
@@ -52,7 +52,7 @@ llvh::ErrorOr<AlignedHeapSegment> AlignedHeapSegment::create(
llvh::ErrorOr<AlignedHeapSegment> AlignedHeapSegment::create(
StorageProvider *provider,
const char *name) {
auto result = provider->newStorage(name);
auto result = provider->newStorage(name, storageSize());
if (!result) {
return result.getError();
}
@@ -103,7 +103,7 @@ AlignedHeapSegment::~AlignedHeapSegment() {
__asan_unpoison_memory_region(start(), end() - start());

if (provider_) {
provider_->deleteStorage(lowLim_);
provider_->deleteStorage(lowLim_, storageSize());
}
}
