diff --git a/include/hermes/VM/AlignedHeapSegment.h b/include/hermes/VM/AlignedHeapSegment.h index 4a7d96b197e..135e9aba922 100644 --- a/include/hermes/VM/AlignedHeapSegment.h +++ b/include/hermes/VM/AlignedHeapSegment.h @@ -36,9 +36,9 @@ class StorageProvider; // TODO (T25527350): Debug Dump // TODO (T25527350): Heap Moving -/// An \c AlignedHeapSegment is a contiguous chunk of memory aligned to its own -/// storage size (which is a fixed power of two number of bytes). The storage -/// is further split up according to the diagram below: +/// An \c AlignedHeapSegment manages a contiguous chunk of memory aligned to +/// kSegmentUnitSize. The storage is further split up according to the diagram +/// below: /// /// +----------------------------------------+ /// | (1) Card Table | @@ -52,83 +52,31 @@ class StorageProvider; /// | (End) | /// +----------------------------------------+ /// -/// The tables in (1), and (2) cover the contiguous allocation space (3) -/// into which GCCells are bump allocated. +/// The tables in (1), and (2) cover the contiguous allocation space (3) into +/// which GCCells are bump allocated. They have fixed size computed from +/// kSegmentUnitSize. For segments whose size is some non-unit multiple of +/// kSegmentUnitSize, card table allocates its internal arrays separately +/// instead. Only one GCCell is allowed in each such segment, so the inline +/// Mark Bit Array is large enough. Any segment size smaller than +/// kSegmentUnitSize is not supported. The headers of all GCCells, in any +/// segment type, must reside in the first region of kSegmentUnitSize. This +/// invariant ensures that we can always get the card table from a valid GCCell +/// pointer. class AlignedHeapSegment { public: - /// @name Constants and utility functions for the aligned storage of \c - /// AlignedHeapSegment. - /// - /// @{ - /// The size and the alignment of the storage, in bytes. 
- static constexpr unsigned kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE; - static constexpr size_t kSize{1 << kLogSize}; - /// Mask for isolating the offset into a storage for a pointer. - static constexpr size_t kLowMask{kSize - 1}; - /// Mask for isolating the storage being pointed into by a pointer. - static constexpr size_t kHighMask{~kLowMask}; - - /// Returns the storage size, in bytes, of an \c AlignedHeapSegment. - static constexpr size_t storageSize() { - return kSize; - } - - /// Returns the pointer to the beginning of the storage containing \p ptr - /// (inclusive). Assuming such a storage exists. Note that - /// - /// storageStart(seg.hiLim()) != seg.lowLim() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static void *storageStart(const void *ptr) { - return reinterpret_cast( - reinterpret_cast(ptr) & kHighMask); - } - - /// Returns the pointer to the end of the storage containing \p ptr - /// (exclusive). Assuming such a storage exists. Note that - /// - /// storageEnd(seg.hiLim()) != seg.hiLim() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static void *storageEnd(const void *ptr) { - return reinterpret_cast(storageStart(ptr)) + kSize; - } - - /// Returns the offset in bytes to \p ptr from the start of its containing - /// storage. Assuming such a storage exists. Note that - /// - /// offset(seg.hiLim()) != seg.size() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static size_t offset(const char *ptr) { - return reinterpret_cast(ptr) & kLowMask; - } - /// @} - - /// Construct a null AlignedHeapSegment (one that does not own memory). - AlignedHeapSegment() = default; - /// \c AlignedHeapSegment is movable and assignable, but not copyable. 
- AlignedHeapSegment(AlignedHeapSegment &&); - AlignedHeapSegment &operator=(AlignedHeapSegment &&); - AlignedHeapSegment(const AlignedHeapSegment &) = delete; - - ~AlignedHeapSegment(); - - /// Create a AlignedHeapSegment by allocating memory with \p provider. - static llvh::ErrorOr create(StorageProvider *provider); - static llvh::ErrorOr create( - StorageProvider *provider, - const char *name); + /// Heap segment size in log of 2. + static constexpr size_t kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE; + /// The unit segment size, in bytes. Any valid heap segment must have a size + /// equal to or be a multiple of it. + static constexpr size_t kSegmentUnitSize = (1 << kLogSize); /// Contents of the memory region managed by this segment. class Contents { public: /// The number of bits representing the total number of heap-aligned /// addresses in the segment storage. - static constexpr size_t kMarkBitArraySize = kSize >> LogHeapAlign; + static constexpr size_t kMarkBitArraySize = + kSegmentUnitSize >> LogHeapAlign; /// BitArray for marking allocation region of a segment. using MarkBitArray = BitArray; @@ -137,8 +85,13 @@ class AlignedHeapSegment { void protectGuardPage(oscompat::ProtectMode mode); private: + friend class FixedSizeHeapSegment; friend class AlignedHeapSegment; + /// Pass segment size to CardTable constructor to allocate its data + /// separately if \p sz > kSegmentUnitSize. + Contents(size_t segmentSize) : cardTable_(segmentSize) {} + /// Note that because of the Contents object, the first few bytes of the /// card table are unused, we instead use them to store a small /// SHSegmentInfo struct. @@ -179,10 +132,11 @@ class AlignedHeapSegment { "SHSegmentInfo does not fit in available unused CardTable space."); /// The offset from the beginning of a segment of the allocatable region. 
- static constexpr size_t offsetOfAllocRegion{offsetof(Contents, allocRegion_)}; + static constexpr size_t kOffsetOfAllocRegion{ + offsetof(Contents, allocRegion_)}; static_assert( - isSizeHeapAligned(offsetOfAllocRegion), + isSizeHeapAligned(kOffsetOfAllocRegion), "Allocation region must start at a heap aligned offset"); static_assert( @@ -215,6 +169,272 @@ class AlignedHeapSegment { GCCell *cell_{nullptr}; }; + /// Returns the address that is the lower bound of the segment. + /// \post The returned pointer is guaranteed to be aligned to + /// kSegmentUnitSize. + char *lowLim() const { + return lowLim_; + } + + /// Get the size of this segment. + size_t storageSize() const { +#ifdef HERMES_SLOW_DEBUG + auto *segmentInfo = reinterpret_cast(lowLim_); + auto sz = (size_t)segmentInfo->shiftedSegmentSize + << HERMESVM_LOG_HEAP_SEGMENT_SIZE; + assert( + sz == segmentSize_ && + "segmentSize_ in FixedSizeHeapSegment must always be equal to the size stored in SHSegmentInfo"); +#endif + return segmentSize_; + } + + /// The largest size the allocation region of an aligned heap segment could + /// be. + size_t maxSize() const { + return storageSize() - kOffsetOfAllocRegion; + } + + /// Returns the address that is the upper bound of the segment. + char *hiLim() const { + return lowLim() + storageSize(); + } + + /// Returns the address at which the first allocation in this segment would + /// occur. + /// Disable UB sanitization because 'this' may be null during the tests. + char *start() const LLVM_NO_SANITIZE("undefined") { + return contents()->allocRegion_; + } + + /// Returns the address at which the next allocation, if any, will occur. + char *level() const { + return level_; + } + + /// Return a reference to the card table covering the memory region managed by + /// this segment. 
+ CardTable &cardTable() const { + return contents()->cardTable_; + } + + /// Given a \p cell lives in the memory region of some valid segment \c s, + /// returns a pointer to the CardTable covering the segment containing the + /// cell. Note that this takes a GCCell pointer in order to correctly get + /// the segment starting address for JumboHeapSegment. + /// + /// \pre There exists a currently alive heap in which \p cell is allocated. + static CardTable *cardTableCovering(const GCCell *cell) { + return &contents(alignedStorageStart(cell))->cardTable_; + } + + /// Find the head of the first cell that extends into the card at index + /// \p cardIdx. + /// \return A cell such that + /// cell <= indexToAddress(cardIdx) < cell->nextCell(). + GCCell *getFirstCellHead(size_t cardIdx) { + CardTable &cards = cardTable(); + GCCell *cell = cards.firstObjForCard(cardIdx); + assert(cell->isValid() && "Object head doesn't point to a valid object"); + return cell; + } + + /// Record the head of this cell so it can be found by the card scanner. + static void setCellHead(const GCCell *cellStart, const size_t sz) { + const char *start = reinterpret_cast(cellStart); + const char *end = start + sz; + CardTable *cards = cardTableCovering(cellStart); + auto boundary = cards->nextBoundary(start); + // If this object crosses a card boundary, then update boundaries + // appropriately. + if (boundary.address() < end) { + cards->updateBoundaries(&boundary, start, end); + } + } + + /// Return a reference to the mark bit array covering the memory region + /// managed by this segment. + Contents::MarkBitArray &markBitArray() const { + return contents()->markBitArray_; + } + + /// Mark the given \p cell. Assumes the given address is a valid heap object. + static void setCellMarkBit(const GCCell *cell) { + auto *markBits = markBitArrayCovering(cell); + size_t ind = addressToMarkBitArrayIndex(cell); + markBits->set(ind, true); + } + + /// Return whether the given \p cell is marked. 
Assumes the given address is + /// a valid heap object. + static bool getCellMarkBit(const GCCell *cell) { + auto *markBits = markBitArrayCovering(cell); + size_t ind = addressToMarkBitArrayIndex(cell); + return markBits->at(ind); + } + + /// Translate the given address to a 0-based index in the MarkBitArray of its + /// segment. The base address is the start of the storage of this segment. For + /// JumboSegment, this should always return a constant index + /// kOffsetOfAllocRegion >> LogHeapAlign. + static size_t addressToMarkBitArrayIndex(const GCCell *cell) { + auto *cp = reinterpret_cast(cell); + auto *base = reinterpret_cast(alignedStorageStart(cell)); + return (cp - base) >> LogHeapAlign; + } + + /// Return true if objects \p a and \p b live in the same segment. This is used + /// to check if a pointer field in \p a may point to an object in the same + /// segment (so that we don't need to dirty the cards). This also works for + /// large segments, since there is only one cell in those segments (i.e., \p a + /// and \p b would be the same). + static bool containedInSame(const GCCell *a, const GCCell *b) { + return (reinterpret_cast(a) ^ reinterpret_cast(b)) < + kSegmentUnitSize; + } + +#ifndef NDEBUG + /// Get the storage end of the segment that \p cell resides in. + static char *storageEnd(const GCCell *cell) { + auto *start = alignedStorageStart(cell); + auto *segmentInfo = reinterpret_cast(start); + return start + + (segmentInfo->shiftedSegmentSize << HERMESVM_LOG_HEAP_SEGMENT_SIZE); + } +#endif + + protected: + AlignedHeapSegment() = default; + + /// Construct Contents() at the address of \p lowLim. + AlignedHeapSegment(void *lowLim, size_t segmentSize) + : lowLim_(reinterpret_cast(lowLim)), segmentSize_(segmentSize) { + new (contents()) Contents(segmentSize); + contents()->protectGuardPage(oscompat::ProtectMode::None); + } + + /// Return a pointer to the contents of the memory region managed by this + /// segment. 
+ Contents *contents() const { + return reinterpret_cast(lowLim_); + } + + /// Given the \p lowLim of some valid segment's memory region, returns a + /// pointer to the Contents laid out in the storage, assuming it exists. + static Contents *contents(void *lowLim) { + return reinterpret_cast(lowLim); + } + + /// The start of the aligned segment. + char *lowLim_{nullptr}; + + /// The current address in this segment to allocate new object. This must be + /// positioned after lowLim_ to be correctly initialized. + char *level_{start()}; + + /// The size of this segment. + size_t segmentSize_; + + private: + /// Return the starting address for aligned region of size kSegmentUnitSize + /// that \p cell resides in. If \c cell resides in a JumboSegment, it's the + /// only cell there, this essentially returns its segment starting address. + static char *alignedStorageStart(const GCCell *cell) { + return reinterpret_cast( + reinterpret_cast(cell) & ~(kSegmentUnitSize - 1)); + } + + /// Given a \p cell, returns a pointer to the MarkBitArray covering the + /// segment that \p cell resides in. + /// + /// \pre There exists a currently alive heap that claims to contain \c cell. + static Contents::MarkBitArray *markBitArrayCovering(const GCCell *cell) { + auto *segStart = alignedStorageStart(cell); + return &contents(segStart)->markBitArray_; + } +}; + +/// JumboHeapSegment has custom storage size that must be a multiple of +/// kSegmentUnitSize. Each such segment can only allocate a single object that +/// occupies the entire allocation space. Therefore, the inline MarkBitArray is +/// large enough, while CardTable needs to allocate its cards and boundaries +/// arrays separately. +class JumboHeapSegment : public AlignedHeapSegment {}; + +/// FixedSizeHeapSegment has fixed storage size kSegmentUnitSize. Its CardTable +/// and MarkBitArray are stored inline right before the allocation space. 
This +/// is used for all allocations in YoungGen and normal object allocations in +/// OldGen. +class FixedSizeHeapSegment : public AlignedHeapSegment { + public: + /// @name Constants and utility functions for the aligned storage of \c + /// FixedSizeHeapSegment. + /// + /// @{ + /// The size and the alignment of the storage, in bytes. + static constexpr size_t kSize = kSegmentUnitSize; + /// Mask for isolating the offset into a storage for a pointer. + static constexpr size_t kLowMask{kSize - 1}; + /// Mask for isolating the storage being pointed into by a pointer. + static constexpr size_t kHighMask{~kLowMask}; + + /// Returns the storage size, in bytes, of a \c FixedSizeHeapSegment. This is + /// a static override of AlignedHeapSegment::storageSize(), since here the + /// segment size is a constant. + static constexpr size_t storageSize() { + return kSize; + } + + /// Returns the pointer to the beginning of the storage containing \p ptr + /// (inclusive). Assuming such a storage exists. Note that + /// + /// storageStart(seg.hiLim()) != seg.lowLim() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static void *storageStart(const void *ptr) { + return reinterpret_cast( + reinterpret_cast(ptr) & kHighMask); + } + + /// Returns the pointer to the end of the storage containing \p ptr + /// (exclusive). Assuming such a storage exists. Note that + /// + /// storageEnd(seg.hiLim()) != seg.hiLim() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static void *storageEnd(const void *ptr) { + return reinterpret_cast(storageStart(ptr)) + kSize; + } + + /// Returns the offset in bytes to \p ptr from the start of its containing + /// storage. Assuming such a storage exists. 
Note that + /// + /// offset(seg.hiLim()) != seg.size() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static size_t offset(const char *ptr) { + return reinterpret_cast(ptr) & kLowMask; + } + /// @} + + /// Construct a null FixedSizeHeapSegment (one that does not own memory). + FixedSizeHeapSegment() = default; + /// \c FixedSizeHeapSegment is movable and assignable, but not copyable. + FixedSizeHeapSegment(FixedSizeHeapSegment &&); + FixedSizeHeapSegment &operator=(FixedSizeHeapSegment &&); + FixedSizeHeapSegment(const FixedSizeHeapSegment &) = delete; + FixedSizeHeapSegment &operator=(const FixedSizeHeapSegment &) = delete; + + ~FixedSizeHeapSegment(); + + /// Create a FixedSizeHeapSegment by allocating memory with \p provider. + static llvh::ErrorOr create( + StorageProvider *provider, + const char *name = nullptr); + /// Returns the index of the segment containing \p lowLim, which is required /// to be the start of its containing segment. (This can allow extra /// efficiency, in cases where the segment start has already been computed.) @@ -238,51 +458,14 @@ class AlignedHeapSegment { /// space, returns {nullptr, false}. inline AllocResult alloc(uint32_t size); - /// Given the \p lowLim of some valid segment's memory region, returns a - /// pointer to the AlignedHeapSegment::Contents laid out in that storage, - /// assuming it exists. - inline static Contents *contents(void *lowLim); - inline static const Contents *contents(const void *lowLim); - /// Given a \p ptr into the memory region of some valid segment \c s, returns /// a pointer to the CardTable covering the segment containing the pointer. /// /// \pre There exists a currently alive heap that claims to contain \c ptr. 
inline static CardTable *cardTableCovering(const void *ptr); - /// Given a \p ptr into the memory region of some valid segment \c s, returns - /// a pointer to the MarkBitArray covering the segment containing the - /// pointer. - /// - /// \pre There exists a currently alive heap that claims to contain \c ptr. - inline static Contents::MarkBitArray *markBitArrayCovering(const void *ptr); - - /// Translate the given address to a 0-based index in the MarkBitArray of its - /// segment. The base address is the start of the storage of this segment. - static size_t addressToMarkBitArrayIndex(const void *ptr) { - auto *cp = reinterpret_cast(ptr); - auto *base = reinterpret_cast(storageStart(cp)); - return (cp - base) >> LogHeapAlign; - } - - /// Mark the given \p cell. Assumes the given address is a valid heap object. - inline static void setCellMarkBit(const GCCell *cell); - - /// Return whether the given \p cell is marked. Assumes the given address is - /// a valid heap object. - inline static bool getCellMarkBit(const GCCell *cell); - - /// Find the head of the first cell that extends into the card at index - /// \p cardIdx. - /// \return A cell such that - /// cell <= indexToAddress(cardIdx) < cell->nextCell(). - inline GCCell *getFirstCellHead(size_t cardIdx); - - /// Record the head of this cell so it can be found by the card scanner. - static inline void setCellHead(const GCCell *start, const size_t sz); - /// The largest size the allocation region of an aligned heap segment could - /// be. + /// be. This is a static override of AlignedHeapSegment::maxSize(). inline static constexpr size_t maxSize(); /// The size of the allocation region in this aligned heap segment. @@ -294,23 +477,6 @@ class AlignedHeapSegment { /// The number of bytes in the segment that are available for allocation. inline size_t available() const; - /// Returns the address that is the lower bound of the segment. 
- /// \post The returned pointer is guaranteed to be aligned to a segment - /// boundary. - char *lowLim() const { - return lowLim_; - } - - /// Returns the address that is the upper bound of the segment. - char *hiLim() const { - return lowLim() + storageSize(); - } - - /// Returns the address at which the first allocation in this segment would - /// occur. - /// Disable UB sanitization because 'this' may be null during the tests. - inline char *start() const LLVM_NO_SANITIZE("undefined"); - /// Returns the first address after the region in which allocations can occur, /// taking external memory credits into a account (they decrease the effective /// end). @@ -330,31 +496,19 @@ class AlignedHeapSegment { /// ignoring external memory credits. inline char *end() const; - /// Returns the address at which the next allocation, if any, will occur. - inline char *level() const; - /// Returns an iterator range corresponding to the cells in this segment. inline llvh::iterator_range cells(); /// Returns whether \p a and \p b are contained in the same - /// AlignedHeapSegment. + /// FixedSizeHeapSegment. inline static bool containedInSame(const void *a, const void *b); - /// Return a reference to the card table covering the memory region managed by - /// this segment. - /// Disable sanitization because 'this' may be null in the tests. - inline CardTable &cardTable() const LLVM_NO_SANITIZE("null"); - - /// Return a reference to the mark bit array covering the memory region - /// managed by this segment. - inline Contents::MarkBitArray &markBitArray() const; - explicit operator bool() const { return lowLim(); } /// \return \c true if and only if \p ptr is within the memory range owned by - /// this \c AlignedHeapSegment. + /// this \c FixedSizeHeapSegment. bool contains(const void *ptr) const { return storageStart(ptr) == lowLim(); } @@ -390,26 +544,15 @@ class AlignedHeapSegment { /// Set the contents of the segment to a dead value. 
void clear(); - /// Set the given range [start, end) to a dead value. - static void clear(char *start, char *end); /// Checks that dead values are present in the [start, end) range. static void checkUnwritten(char *start, char *end); #endif - protected: - /// Return a pointer to the contents of the memory region managed by this - /// segment. - inline Contents *contents() const; - - /// The start of the aligned segment. - char *lowLim_{nullptr}; - + private: /// The provider that created this segment. It will be used to properly /// destroy this. StorageProvider *provider_{nullptr}; - char *level_{start()}; - /// The upper limit of the space that we can currently allocated into; /// this may be decreased when externally allocated memory is credited to /// the generation owning this space. @@ -417,13 +560,12 @@ class AlignedHeapSegment { /// Used in move constructor and move assignment operator following the copy /// and swap idiom. - friend void swap(AlignedHeapSegment &a, AlignedHeapSegment &b); + friend void swap(FixedSizeHeapSegment &a, FixedSizeHeapSegment &b); - private: - AlignedHeapSegment(StorageProvider *provider, void *lowLim); + FixedSizeHeapSegment(StorageProvider *provider, void *lowLim); }; -AllocResult AlignedHeapSegment::alloc(uint32_t size) { +AllocResult FixedSizeHeapSegment::alloc(uint32_t size) { assert(lowLim() != nullptr && "Cannot allocate in a null segment"); assert(size >= sizeof(GCCell) && "cell must be larger than GCCell"); assert(isSizeHeapAligned(size) && "size must be heap aligned"); @@ -459,118 +601,48 @@ AllocResult AlignedHeapSegment::alloc(uint32_t size) { return {cell, true}; } -/*static*/ -AlignedHeapSegment::Contents::MarkBitArray * -AlignedHeapSegment::markBitArrayCovering(const void *ptr) { - return &contents(storageStart(ptr))->markBitArray_; -} - -/*static*/ -void AlignedHeapSegment::setCellMarkBit(const GCCell *cell) { - auto *markBits = markBitArrayCovering(cell); - size_t ind = addressToMarkBitArrayIndex(cell); - 
markBits->set(ind, true); -} - -/*static*/ -bool AlignedHeapSegment::getCellMarkBit(const GCCell *cell) { - auto *markBits = markBitArrayCovering(cell); - size_t ind = addressToMarkBitArrayIndex(cell); - return markBits->at(ind); -} - -GCCell *AlignedHeapSegment::getFirstCellHead(size_t cardIdx) { - CardTable &cards = cardTable(); - GCCell *cell = cards.firstObjForCard(cardIdx); - assert(cell->isValid() && "Object head doesn't point to a valid object"); - return cell; -} - -/* static */ -void AlignedHeapSegment::setCellHead(const GCCell *cellStart, const size_t sz) { - const char *start = reinterpret_cast(cellStart); - const char *end = start + sz; - CardTable *cards = cardTableCovering(start); - auto boundary = cards->nextBoundary(start); - // If this object crosses a card boundary, then update boundaries - // appropriately. - if (boundary.address() < end) { - cards->updateBoundaries(&boundary, start, end); - } -} - -/* static */ AlignedHeapSegment::Contents *AlignedHeapSegment::contents( - void *lowLim) { - return reinterpret_cast(lowLim); -} - -/* static */ const AlignedHeapSegment::Contents *AlignedHeapSegment::contents( - const void *lowLim) { - return reinterpret_cast(lowLim); +/* static */ CardTable *FixedSizeHeapSegment::cardTableCovering( + const void *ptr) { + return &FixedSizeHeapSegment::contents(storageStart(ptr))->cardTable_; } -/* static */ CardTable *AlignedHeapSegment::cardTableCovering(const void *ptr) { - return &AlignedHeapSegment::contents(storageStart(ptr))->cardTable_; +/* static */ constexpr size_t FixedSizeHeapSegment::maxSize() { + return storageSize() - kOffsetOfAllocRegion; } -/* static */ constexpr size_t AlignedHeapSegment::maxSize() { - return storageSize() - offsetof(Contents, allocRegion_); -} - -size_t AlignedHeapSegment::size() const { +size_t FixedSizeHeapSegment::size() const { return end() - start(); } -size_t AlignedHeapSegment::used() const { +size_t FixedSizeHeapSegment::used() const { return level() - start(); } -size_t 
AlignedHeapSegment::available() const { +size_t FixedSizeHeapSegment::available() const { return effectiveEnd() - level(); } -char *AlignedHeapSegment::start() const { - return contents()->allocRegion_; -} - -char *AlignedHeapSegment::effectiveEnd() const { +char *FixedSizeHeapSegment::effectiveEnd() const { return effectiveEnd_; } -char *AlignedHeapSegment::end() const { +char *FixedSizeHeapSegment::end() const { return start() + maxSize(); } -char *AlignedHeapSegment::level() const { - return level_; -} - -llvh::iterator_range -AlignedHeapSegment::cells() { +llvh::iterator_range +FixedSizeHeapSegment::cells() { return { HeapCellIterator(reinterpret_cast(start())), HeapCellIterator(reinterpret_cast(level()))}; } /* static */ -bool AlignedHeapSegment::containedInSame(const void *a, const void *b) { +bool FixedSizeHeapSegment::containedInSame(const void *a, const void *b) { return (reinterpret_cast(a) ^ reinterpret_cast(b)) < storageSize(); } -CardTable &AlignedHeapSegment::cardTable() const { - return contents()->cardTable_; -} - -AlignedHeapSegment::Contents::MarkBitArray &AlignedHeapSegment::markBitArray() - const { - return contents()->markBitArray_; -} - -AlignedHeapSegment::Contents *AlignedHeapSegment::contents() const { - return contents(lowLim()); -} - } // namespace vm } // namespace hermes diff --git a/include/hermes/VM/ArrayStorage.h b/include/hermes/VM/ArrayStorage.h index 15d90f83e3b..6247d0e2c84 100644 --- a/include/hermes/VM/ArrayStorage.h +++ b/include/hermes/VM/ArrayStorage.h @@ -237,7 +237,7 @@ class ArrayStorageBase final auto *fromStart = other->data(); auto *fromEnd = fromStart + otherSz; GCHVType::uninitialized_copy( - fromStart, fromEnd, data() + sz, runtime.getHeap()); + fromStart, fromEnd, data() + sz, runtime.getHeap(), this); size_.store(sz + otherSz, std::memory_order_release); } diff --git a/include/hermes/VM/CardTableNC.h b/include/hermes/VM/CardTableNC.h index 5bfa40f2102..452830f09e0 100644 --- a/include/hermes/VM/CardTableNC.h +++ 
b/include/hermes/VM/CardTableNC.h @@ -22,10 +22,14 @@ namespace hermes { namespace vm { /// The card table optimizes young gen collections by restricting the amount of -/// heap belonging to the old gen that must be scanned. The card table expects -/// to be constructed inside an AlignedHeapSegment's storage, at some position -/// before the allocation region, and covers the extent of that storage's -/// memory. +/// heap belonging to the old gen that must be scanned. The card table expects +/// to be constructed at the beginning of a segment's storage, and covers the +/// extent of that storage's memory. There are two cases: +/// 1. For FixedSizeHeapSegment, the inline CardStatus array and Boundary array +/// in the card table is large enough. +/// 2. For JumboHeapSegment, the two arrays are allocated separately. +/// In either case, the pointers to the CardStatus array and Boundary array are +/// stored in \c cards and \c boundaries field of SHSegmentInfo. /// /// Also supports the following query: Given a card in the heap that intersects /// with the used portion of its segment, find its "crossing object" -- the @@ -58,16 +62,19 @@ class CardTable { const char *address_{nullptr}; }; + enum class CardStatus : char { Clean = 0, Dirty = 1 }; + /// The size (and base-two log of the size) of cards used in the card table. static constexpr size_t kLogCardSize = 9; // ==> 512-byte cards. static constexpr size_t kCardSize = 1 << kLogCardSize; // ==> 512-byte cards. - static constexpr size_t kSegmentSize = 1 << HERMESVM_LOG_HEAP_SEGMENT_SIZE; - - /// The number of valid indices into the card table. - static constexpr size_t kValidIndices = kSegmentSize >> kLogCardSize; + /// Maximum size of segment that can have inline cards and boundaries array. + static constexpr size_t kSegmentUnitSize = 1 + << HERMESVM_LOG_HEAP_SEGMENT_SIZE; - /// The size of the card table. - static constexpr size_t kCardTableSize = kValidIndices; + /// The size of the maximum inline card table. 
CardStatus array and boundary + arrays for larger segments have larger sizes and are stored separately. + static constexpr size_t kInlineCardTableSize = + kSegmentUnitSize >> kLogCardSize; /// For convenience, this is a conversion factor to determine how many bytes /// in the heap correspond to a single byte in the card table. This is /// guaranteed by a static_assert below. static constexpr size_t kHeapBytesPerCardByte = kCardSize; - /// A prefix of every segment is occupied by auxilary data - /// structures. The card table is the first such data structure. - /// The card table maps to the segment. Only the suffix of the card - /// table that maps to the suffix of entire segment that is used for - /// allocation is ever used; the prefix that maps to the card table - /// itself is not used. (Nor is the portion that of the card table - /// that maps to the other auxiliary data structure, the mark bit - /// array, but we don't attempt to calculate that here.) - /// It is useful to know the size of this unused region of - /// the card table, so it can be used for other purposes. - /// Note that the total size of the card table is 2 times - /// kCardTableSize, since the CardTable contains two byte arrays of - /// that size (cards_ and _boundaries_). - static constexpr size_t kFirstUsedIndex = - (2 * kCardTableSize) >> kLogCardSize; - - CardTable() = default; + /// A prefix of every segment is occupied by auxiliary data structures. The + /// card table is the first such data structure. The card table maps to the + /// segment. Only the suffix of the card table that maps to the suffix of + /// entire segment that is used for allocation is ever used; the prefix that + /// maps to the card table itself is not used, nor is the portion of the card + /// table that maps to the other auxiliary data structure: the mark bit array + /// and guard pages. 
This small space can be used for other purposes, such as + /// storing the SHSegmentInfo (we assert in AlignedHeapSegment that its + /// size won't exceed this unused space). The actual first used index should + /// take into account all these structures. Here we only calculate for + /// CardTable and size of SHSegmentInfo. It's only used as starting index for + /// clearing/dirtying range of bits. + /// Note that the total size of the card table is 2 times kInlineCardTableSize, + /// since the CardTable contains two byte arrays of that size (cards_ and + /// boundaries_). And this index must be larger than the size of SHSegmentInfo + /// to avoid corrupting it when clearing/dirtying bits. + static constexpr size_t kFirstUsedIndex = std::max( + sizeof(SHSegmentInfo), + (2 * kInlineCardTableSize) >> kLogCardSize); + + CardTable(size_t segmentSize) { + assert( + segmentSize && segmentSize % kSegmentUnitSize == 0 && + "segmentSize must be a multiple of kSegmentUnitSize"); + + segmentInfo_.shiftedSegmentSize = + segmentSize >> HERMESVM_LOG_HEAP_SEGMENT_SIZE; + if (segmentSize == kSegmentUnitSize) { + // Just use the inline storage. + setCards(inlineCardStatusArray_); + setBoundaries(inlineBoundaryArray_); + } else { + size_t cardTableSize = segmentSize >> kLogCardSize; + // CardStatus is clean by default, so must zero-initialize it. + setCards(new AtomicIfConcurrentGC[cardTableSize] {}); + setBoundaries(new int8_t[cardTableSize]); + } + } /// CardTable is not copyable or movable: It must be constructed in-place. CardTable(const CardTable &) = delete; CardTable(CardTable &&) = delete; CardTable &operator=(const CardTable &) = delete; CardTable &operator=(CardTable &&) = delete; + ~CardTable() { + // If CardStatus/Boundary array is allocated separately, free them. + if (cards() != inlineCardStatusArray_) { + delete[] cards(); + delete[] boundaries(); + } + } + /// Returns the card table index corresponding to a byte at the given address. 
/// \pre \p addr must be within the bounds of the segment owning this card /// table or at most 1 card after it, that is to say @@ -112,18 +148,25 @@ class CardTable { /// of how this is used. inline size_t addressToIndex(const void *addr) const LLVM_NO_SANITIZE("null"); - /// Returns the address corresponding to the given card table - /// index. + /// Returns the address corresponding to the given card table index. /// /// \pre \p index is bounded: /// - /// 0 <= index <= kValidIndices + /// 0 <= index <= getEndIndex() inline const char *indexToAddress(size_t index) const; /// Make the card table entry for the given address dirty. /// \pre \p addr is required to be an address covered by the card table. + /// This only works for memory in normal objects (i.e., do not support large + /// allocation) and is more efficient than the version for large objects. inline void dirtyCardForAddress(const void *addr); + /// Make the card table entry for the given address dirty. + /// \pre \p addr is required to be an address covered by the card table. + /// This reads the cards array pointer from SHSegmentInfo, so works for normal + /// and large objects. + inline void dirtyCardForAddressInLargeObj(const void *addr); + /// Make the card table entries for cards that intersect the given address /// range dirty. The range is a closed interval [low, high]. /// \pre \p low and \p high are required to be addresses covered by the card @@ -132,18 +175,27 @@ class CardTable { /// Returns whether the card table entry for the given address is dirty. /// \pre \p addr is required to be an address covered by the card table. + /// This only works for memory in normal objects (i.e., do not support large + /// allocation) and is more efficient than the version for large objects. inline bool isCardForAddressDirty(const void *addr) const; /// Returns whether the card table entry for the given index is dirty. /// \pre \p index is required to be a valid card table index. 
+ /// This only works for memory in normal objects (i.e., do not support large + /// allocation) and is more efficient than the version for large objects. inline bool isCardForIndexDirty(const size_t index) const; + /// Version of isCardForAddressDirty()/isCardForIndexDirty that works for + /// normal and large objects. + inline bool isCardForAddressDirtyInLargeObj(const void *addr) const; + inline bool isCardForIndexDirtyInLargeObj(const size_t index) const; + /// If there is a dirty card at or after \p fromIndex, at an index less than /// \p endIndex, returns the index of the dirty card, else returns none. inline OptValue findNextDirtyCard(size_t fromIndex, size_t endIndex) const; - /// If there is a card card at or after \p fromIndex, at an index less than + /// If there is a card at or after \p fromIndex, at an index less than /// \p endIndex, returns the index of the clean card, else returns none. inline OptValue findNextCleanCard(size_t fromIndex, size_t endIndex) const; @@ -184,12 +236,24 @@ class CardTable { /// is the first object.) GCCell *firstObjForCard(unsigned index) const; + /// Get the segment size from SHSegmentInfo. This is only used in debug code + /// or when clearing the entire card table. + size_t getSegmentSize() const { + return (size_t)segmentInfo_.shiftedSegmentSize + << HERMESVM_LOG_HEAP_SEGMENT_SIZE; + } + + /// The end index of the card table (all valid indices should be smaller). + size_t getEndIndex() const { + return getSegmentSize() >> kLogCardSize; + } + #ifdef HERMES_EXTRA_DEBUG /// Temporary debugging hack: yield the numeric value of the boundaries_ array /// for the given \p index. /// TODO(T48709128): remove this when the problem is diagnosed. int8_t cardObjectTableValue(unsigned index) const { - return boundaries_[index]; + return boundaries()[index]; } /// These methods protect and unprotect, respectively, the memory @@ -211,16 +275,35 @@ class CardTable { /// /// \pre start is card-aligned. 
void verifyBoundaries(char *start, char *level) const; + + /// Find the object that owns the memory at \p loc. + GCCell *findObjectContaining(const void *loc) const; #endif // HERMES_SLOW_DEBUG private: #ifndef NDEBUG - /// Returns the pointer to the end of the storage containing \p ptr - /// (exclusive). - static void *storageEnd(const void *ptr); + /// Returns the pointer to the end of the storage starting at \p lowLim. + void *storageEnd(const void *lowLim) const { + return reinterpret_cast( + reinterpret_cast(lowLim) + getSegmentSize()); + } #endif - enum class CardStatus : char { Clean = 0, Dirty = 1 }; + void setCards(AtomicIfConcurrentGC *cards) { + segmentInfo_.cards = cards; + } + + AtomicIfConcurrentGC *cards() const { + return static_cast *>(segmentInfo_.cards); + } + + void setBoundaries(int8_t *boundaries) { + segmentInfo_.boundaries = boundaries; + } + + int8_t *boundaries() const { + return segmentInfo_.boundaries; + } /// \return The lowest address whose card can be dirtied in this array. i.e. /// The smallest address such that @@ -255,14 +338,27 @@ class CardTable { void cleanOrDirtyRange(size_t from, size_t to, CardStatus cleanOrDirty); - /// This needs to be atomic so that the background thread in Hades can safely - /// dirty cards when compacting. - std::array, kCardTableSize> cards_{}; + union { + /// The bytes occupied by segmentInfo_ are guaranteed not to be overridden + /// by writes to cards_ array. See static assertions in AlignedHeapSegment. + /// Pointers to the underlying CardStatus array and boundary array are + /// stored in it. Note that we could also store the boundary array in a + /// union along with inlineBoundaryArray_, since that array has unused + /// prefix bytes as well. It will save 8 bytes here. But it makes the size + /// check more complex as we need to ensure that the segment size is large + /// enough so that inlineBoundaryArray_ has enough unused prefix bytes to + /// store the pointer. 
+ SHSegmentInfo segmentInfo_; + /// This needs to be atomic so that the background thread in Hades can + /// safely dirty cards when compacting. + AtomicIfConcurrentGC + inlineCardStatusArray_[kInlineCardTableSize]{}; + }; /// See the comment at kHeapBytesPerCardByte above to see why this is /// necessary. static_assert( - sizeof(cards_[0]) == 1, + sizeof(inlineCardStatusArray_[0]) == 1, "Validate assumption that card table entries are one byte"); /// Each card has a corresponding signed byte in the boundaries_ table. A @@ -275,7 +371,7 @@ class CardTable { /// time: If we allocate a large object that crosses many cards, the first /// crossed cards gets a non-negative value, and each subsequent one uses the /// maximum exponent that stays within the card range for the object. - int8_t boundaries_[kCardTableSize]; + int8_t inlineBoundaryArray_[kInlineCardTableSize]; }; /// Implementations of inlines. @@ -305,7 +401,7 @@ inline size_t CardTable::addressToIndex(const void *addr) const { } inline const char *CardTable::indexToAddress(size_t index) const { - assert(index <= kValidIndices && "index must be within the index range"); + assert(index <= getEndIndex() && "index must be within the index range"); const char *res = base() + (index << kLogCardSize); assert( base() <= res && res <= storageEnd(base()) && @@ -314,7 +410,16 @@ inline const char *CardTable::indexToAddress(size_t index) const { } inline void CardTable::dirtyCardForAddress(const void *addr) { - cards_[addressToIndex(addr)].store( + // Make sure that this is not called on a large segment. 
+ assert( + inlineCardStatusArray_ == cards() && + "CardStatus array for this CardTable is allocated separately"); + inlineCardStatusArray_[addressToIndex(addr)].store( + CardStatus::Dirty, std::memory_order_relaxed); +} + +inline void CardTable::dirtyCardForAddressInLargeObj(const void *addr) { + cards()[addressToIndex(addr)].store( CardStatus::Dirty, std::memory_order_relaxed); } @@ -323,8 +428,22 @@ inline bool CardTable::isCardForAddressDirty(const void *addr) const { } inline bool CardTable::isCardForIndexDirty(size_t index) const { - assert(index < kValidIndices && "index is required to be in range."); - return cards_[index].load(std::memory_order_relaxed) == CardStatus::Dirty; + assert(index < getEndIndex() && "index is required to be in range."); + // Make sure that this is not called on a large segment. + assert( + inlineCardStatusArray_ == cards() && + "CardStatus array for this CardTable is allocated separately"); + return inlineCardStatusArray_[index].load(std::memory_order_relaxed) == + CardStatus::Dirty; +} + +inline bool CardTable::isCardForAddressDirtyInLargeObj(const void *addr) const { + return isCardForIndexDirtyInLargeObj(addressToIndex(addr)); +} + +inline bool CardTable::isCardForIndexDirtyInLargeObj(size_t index) const { + assert(index < getEndIndex() && "index is required to be in range."); + return cards()[index].load(std::memory_order_relaxed) == CardStatus::Dirty; } inline OptValue CardTable::findNextDirtyCard( @@ -348,9 +467,9 @@ inline CardTable::Boundary CardTable::nextBoundary(const char *level) const { } inline const char *CardTable::base() const { - // As we know the card table is laid out inline before the allocation region - // of its aligned heap segment, we can use its own this pointer as the base - // address. + // As we know the card table is laid out inline at the beginning of the + // segment storage, which is before the allocation region, we can use its own + // this pointer as the base address. 
return reinterpret_cast(this); } diff --git a/include/hermes/VM/GCBase.h b/include/hermes/VM/GCBase.h index c1114809745..1e6de491cc5 100644 --- a/include/hermes/VM/GCBase.h +++ b/include/hermes/VM/GCBase.h @@ -204,6 +204,12 @@ enum XorPtrKeyID { /// const GCSmallHermesValue *start, /// uint32_t numHVs); /// +/// The above barriers may have a variant with "ForLargeObj" suffix, which is +/// used when the heap location may be from a GCCell that supports large +/// allocation. This variant is less efficient since it has to load the cards +/// array through pointer in SHSegmentInfo, instead of the inline array field +/// in CardTable structure. +/// /// In debug builds: is a write barrier necessary for a write of the given /// GC pointer \p value to the given \p loc? /// bool needsWriteBarrier(void *loc, void *value); @@ -226,7 +232,7 @@ enum XorPtrKeyID { /// Return the maximum amount of bytes holdable by this heap. /// gcheapsize_t max() const; /// Return the total amount of bytes of storage this GC will require. -/// This will be a multiple of AlignedHeapSegment::storageSize(). +/// This will be a multiple of FixedSizeHeapSegment::storageSize(). /// gcheapsize_t storageFootprint() const; /// class GCBase { @@ -1154,18 +1160,44 @@ class GCBase { /// Default implementations for read and write barriers: do nothing. 
void writeBarrier(const GCHermesValue *loc, HermesValue value); void writeBarrier(const GCSmallHermesValue *loc, SmallHermesValue value); + void writeBarrierForLargeObj( + const GCCell *owningObj, + const GCHermesValue *loc, + HermesValue value); + void writeBarrierForLargeObj( + const GCCell *owningObj, + const GCSmallHermesValue *loc, + SmallHermesValue value); void writeBarrier(const GCPointerBase *loc, const GCCell *value); + void writeBarrierForLargeObj( + const GCCell *owningObj, + const GCPointerBase *loc, + const GCCell *value); void constructorWriteBarrier(const GCHermesValue *loc, HermesValue value); void constructorWriteBarrier( const GCSmallHermesValue *loc, SmallHermesValue value); + void constructorWriteBarrierForLargeObj( + const GCCell *owningObj, + const GCHermesValue *loc, + HermesValue value); + void constructorWriteBarrierForLargeObj( + const GCCell *owningObj, + const GCSmallHermesValue *loc, + SmallHermesValue value); void constructorWriteBarrier(const GCPointerBase *loc, const GCCell *value); + void constructorWriteBarrierForLargeObj( + const GCCell *owningObj, + const GCPointerBase *loc, + const GCCell *value); void writeBarrierRange(const GCHermesValue *start, uint32_t numHVs); void writeBarrierRange(const GCSmallHermesValue *start, uint32_t numHVs); void constructorWriteBarrierRange( + const GCCell *owningObj, const GCHermesValue *start, uint32_t numHVs); void constructorWriteBarrierRange( + const GCCell *owningObj, const GCSmallHermesValue *start, uint32_t numHVs); void snapshotWriteBarrier(const GCHermesValue *loc); diff --git a/include/hermes/VM/GCPointer-inline.h b/include/hermes/VM/GCPointer-inline.h index fa5f7633ed5..4db995331fe 100644 --- a/include/hermes/VM/GCPointer-inline.h +++ b/include/hermes/VM/GCPointer-inline.h @@ -59,6 +59,44 @@ GCPointerBase::set(PointerBase &base, CompressedPointer ptr, GC &gc) { setNoBarrier(ptr); } +inline void GCPointerBase::set( + PointerBase &base, + GCCell *ptr, + GC &gc, + const GCCell *owningObj) 
{ + assert( + (!ptr || gc.validPointer(ptr)) && + "Cannot set a GCPointer to an invalid pointer"); + // Write barrier must happen before the write. + gc.writeBarrierForLargeObj(owningObj, this, ptr); + setNoBarrier(CompressedPointer::encode(ptr, base)); +} + +inline void GCPointerBase::setNonNull( + PointerBase &base, + GCCell *ptr, + GC &gc, + const GCCell *owningObj) { + assert( + gc.validPointer(ptr) && "Cannot set a GCPointer to an invalid pointer"); + // Write barrier must happen before the write. + gc.writeBarrierForLargeObj(owningObj, this, ptr); + setNoBarrier(CompressedPointer::encodeNonNull(ptr, base)); +} + +inline void GCPointerBase::set( + PointerBase &base, + CompressedPointer ptr, + GC &gc, + const GCCell *owningObj) { + assert( + (!ptr || gc.validPointer(ptr.get(base))) && + "Cannot set a GCPointer to an invalid pointer"); + // Write barrier must happen before the write. + gc.writeBarrierForLargeObj(owningObj, this, ptr.get(base)); + setNoBarrier(ptr); +} + inline void GCPointerBase::setNull(GC &gc) { gc.snapshotWriteBarrier(this); setNoBarrier(CompressedPointer(nullptr)); diff --git a/include/hermes/VM/GCPointer.h b/include/hermes/VM/GCPointer.h index 09db5f06d87..cff5dc5b86e 100644 --- a/include/hermes/VM/GCPointer.h +++ b/include/hermes/VM/GCPointer.h @@ -34,7 +34,8 @@ class GCPointerBase : public CompressedPointer { class NoBarriers : public std::false_type {}; class YesBarriers : public std::true_type {}; - /// This must be used to assign a new value to this GCPointer. + /// This must be used to assign a new value to this GCPointer. This must not + /// be used if it lives in an object that supports large allocation. /// \param ptr The memory being pointed to. /// \param base The base of ptr. /// \param gc Used for write barriers. 
@@ -42,6 +43,22 @@ class GCPointerBase : public CompressedPointer {
 inline void set(PointerBase &base, CompressedPointer ptr, GC &gc);
 inline void setNonNull(PointerBase &base, GCCell *ptr, GC &gc);
+ /// This must be used to assign a new value to this GCPointer.
+ /// \param ptr The memory being pointed to.
+ /// \param base The base of ptr.
+ /// \param gc Used for write barriers.
+ /// \param owningObj The object that contains this GCPointer, used by the
+ /// write barriers.
+ inline void
+ set(PointerBase &base, GCCell *ptr, GC &gc, const GCCell *owningObj);
+ inline void set(
+ PointerBase &base,
+ CompressedPointer ptr,
+ GC &gc,
+ const GCCell *owningObj);
+ inline void
+ setNonNull(PointerBase &base, GCCell *ptr, GC &gc, const GCCell *owningObj);
+
 /// Set this pointer to null. This needs a write barrier in some types of
 /// garbage collectors.
 inline void setNull(GC &gc);
@@ -86,7 +103,8 @@ class GCPointer : public GCPointerBase {
 return vmcast(GCPointerBase::getNonNull(base));
 }
- /// Assign a new value to this GCPointer.
+ /// Assign a new value to this GCPointer. This must not be used if it lives in
+ /// an object that supports large allocation.
 /// \param base The base of ptr.
 /// \param ptr The memory being pointed to.
 /// \param gc Used for write barriers.
@@ -97,10 +115,33 @@ class GCPointer : public GCPointerBase {
 GCPointerBase::setNonNull(base, ptr, gc);
 }
- /// Convenience overload of GCPointer::set for other GCPointers.
+ /// Assign a new value to this GCPointer.
+ /// \param ptr The memory being pointed to.
+ /// \param gc Used for write barriers.
+ /// \param owningObj The object that contains this GCPointer, used by the
+ /// write barriers.
+ void set(PointerBase &base, T *ptr, GC &gc, const GCCell *owningObj) {
+ GCPointerBase::set(base, ptr, gc, owningObj);
+ }
+ void setNonNull(PointerBase &base, T *ptr, GC &gc, const GCCell *owningObj) {
+ GCPointerBase::setNonNull(base, ptr, gc, owningObj);
+ }
+
+ /// Convenience overload of GCPointer::set for other GCPointers. This must not
+ /// be used if it lives in an object that supports large allocation.
 void set(PointerBase &base, const GCPointer &ptr, GC &gc) {
 GCPointerBase::set(base, ptr, gc);
 }
+
+ /// Convenience overload of GCPointer::set for other GCPointers. \p owningObj
+ /// is used by the write barriers.
+ void set(
+ PointerBase &base,
+ const GCPointer &ptr,
+ GC &gc,
+ const GCCell *owningObj) {
+ GCPointerBase::set(base, ptr, gc, owningObj);
+ }
 };
 } // namespace vm
diff --git a/include/hermes/VM/HadesGC.h b/include/hermes/VM/HadesGC.h
index 1ff0d7219c8..8c2ce97d0f2 100644
--- a/include/hermes/VM/HadesGC.h
+++ b/include/hermes/VM/HadesGC.h
@@ -76,7 +76,7 @@ class HadesGC final : public GCBase {
 static constexpr uint32_t maxAllocationSizeImpl() {
 // The largest allocation allowable in Hades is the max size a single
 // segment supports.
- return AlignedHeapSegment::maxSize();
+ return FixedSizeHeapSegment::maxSize();
 }
 static constexpr uint32_t minAllocationSizeImpl() {
@@ -161,6 +161,21 @@ class HadesGC final : public GCBase {
 writeBarrierSlow(loc, value);
 }
 void writeBarrierSlow(const GCHermesValue *loc, HermesValue value);
+ void writeBarrierForLargeObj(
+ const GCCell *owningObj,
+ const GCHermesValue *loc,
+ HermesValue value) {
+ assert(
+ !calledByBackgroundThread() &&
+ "Write barrier invoked by background thread.");
+ // A pointer that lives in YG never needs any write barriers.
+ if (LLVM_UNLIKELY(!inYoungGen(loc))) + writeBarrierSlowForLargeObj(owningObj, loc, value); + } + void writeBarrierSlowForLargeObj( + const GCCell *owningObj, + const GCHermesValue *loc, + HermesValue value); void writeBarrier(const GCSmallHermesValue *loc, SmallHermesValue value) { assert( @@ -171,6 +186,21 @@ class HadesGC final : public GCBase { writeBarrierSlow(loc, value); } void writeBarrierSlow(const GCSmallHermesValue *loc, SmallHermesValue value); + void writeBarrierForLargeObj( + const GCCell *owningObj, + const GCSmallHermesValue *loc, + SmallHermesValue value) { + assert( + !calledByBackgroundThread() && + "Write barrier invoked by background thread."); + // A pointer that lives in YG never needs any write barriers. + if (LLVM_UNLIKELY(!inYoungGen(loc))) + writeBarrierSlowForLargeObj(owningObj, loc, value); + } + void writeBarrierSlowForLargeObj( + const GCCell *owningObj, + const GCSmallHermesValue *loc, + SmallHermesValue value); /// The given pointer value is being written at the given loc (required to /// be in the heap). The value may be null. Execute a write barrier. @@ -185,6 +215,21 @@ class HadesGC final : public GCBase { writeBarrierSlow(loc, value); } void writeBarrierSlow(const GCPointerBase *loc, const GCCell *value); + void writeBarrierForLargeObj( + const GCCell *owningObj, + const GCPointerBase *loc, + const GCCell *value) { + assert( + !calledByBackgroundThread() && + "Write barrier invoked by background thread."); + // A pointer that lives in YG never needs any write barriers. + if (LLVM_UNLIKELY(!inYoungGen(loc))) + writeBarrierSlowForLargeObj(owningObj, loc, value); + } + void writeBarrierSlowForLargeObj( + const GCCell *owningObj, + const GCPointerBase *loc, + const GCCell *value); /// Special versions of \p writeBarrier for when there was no previous value /// initialized into the space. 
@@ -194,6 +239,18 @@ class HadesGC final : public GCBase { constructorWriteBarrierSlow(loc, value); } void constructorWriteBarrierSlow(const GCHermesValue *loc, HermesValue value); + void constructorWriteBarrierForLargeObj( + const GCCell *owningObj, + const GCHermesValue *loc, + HermesValue value) { + // A pointer that lives in YG never needs any write barriers. + if (LLVM_UNLIKELY(!inYoungGen(loc))) + constructorWriteBarrierSlowForLargeObj(owningObj, loc, value); + } + void constructorWriteBarrierSlowForLargeObj( + const GCCell *owningObj, + const GCHermesValue *loc, + HermesValue value); void constructorWriteBarrier( const GCSmallHermesValue *loc, @@ -205,32 +262,56 @@ class HadesGC final : public GCBase { void constructorWriteBarrierSlow( const GCSmallHermesValue *loc, SmallHermesValue value); + void constructorWriteBarrierForLargeObj( + const GCCell *owningObj, + const GCSmallHermesValue *loc, + SmallHermesValue value) { + // A pointer that lives in YG never needs any write barriers. + if (LLVM_UNLIKELY(!inYoungGen(loc))) + constructorWriteBarrierSlowForLargeObj(owningObj, loc, value); + } + void constructorWriteBarrierSlowForLargeObj( + const GCCell *owningObj, + const GCSmallHermesValue *loc, + SmallHermesValue value); void constructorWriteBarrier(const GCPointerBase *loc, const GCCell *value) { // A pointer that lives in YG never needs any write barriers. if (LLVM_UNLIKELY(!inYoungGen(loc))) relocationWriteBarrier(loc, value); } + void constructorWriteBarrierForLargeObj( + const GCCell *owningObj, + const GCPointerBase *loc, + const GCCell *value) { + // A pointer that lives in YG never needs any write barriers. + if (LLVM_UNLIKELY(!inYoungGen(loc))) + relocationWriteBarrierForLargeObj(owningObj, loc, value); + } void constructorWriteBarrierRange( + const GCCell *owningObj, const GCHermesValue *start, uint32_t numHVs) { // A pointer that lives in YG never needs any write barriers. 
if (LLVM_UNLIKELY(!inYoungGen(start))) - constructorWriteBarrierRangeSlow(start, numHVs); + constructorWriteBarrierRangeSlow(owningObj, start, numHVs); } void constructorWriteBarrierRangeSlow( + const GCCell *owningObj, const GCHermesValue *start, uint32_t numHVs); void constructorWriteBarrierRange( + const GCCell *owningObj, const GCSmallHermesValue *start, uint32_t numHVs) { // A pointer that lives in YG never needs any write barriers. if (LLVM_UNLIKELY(!inYoungGen(start))) - constructorWriteBarrierRangeSlow(start, numHVs); + constructorWriteBarrierRangeSlow(owningObj, start, numHVs); } void constructorWriteBarrierRangeSlow( + const GCCell *owningObj, const GCSmallHermesValue *start, uint32_t numHVs); @@ -297,7 +378,7 @@ class HadesGC final : public GCBase { /// \return true if the pointer lives in the young generation. bool inYoungGen(const void *p) const override { - return youngGen_.lowLim() == AlignedHeapSegment::storageStart(p); + return youngGen_.lowLim() == FixedSizeHeapSegment::storageStart(p); } bool inYoungGen(CompressedPointer p) const { return p.getSegmentStart() == youngGenCP_; @@ -361,12 +442,12 @@ class HadesGC final : public GCBase { /// Call \p callback on every non-freelist cell allocated in this segment. template static void forAllObjsInSegment( - AlignedHeapSegment &seg, + FixedSizeHeapSegment &seg, CallbackFunction callback); /// Only call the callback on cells without forwarding pointers. 
template static void forCompactedObjsInSegment( - AlignedHeapSegment &seg, + FixedSizeHeapSegment &seg, CallbackFunction callback, PointerBase &base); @@ -374,21 +455,21 @@ class HadesGC final : public GCBase { public: explicit OldGen(HadesGC &gc); - std::deque::iterator begin(); - std::deque::iterator end(); - std::deque::const_iterator begin() const; - std::deque::const_iterator end() const; + std::deque::iterator begin(); + std::deque::iterator end(); + std::deque::const_iterator begin() const; + std::deque::const_iterator end() const; size_t numSegments() const; - AlignedHeapSegment &operator[](size_t i); + FixedSizeHeapSegment &operator[](size_t i); /// Take ownership of the given segment. - void addSegment(AlignedHeapSegment seg); + void addSegment(FixedSizeHeapSegment seg); /// Remove the last segment from the OG. /// \return the segment that was removed. - AlignedHeapSegment popSegment(); + FixedSizeHeapSegment popSegment(); /// Indicate that OG should target having a size of \p targetSizeBytes. void setTargetSizeBytes(size_t targetSizeBytes); @@ -507,7 +588,7 @@ class HadesGC final : public GCBase { static constexpr size_t kMinSizeForLargeBlock = 1 << kLogMinSizeForLargeBlock; static constexpr size_t kNumLargeFreelistBuckets = - llvh::detail::ConstantLog2::value - + llvh::detail::ConstantLog2::value - kLogMinSizeForLargeBlock + 1; static constexpr size_t kNumFreelistBuckets = kNumSmallFreelistBuckets + kNumLargeFreelistBuckets; @@ -578,7 +659,7 @@ class HadesGC final : public GCBase { /// Use a std::deque instead of a std::vector so that references into it /// remain valid across a push_back. - std::deque segments_; + std::deque segments_; /// See \c targetSizeBytes() above. ExponentialMovingAverage targetSizeBytes_{0, 0}; @@ -660,9 +741,9 @@ class HadesGC final : public GCBase { /// Keeps the storage provider alive until after the GC is fully destructed. 
std::shared_ptr provider_; - /// youngGen is a bump-pointer space, so it can re-use AlignedHeapSegment. + /// youngGen is a bump-pointer space, so it can re-use FixedSizeHeapSegment. /// Protected by gcMutex_. - AlignedHeapSegment youngGen_; + FixedSizeHeapSegment youngGen_; AssignableCompressedPointer youngGenCP_; /// List of cells in YG that have finalizers. Iterate through this to clean @@ -672,7 +753,7 @@ class HadesGC final : public GCBase { /// Since YG collection times are the primary driver of pause times, it is /// useful to have a knob to reduce the effective size of the YG. This number - /// is the fraction of AlignedHeapSegment::maxSize() that we should use for + /// is the fraction of FixedSizeHeapSegment::maxSize() that we should use for /// the YG.. Note that we only set the YG size using this at the end of the /// first real YG, since doing it for direct promotions would waste OG memory /// without a pause time benefit. @@ -772,7 +853,7 @@ class HadesGC final : public GCBase { /// \return true if the pointer lives in the segment that is being marked or /// evacuated for compaction. bool contains(const void *p) const { - return start == AlignedHeapSegment::storageStart(p); + return start == FixedSizeHeapSegment::storageStart(p); } bool contains(CompressedPointer p) const { return p.getSegmentStart() == startCP; @@ -781,7 +862,7 @@ class HadesGC final : public GCBase { /// \return true if the pointer lives in the segment that is currently being /// evacuated for compaction. bool evacContains(const void *p) const { - return evacStart == AlignedHeapSegment::storageStart(p); + return evacStart == FixedSizeHeapSegment::storageStart(p); } bool evacContains(CompressedPointer p) const { return p.getSegmentStart() == evacStartCP; @@ -829,7 +910,7 @@ class HadesGC final : public GCBase { /// The segment being compacted. This should be removed from the OG right /// after it is identified, and freed entirely once the compaction is /// complete. 
- std::shared_ptr segment; + std::shared_ptr segment; } compactee_; /// The number of compactions this GC has performed. @@ -964,7 +1045,7 @@ class HadesGC final : public GCBase { template void scanDirtyCardsForSegment( EvacAcceptor &acceptor, - AlignedHeapSegment &segment); + FixedSizeHeapSegment &segment); /// Find all pointers from OG into the YG/compactee during a YG collection. /// This is done quickly through use of write barriers that detect the @@ -991,6 +1072,10 @@ class HadesGC final : public GCBase { /// pointers into YG and for tracking newly created pointers into the /// compactee. void relocationWriteBarrier(const void *loc, const void *value); + void relocationWriteBarrierForLargeObj( + const GCCell *owningObj, + const void *loc, + const GCCell *value); /// Finalize all objects in YG that have finalizers. void finalizeYoungGenObjects(); @@ -1011,19 +1096,19 @@ class HadesGC final : public GCBase { uint64_t heapFootprint() const; /// Accessor for the YG. - AlignedHeapSegment &youngGen() { + FixedSizeHeapSegment &youngGen() { return youngGen_; } - const AlignedHeapSegment &youngGen() const { + const FixedSizeHeapSegment &youngGen() const { return youngGen_; } /// Create a new segment (to be used by either YG or OG). - llvh::ErrorOr createSegment(); + llvh::ErrorOr createSegment(); /// Set a given segment as the YG segment. /// \return the previous YG segment. - AlignedHeapSegment setYoungGen(AlignedHeapSegment seg); + FixedSizeHeapSegment setYoungGen(FixedSizeHeapSegment seg); /// Get/set the current number of external bytes used by the YG. size_t getYoungGenExternalBytes() const; @@ -1048,7 +1133,7 @@ class HadesGC final : public GCBase { /// \param extraName append this to the name of the segment. Must be /// non-empty. void addSegmentExtentToCrashManager( - const AlignedHeapSegment &seg, + const FixedSizeHeapSegment &seg, const std::string &extraName); /// Deletes a segment from the CrashManager's custom data. 
diff --git a/include/hermes/VM/HeapRuntime.h b/include/hermes/VM/HeapRuntime.h index c87aed40d76..a0a3897f041 100644 --- a/include/hermes/VM/HeapRuntime.h +++ b/include/hermes/VM/HeapRuntime.h @@ -22,7 +22,7 @@ class HeapRuntime { public: ~HeapRuntime() { runtime_->~RT(); - sp_->deleteStorage(runtime_); + sp_->deleteStorage(runtime_, kHeapRuntimeStorageSize); } /// Allocate a segment and create an aliased shared_ptr that points to the @@ -36,16 +36,16 @@ class HeapRuntime { private: HeapRuntime(std::shared_ptr sp) : sp_{std::move(sp)} { - auto ptrOrError = sp_->newStorage("hermes-rt"); + auto ptrOrError = sp_->newStorage(kHeapRuntimeStorageSize, "hermes-rt"); if (!ptrOrError) hermes_fatal("Cannot initialize Runtime storage.", ptrOrError.getError()); - static_assert( - sizeof(RT) < AlignedHeapSegment::storageSize(), "Segments too small."); + static_assert(sizeof(RT) < kHeapRuntimeStorageSize, "Segments too small."); runtime_ = static_cast(*ptrOrError); } std::shared_ptr sp_; RT *runtime_; + static constexpr size_t kHeapRuntimeStorageSize = FixedSizeHeapSegment::kSize; }; } // namespace vm } // namespace hermes diff --git a/include/hermes/VM/HermesValue-inline.h b/include/hermes/VM/HermesValue-inline.h index d38a9219879..4271048c8d7 100644 --- a/include/hermes/VM/HermesValue-inline.h +++ b/include/hermes/VM/HermesValue-inline.h @@ -37,6 +37,18 @@ GCHermesValueBase::GCHermesValueBase(HVType hv, GC &gc) : HVType{hv} { gc.constructorWriteBarrier(this, hv); } +template +template +GCHermesValueBase::GCHermesValueBase( + HVType hv, + GC &gc, + const GCCell *owningObj) + : HVType{hv} { + assert(!hv.isPointer() || hv.getPointer()); + if (NeedsBarriers::value) + gc.constructorWriteBarrierForLargeObj(owningObj, this, hv); +} + template template GCHermesValueBase::GCHermesValueBase(HVType hv, GC &gc, std::nullptr_t) @@ -61,6 +73,22 @@ inline void GCHermesValueBase::set(HVType hv, GC &gc) { HVType::setNoBarrier(hv); } +template +template +inline void 
+GCHermesValueBase::set(HVType hv, GC &gc, const GCCell *owningObj) { + if (hv.isPointer()) { + HERMES_SLOW_ASSERT( + gc.validPointer(hv.getPointer(gc.getPointerBase())) && + "Setting an invalid pointer into a GCHermesValue"); + } + assert(NeedsBarriers::value || !gc.needsWriteBarrier(this, hv)); + if constexpr (NeedsBarriers::value) { + gc.writeBarrierForLargeObj(owningObj, this, hv); + } + HVType::setNoBarrier(hv); +} + template void GCHermesValueBase::setNonPtr(HVType hv, GC &gc) { assert(!hv.isPointer()); @@ -182,7 +210,8 @@ inline GCHermesValueBase *GCHermesValueBase::uninitialized_copy( GCHermesValueBase *first, GCHermesValueBase *last, GCHermesValueBase *result, - GC &gc) { + GC &gc, + const GCCell *owningObj) { #ifndef NDEBUG uintptr_t fromFirst = reinterpret_cast(first), fromLast = reinterpret_cast(last); @@ -194,7 +223,7 @@ inline GCHermesValueBase *GCHermesValueBase::uninitialized_copy( "Uninitialized range cannot overlap with an initialized one."); #endif - gc.constructorWriteBarrierRange(result, last - first); + gc.constructorWriteBarrierRange(owningObj, result, last - first); // memcpy is fine for an uninitialized copy. std::memcpy( reinterpret_cast(result), first, (last - first) * sizeof(HVType)); diff --git a/include/hermes/VM/HermesValue.h b/include/hermes/VM/HermesValue.h index 7e377d44754..2400e7a3f59 100644 --- a/include/hermes/VM/HermesValue.h +++ b/include/hermes/VM/HermesValue.h @@ -520,9 +520,15 @@ template class GCHermesValueBase final : public HVType { public: GCHermesValueBase() : HVType(HVType::encodeUndefinedValue()) {} - /// Initialize a GCHermesValue from another HV. Performs a write barrier. + /// Initialize a GCHermesValue from another HV. Performs a write barrier. This + /// must not be used if it lives in an object that supports large allocation. template GCHermesValueBase(HVType hv, GC &gc); + /// Initialize a GCHermesValue from another HV. 
Performs a write barrier using
+ /// \p owningObj, which owns this GCHermesValue and may support large
+ /// allocation.
+ template
+ GCHermesValueBase(HVType hv, GC &gc, const GCCell *owningObj);
 /// Initialize a GCHermesValue from a non-pointer HV. Might perform a write
 /// barrier, depending on the GC.
 /// NOTE: The last parameter is unused, but acts as an overload selector.
@@ -530,11 +536,19 @@ class GCHermesValueBase final : public HVType {
 GCHermesValueBase(HVType hv, GC &gc, std::nullptr_t);
 GCHermesValueBase(const HVType &) = delete;
- /// The HermesValue \p hv may be an object pointer. Assign the
- /// value, and perform any necessary write barriers.
+ /// The HermesValue \p hv may be an object pointer. Assign the value, and
+ /// perform any necessary write barriers. This must not be used if it lives in
+ /// an object that supports large allocation.
 template
 inline void set(HVType hv, GC &gc);
+ /// The HermesValue \p hv may be an object pointer. Assign the value, and
+ /// perform any necessary write barriers. \p owningObj is the object that
+ /// contains this GCHermesValueBase, and it may support large allocation,
+ /// for which the object pointer is needed by write barriers.
+ template
+ inline void set(HVType hv, GC &gc, const GCCell *owningObj);
+
 /// The HermesValue \p hv must not be an object pointer. Assign the
 /// value.
 /// Some GCs still need to do a write barrier though, so pass a GC parameter.
@@ -589,7 +603,8 @@ class GCHermesValueBase final : public HVType {
 GCHermesValueBase *first,
 GCHermesValueBase *last,
 GCHermesValueBase *result,
- GC &gc);
+ GC &gc,
+ const GCCell *owningObj);
 /// Copies a range of values and performs a write barrier on each.
template diff --git a/include/hermes/VM/LimitedStorageProvider.h b/include/hermes/VM/LimitedStorageProvider.h index a060435027b..41d575625a0 100644 --- a/include/hermes/VM/LimitedStorageProvider.h +++ b/include/hermes/VM/LimitedStorageProvider.h @@ -29,9 +29,9 @@ class LimitedStorageProvider final : public StorageProvider { : delegate_(std::move(provider)), limit_(limit) {} protected: - llvh::ErrorOr newStorageImpl(const char *name) override; + llvh::ErrorOr newStorageImpl(size_t sz, const char *name) override; - void deleteStorageImpl(void *storage) override; + void deleteStorageImpl(void *storage, size_t sz) override; }; } // namespace vm diff --git a/include/hermes/VM/MallocGC.h b/include/hermes/VM/MallocGC.h index 98ed5c523a3..ea0f2d9970b 100644 --- a/include/hermes/VM/MallocGC.h +++ b/include/hermes/VM/MallocGC.h @@ -235,14 +235,42 @@ class MallocGC final : public GCBase { void writeBarrier(const GCHermesValue *, HermesValue) {} void writeBarrier(const GCSmallHermesValue *, SmallHermesValue) {} + void + writeBarrierForLargeObj(const GCCell *, const GCHermesValue *, HermesValue) {} + void writeBarrierForLargeObj( + const GCCell *, + const GCSmallHermesValue *, + SmallHermesValue) {} void writeBarrier(const GCPointerBase *, const GCCell *) {} + void writeBarrierForLargeObj( + const GCCell *, + const GCPointerBase *, + const GCCell *) {} void constructorWriteBarrier(const GCHermesValue *, HermesValue) {} void constructorWriteBarrier(const GCSmallHermesValue *, SmallHermesValue) {} + void constructorWriteBarrierForLargeObj( + const GCCell *, + const GCSmallHermesValue *, + SmallHermesValue) {} + void constructorWriteBarrierForLargeObj( + const GCCell *, + const GCHermesValue *, + HermesValue) {} void constructorWriteBarrier(const GCPointerBase *, const GCCell *) {} + void constructorWriteBarrierForLargeObj( + const GCCell *, + const GCPointerBase *, + const GCCell *) {} void writeBarrierRange(const GCHermesValue *, uint32_t) {} void writeBarrierRange(const 
GCSmallHermesValue *, uint32_t) {} - void constructorWriteBarrierRange(const GCHermesValue *, uint32_t) {} - void constructorWriteBarrierRange(const GCSmallHermesValue *, uint32_t) {} + void constructorWriteBarrierRange( + const GCCell *, + const GCHermesValue *, + uint32_t) {} + void constructorWriteBarrierRange( + const GCCell *, + const GCSmallHermesValue *, + uint32_t) {} void snapshotWriteBarrier(const GCHermesValue *) {} void snapshotWriteBarrier(const GCSmallHermesValue *) {} void snapshotWriteBarrier(const GCPointerBase *) {} diff --git a/include/hermes/VM/SingleObject.h b/include/hermes/VM/SingleObject.h index 1262e6d49b4..b4d70d9465b 100644 --- a/include/hermes/VM/SingleObject.h +++ b/include/hermes/VM/SingleObject.h @@ -53,7 +53,7 @@ struct IsGCObject> { template const ObjectVTable SingleObject::vt = { - VTable(kind, cellSize>(), nullptr, nullptr), + VTable(kind, cellSize>()), SingleObject::_getOwnIndexedRangeImpl, SingleObject::_haveOwnIndexedImpl, SingleObject::_getOwnIndexedPropertyFlagsImpl, diff --git a/include/hermes/VM/StorageProvider.h b/include/hermes/VM/StorageProvider.h index 41d87f82ac5..9738bf94d63 100644 --- a/include/hermes/VM/StorageProvider.h +++ b/include/hermes/VM/StorageProvider.h @@ -37,20 +37,17 @@ class StorageProvider { /// @} - /// Create a new segment memory space. - llvh::ErrorOr newStorage() { - return newStorage(nullptr); - } - /// Create a new segment memory space and give this memory the name \p name. - /// \return A pointer to a block of memory that has - /// AlignedHeapSegment::storageSize() bytes, and is aligned on - /// AlignedHeapSegment::storageSize(). - llvh::ErrorOr newStorage(const char *name); + /// \return A pointer to a block of memory that has \p sz bytes, and is + /// aligned on AlignedHeapSegment::kSegmentUnitSize. Note that \p sz must + /// be non-zero and equal to a multiple of + /// AlignedHeapSegment::kSegmentUnitSize. 
+ llvh::ErrorOr newStorage(size_t sz, const char *name = nullptr); /// Delete the given segment's memory space, and make it available for re-use. - /// \post Nothing in the range [storage, storage + - /// AlignedHeapSegment::storageSize()) is valid memory to be read or written. - void deleteStorage(void *storage); + /// Note that \p sz must be the same as that used when allocating \p storage. + /// \post Nothing in the range [storage, storage + sz) is valid memory to be + /// read or written. + void deleteStorage(void *storage, size_t sz); /// The number of storages this provider has allocated in its lifetime. size_t numSucceededAllocs() const; @@ -67,8 +64,12 @@ size_t numLiveAllocs() const; protected: - virtual llvh::ErrorOr newStorageImpl(const char *name) = 0; - virtual void deleteStorageImpl(void *storage) = 0; + /// \pre \p sz is non-zero and equal to a multiple of + /// AlignedHeapSegment::kSegmentUnitSize. + virtual llvh::ErrorOr newStorageImpl(size_t sz, const char *name) = 0; + /// \pre \p sz is non-zero and equal to a multiple of + /// AlignedHeapSegment::kSegmentUnitSize. + virtual void deleteStorageImpl(void *storage, size_t sz) = 0; private: size_t numSucceededAllocs_{0}; diff --git a/include/hermes/VM/StringPrimitive.h b/include/hermes/VM/StringPrimitive.h index e98cec3f75e..e65f0a01326 100644 --- a/include/hermes/VM/StringPrimitive.h +++ b/include/hermes/VM/StringPrimitive.h @@ -838,6 +838,7 @@ template const VTable DynamicStringPrimitive::vt = VTable( DynamicStringPrimitive::getCellKind(), 0, + false, nullptr, nullptr, nullptr @@ -865,6 +866,7 @@ template const VTable ExternalStringPrimitive::vt = VTable( ExternalStringPrimitive::getCellKind(), 0, + false, ExternalStringPrimitive::_finalizeImpl, ExternalStringPrimitive::_mallocSizeImpl, nullptr @@ -886,6 +888,7 @@ template const VTable BufferedStringPrimitive::vt = VTable( BufferedStringPrimitive::getCellKind(), 0, + false, nullptr, // finalize. 
nullptr, // mallocSize nullptr diff --git a/include/hermes/VM/VTable.h b/include/hermes/VM/VTable.h index baf551898b8..35fd792b853 100644 --- a/include/hermes/VM/VTable.h +++ b/include/hermes/VM/VTable.h @@ -106,6 +106,8 @@ struct VTable { /// If it is variable sized, it should inherit from \see /// VariableSizeRuntimeCell. const uint32_t size; + /// Whether the cell supports large allocation. + bool allowLargeAlloc; /// Called during GC when an object becomes unreachable. Must not perform any /// allocations or access any garbage-collectable objects. Unless an /// operation is documented to be safe to call from a finalizer, it probably @@ -137,6 +139,7 @@ struct VTable { constexpr explicit VTable( CellKind kind, uint32_t size, + bool allowLargeAlloc = false, FinalizeCallback *finalize = nullptr, MallocSizeCallback *mallocSize = nullptr, TrimSizeCallback *trimSize = nullptr @@ -153,6 +156,7 @@ struct VTable { ) : kind(kind), size(heapAlignSize(size)), + allowLargeAlloc(allowLargeAlloc), finalize_(finalize), mallocSize_(mallocSize), trimSize_(trimSize) diff --git a/include/hermes/VM/sh_segment_info.h b/include/hermes/VM/sh_segment_info.h index ae4c7ebdf51..ae38138ccfb 100644 --- a/include/hermes/VM/sh_segment_info.h +++ b/include/hermes/VM/sh_segment_info.h @@ -12,6 +12,17 @@ /// contain segment-specific information. typedef struct SHSegmentInfo { unsigned index; + // Segments have sizes that are multiples of + // 1< here to avoid using + /// C++ type and forward declaring nested type. + void *cards; + /// Pointer that points to the boundary array for this segment. 
+ int8_t *boundaries; } SHSegmentInfo; #endif diff --git a/lib/VM/ArrayStorage.cpp b/lib/VM/ArrayStorage.cpp index a4fc8acba0c..98f952bfaa5 100644 --- a/lib/VM/ArrayStorage.cpp +++ b/lib/VM/ArrayStorage.cpp @@ -19,6 +19,7 @@ template const VTable ArrayStorageBase::vt( ArrayStorageBase::getCellKind(), 0, + false, nullptr, nullptr, _trimSizeCallback @@ -103,7 +104,8 @@ ExecutionStatus ArrayStorageBase::reallocateToLarger( { GCHVType *from = self->data() + fromFirst; GCHVType *to = newSelf->data() + toFirst; - GCHVType::uninitialized_copy(from, from + copySize, to, runtime.getHeap()); + GCHVType::uninitialized_copy( + from, from + copySize, to, runtime.getHeap(), newSelf); } // Initialize the elements before the first copied element. diff --git a/lib/VM/Callable.cpp b/lib/VM/Callable.cpp index 2127e25f27f..6143b80b973 100644 --- a/lib/VM/Callable.cpp +++ b/lib/VM/Callable.cpp @@ -593,6 +593,7 @@ const CallableVTable BoundFunction::vt{ VTable( CellKind::BoundFunctionKind, cellSize(), + false, nullptr, nullptr, nullptr @@ -905,6 +906,7 @@ const CallableVTable NativeJSFunction::vt{ VTable( CellKind::NativeJSFunctionKind, cellSize(), + false, nullptr, nullptr, nullptr @@ -1054,6 +1056,7 @@ const CallableVTable NativeFunction::vt{ VTable( CellKind::NativeFunctionKind, cellSize(), + false, nullptr, nullptr, nullptr @@ -1233,6 +1236,7 @@ const CallableVTable NativeConstructor::vt{ VTable( CellKind::NativeConstructorKind, cellSize(), + false, nullptr, nullptr, nullptr @@ -1284,6 +1288,7 @@ const CallableVTable JSFunction::vt{ VTable( CellKind::JSFunctionKind, cellSize(), + false, nullptr, nullptr, nullptr diff --git a/lib/VM/DecoratedObject.cpp b/lib/VM/DecoratedObject.cpp index cf221061048..a0a47025ed2 100644 --- a/lib/VM/DecoratedObject.cpp +++ b/lib/VM/DecoratedObject.cpp @@ -22,6 +22,7 @@ const ObjectVTable DecoratedObject::vt{ VTable( CellKind::DecoratedObjectKind, cellSize(), + false, DecoratedObject::_finalizeImpl, DecoratedObject::_mallocSizeImpl), 
DecoratedObject::_getOwnIndexedRangeImpl, diff --git a/lib/VM/Domain.cpp b/lib/VM/Domain.cpp index c0d2b7b3916..a530137045f 100644 --- a/lib/VM/Domain.cpp +++ b/lib/VM/Domain.cpp @@ -18,6 +18,7 @@ namespace vm { const VTable Domain::vt{ CellKind::DomainKind, cellSize(), + false, _finalizeImpl, _mallocSizeImpl, nullptr diff --git a/lib/VM/DummyObject.cpp b/lib/VM/DummyObject.cpp index cab35da5c22..8eaa7e2b243 100644 --- a/lib/VM/DummyObject.cpp +++ b/lib/VM/DummyObject.cpp @@ -19,6 +19,7 @@ namespace testhelpers { const VTable DummyObject::vt{ CellKind::DummyObjectKind, cellSize(), + false, _finalizeImpl, _mallocSizeImpl, nullptr diff --git a/lib/VM/FastArray.cpp b/lib/VM/FastArray.cpp index a7dd89f23e9..1b31077f03b 100644 --- a/lib/VM/FastArray.cpp +++ b/lib/VM/FastArray.cpp @@ -28,6 +28,7 @@ const ObjectVTable FastArray::vt{ VTable( CellKind::FastArrayKind, cellSize(), + false, nullptr, nullptr, nullptr diff --git a/lib/VM/GCBase.cpp b/lib/VM/GCBase.cpp index 1247b35828d..57172b39b4e 100644 --- a/lib/VM/GCBase.cpp +++ b/lib/VM/GCBase.cpp @@ -965,23 +965,63 @@ bool GCBase::shouldSanitizeHandles() { runtimeGCDispatch([&](auto *gc) { gc->name(arg1, arg2); }); \ } +#define GCBASE_BARRIER_3(name, type1, type2, type3) \ + void GCBase::name(type1 arg1, type2 arg2, type3 arg3) { \ + runtimeGCDispatch([&](auto *gc) { gc->name(arg1, arg2, arg3); }); \ + } + GCBASE_BARRIER_2(writeBarrier, const GCHermesValue *, HermesValue); +GCBASE_BARRIER_3( + writeBarrierForLargeObj, + const GCCell *, + const GCHermesValue *, + HermesValue); GCBASE_BARRIER_2(writeBarrier, const GCSmallHermesValue *, SmallHermesValue); +GCBASE_BARRIER_3( + writeBarrierForLargeObj, + const GCCell *, + const GCSmallHermesValue *, + SmallHermesValue); GCBASE_BARRIER_2(writeBarrier, const GCPointerBase *, const GCCell *); +GCBASE_BARRIER_3( + writeBarrierForLargeObj, + const GCCell *, + const GCPointerBase *, + const GCCell *); GCBASE_BARRIER_2(constructorWriteBarrier, const GCHermesValue *, HermesValue); 
+GCBASE_BARRIER_3( + constructorWriteBarrierForLargeObj, + const GCCell *, + const GCHermesValue *, + HermesValue); GCBASE_BARRIER_2( constructorWriteBarrier, const GCSmallHermesValue *, SmallHermesValue); +GCBASE_BARRIER_3( + constructorWriteBarrierForLargeObj, + const GCCell *, + const GCSmallHermesValue *, + SmallHermesValue); GCBASE_BARRIER_2( constructorWriteBarrier, const GCPointerBase *, const GCCell *); +GCBASE_BARRIER_3( + constructorWriteBarrierForLargeObj, + const GCCell *, + const GCPointerBase *, + const GCCell *); GCBASE_BARRIER_2(writeBarrierRange, const GCHermesValue *, uint32_t); GCBASE_BARRIER_2(writeBarrierRange, const GCSmallHermesValue *, uint32_t); -GCBASE_BARRIER_2(constructorWriteBarrierRange, const GCHermesValue *, uint32_t); -GCBASE_BARRIER_2( +GCBASE_BARRIER_3( + constructorWriteBarrierRange, + const GCCell *, + const GCHermesValue *, + uint32_t); +GCBASE_BARRIER_3( constructorWriteBarrierRange, + const GCCell *, const GCSmallHermesValue *, uint32_t); GCBASE_BARRIER_1(snapshotWriteBarrier, const GCHermesValue *); diff --git a/lib/VM/HiddenClass.cpp b/lib/VM/HiddenClass.cpp index fcd069eb0a5..f5948000928 100644 --- a/lib/VM/HiddenClass.cpp +++ b/lib/VM/HiddenClass.cpp @@ -95,6 +95,7 @@ void TransitionMap::uncleanMakeLarge(Runtime &runtime) { const VTable HiddenClass::vt{ CellKind::HiddenClassKind, cellSize(), + false, _finalizeImpl, _mallocSizeImpl, nullptr diff --git a/lib/VM/HostModel.cpp b/lib/VM/HostModel.cpp index a87c1c6c91b..512e7e709db 100644 --- a/lib/VM/HostModel.cpp +++ b/lib/VM/HostModel.cpp @@ -20,6 +20,7 @@ const CallableVTable FinalizableNativeFunction::vt{ VTable( CellKind::FinalizableNativeFunctionKind, cellSize(), + false, FinalizableNativeFunction::_finalizeImpl), FinalizableNativeFunction::_getOwnIndexedRangeImpl, FinalizableNativeFunction::_haveOwnIndexedImpl, @@ -81,6 +82,7 @@ const ObjectVTable HostObject::vt{ VTable( CellKind::HostObjectKind, cellSize(), + false, HostObject::_finalizeImpl), 
HostObject::_getOwnIndexedRangeImpl, HostObject::_haveOwnIndexedImpl, diff --git a/lib/VM/JSArray.cpp b/lib/VM/JSArray.cpp index 3e43bd9d79b..8f772873cf1 100644 --- a/lib/VM/JSArray.cpp +++ b/lib/VM/JSArray.cpp @@ -372,6 +372,7 @@ const ObjectVTable Arguments::vt{ VTable( CellKind::ArgumentsKind, cellSize(), + false, nullptr, nullptr, nullptr @@ -493,6 +494,7 @@ const ObjectVTable JSArray::vt{ VTable( CellKind::JSArrayKind, cellSize(), + false, nullptr, nullptr, nullptr diff --git a/lib/VM/JSArrayBuffer.cpp b/lib/VM/JSArrayBuffer.cpp index 1a60101d143..d24938317bf 100644 --- a/lib/VM/JSArrayBuffer.cpp +++ b/lib/VM/JSArrayBuffer.cpp @@ -20,6 +20,7 @@ const ObjectVTable JSArrayBuffer::vt{ VTable( CellKind::JSArrayBufferKind, cellSize(), + false, _finalizeImpl, _mallocSizeImpl, nullptr diff --git a/lib/VM/JSCallSite.cpp b/lib/VM/JSCallSite.cpp index 2e6f0c71f5e..d63578b566b 100644 --- a/lib/VM/JSCallSite.cpp +++ b/lib/VM/JSCallSite.cpp @@ -15,7 +15,7 @@ namespace hermes { namespace vm { const ObjectVTable JSCallSite::vt{ - VTable(CellKind::JSCallSiteKind, cellSize(), nullptr, nullptr), + VTable(CellKind::JSCallSiteKind, cellSize()), JSCallSite::_getOwnIndexedRangeImpl, JSCallSite::_haveOwnIndexedImpl, JSCallSite::_getOwnIndexedPropertyFlagsImpl, diff --git a/lib/VM/JSError.cpp b/lib/VM/JSError.cpp index 5726d58bb69..c2a00492c85 100644 --- a/lib/VM/JSError.cpp +++ b/lib/VM/JSError.cpp @@ -31,6 +31,7 @@ const ObjectVTable JSError::vt{ VTable( CellKind::JSErrorKind, cellSize(), + false, JSError::_finalizeImpl, JSError::_mallocSizeImpl), JSError::_getOwnIndexedRangeImpl, diff --git a/lib/VM/JSObject.cpp b/lib/VM/JSObject.cpp index 02126de5a86..ef22482436c 100644 --- a/lib/VM/JSObject.cpp +++ b/lib/VM/JSObject.cpp @@ -26,6 +26,7 @@ const ObjectVTable JSObject::vt{ VTable( CellKind::JSObjectKind, cellSize(), + false, nullptr, nullptr, nullptr diff --git a/lib/VM/JSRegExp.cpp b/lib/VM/JSRegExp.cpp index a8140cfd34c..d8ee6c7e3bd 100644 --- a/lib/VM/JSRegExp.cpp +++ 
b/lib/VM/JSRegExp.cpp @@ -27,6 +27,7 @@ const ObjectVTable JSRegExp::vt{ VTable( CellKind::JSRegExpKind, cellSize(), + false, JSRegExp::_finalizeImpl, JSRegExp::_mallocSizeImpl, nullptr diff --git a/lib/VM/JSWeakMapImpl.cpp b/lib/VM/JSWeakMapImpl.cpp index c64134ad9c1..eb8e48f5469 100644 --- a/lib/VM/JSWeakMapImpl.cpp +++ b/lib/VM/JSWeakMapImpl.cpp @@ -206,6 +206,7 @@ const ObjectVTable JSWeakMapImpl::vt{ VTable( C, cellSize(), + false, JSWeakMapImpl::_finalizeImpl, JSWeakMapImpl::_mallocSizeImpl, nullptr diff --git a/lib/VM/JSWeakRef.cpp b/lib/VM/JSWeakRef.cpp index 7d74a2125d5..2fadbe2ebd8 100644 --- a/lib/VM/JSWeakRef.cpp +++ b/lib/VM/JSWeakRef.cpp @@ -17,6 +17,7 @@ const ObjectVTable JSWeakRef::vt{ VTable( CellKind::JSWeakRefKind, cellSize(), + false, JSWeakRef::_finalizeImpl, nullptr, nullptr diff --git a/lib/VM/LimitedStorageProvider.cpp b/lib/VM/LimitedStorageProvider.cpp index 90e3e6138b5..6e32c8ae28e 100644 --- a/lib/VM/LimitedStorageProvider.cpp +++ b/lib/VM/LimitedStorageProvider.cpp @@ -13,20 +13,22 @@ namespace hermes { namespace vm { -llvh::ErrorOr LimitedStorageProvider::newStorageImpl(const char *name) { - if (limit_ < AlignedHeapSegment::storageSize()) { +llvh::ErrorOr LimitedStorageProvider::newStorageImpl( + size_t sz, + const char *name) { + if (limit_ < FixedSizeHeapSegment::storageSize()) { return make_error_code(OOMError::TestVMLimitReached); } - limit_ -= AlignedHeapSegment::storageSize(); - return delegate_->newStorage(name); + limit_ -= sz; + return delegate_->newStorage(sz, name); } -void LimitedStorageProvider::deleteStorageImpl(void *storage) { +void LimitedStorageProvider::deleteStorageImpl(void *storage, size_t sz) { if (!storage) { return; } - delegate_->deleteStorage(storage); - limit_ += AlignedHeapSegment::storageSize(); + delegate_->deleteStorage(storage, sz); + limit_ += sz; } } // namespace vm diff --git a/lib/VM/NativeState.cpp b/lib/VM/NativeState.cpp index e9975f05a49..0f0f8f78eb9 100644 --- a/lib/VM/NativeState.cpp +++ 
b/lib/VM/NativeState.cpp @@ -15,6 +15,7 @@ namespace vm { const VTable NativeState::vt{ CellKind::NativeStateKind, cellSize(), + false, _finalizeImpl, }; diff --git a/lib/VM/Runtime.cpp b/lib/VM/Runtime.cpp index 9307b5cfce0..764482d2cf7 100644 --- a/lib/VM/Runtime.cpp +++ b/lib/VM/Runtime.cpp @@ -159,7 +159,7 @@ std::shared_ptr Runtime::create(const RuntimeConfig &runtimeConfig) { uint64_t maxHeapSize = runtimeConfig.getGCConfig().getMaxHeapSize(); // Allow some extra segments for the runtime, and as a buffer for the GC. uint64_t providerSize = std::min( - 1ULL << 32, maxHeapSize + AlignedHeapSegment::storageSize() * 4); + 1ULL << 32, maxHeapSize + FixedSizeHeapSegment::storageSize() * 4); std::shared_ptr sp = StorageProvider::contiguousVAProvider(providerSize); auto rt = HeapRuntime::create(sp); @@ -248,12 +248,13 @@ RuntimeBase::RuntimeBase() { void RuntimeBase::registerHeapSegment(unsigned idx, void *lowLim) { #if defined(HERMESVM_COMPRESSED_POINTERS) && !defined(HERMESVM_CONTIGUOUS_HEAP) - char *bias = - reinterpret_cast(lowLim) - (idx << AlignedHeapSegment::kLogSize); + char *bias = reinterpret_cast(lowLim) - + (idx << FixedSizeHeapSegment::kLogSize); segmentMap[idx] = bias; #endif - assert(lowLim == AlignedHeapSegment::storageStart(lowLim) && "Precondition"); - AlignedHeapSegment::setSegmentIndexFromStart(lowLim, idx); + assert( + lowLim == FixedSizeHeapSegment::storageStart(lowLim) && "Precondition"); + FixedSizeHeapSegment::setSegmentIndexFromStart(lowLim, idx); } Runtime::Runtime( diff --git a/lib/VM/SegmentedArray.cpp b/lib/VM/SegmentedArray.cpp index 93c4af99437..b59c7c525c9 100644 --- a/lib/VM/SegmentedArray.cpp +++ b/lib/VM/SegmentedArray.cpp @@ -17,6 +17,7 @@ template const VTable SegmentedArrayBase::Segment::vt( getCellKind(), cellSize(), + false, nullptr, nullptr, nullptr @@ -76,6 +77,7 @@ template const VTable SegmentedArrayBase::vt( getCellKind(), /*variableSize*/ 0, + false, nullptr, nullptr, _trimSizeCallback @@ -292,7 +294,8 @@ ExecutionStatus 
SegmentedArrayBase::growRight( self->inlineStorage(), self->inlineStorage() + numSlotsUsed, newSegmentedArray->inlineStorage(), - runtime.getHeap()); + runtime.getHeap(), + newSegmentedArray.get()); // Set the size of the new array to be the same as the old array's size. newSegmentedArray->numSlotsUsed_.store( numSlotsUsed, std::memory_order_release); diff --git a/lib/VM/StorageProvider.cpp b/lib/VM/StorageProvider.cpp index 67fed1eb8d3..fc93f9d4d9d 100644 --- a/lib/VM/StorageProvider.cpp +++ b/lib/VM/StorageProvider.cpp @@ -7,11 +7,13 @@ #include "hermes/VM/StorageProvider.h" +#include "hermes/ADT/BitArray.h" #include "hermes/Support/CheckedMalloc.h" #include "hermes/Support/Compiler.h" #include "hermes/Support/OSCompat.h" #include "hermes/VM/AlignedHeapSegment.h" +#include "llvh/ADT/BitVector.h" #include "llvh/ADT/DenseMap.h" #include "llvh/Support/ErrorHandling.h" #include "llvh/Support/MathExtras.h" @@ -55,14 +57,17 @@ namespace vm { namespace { +/// Minimum segment storage size. Any larger segment size should be a multiple +/// of it. 
+constexpr auto kSegmentUnitSize = AlignedHeapSegment::kSegmentUnitSize; + bool isAligned(void *p) { - return (reinterpret_cast(p) & - (AlignedHeapSegment::storageSize() - 1)) == 0; + return (reinterpret_cast(p) & (kSegmentUnitSize - 1)) == 0; } char *alignAlloc(void *p) { - return reinterpret_cast(llvh::alignTo( - reinterpret_cast(p), AlignedHeapSegment::storageSize())); + return reinterpret_cast( + llvh::alignTo(reinterpret_cast(p), kSegmentUnitSize)); } void *getMmapHint() { @@ -78,67 +83,103 @@ void *getMmapHint() { class VMAllocateStorageProvider final : public StorageProvider { public: - llvh::ErrorOr newStorageImpl(const char *name) override; - void deleteStorageImpl(void *storage) override; + llvh::ErrorOr newStorageImpl(size_t sz, const char *name) override; + void deleteStorageImpl(void *storage, size_t sz) override; }; class ContiguousVAStorageProvider final : public StorageProvider { public: ContiguousVAStorageProvider(size_t size) - : size_(llvh::alignTo(size)) { - auto result = oscompat::vm_reserve_aligned( - size_, AlignedHeapSegment::storageSize(), getMmapHint()); + : size_(llvh::alignTo(size)), + statusBits_(size_ / kSegmentUnitSize) { + auto result = + oscompat::vm_reserve_aligned(size_, kSegmentUnitSize, getMmapHint()); if (!result) hermes_fatal("Contiguous storage allocation failed.", result.getError()); - level_ = start_ = static_cast(*result); + start_ = static_cast(*result); oscompat::vm_name(start_, size_, kFreeRegionName); } ~ContiguousVAStorageProvider() override { oscompat::vm_release_aligned(start_, size_); } - llvh::ErrorOr newStorageImpl(const char *name) override { + llvh::ErrorOr newStorageImpl(size_t sz, const char *name) override { + // No available space to use. 
+ if (LLVM_UNLIKELY(firstFreeBit_ == -1)) { + return make_error_code(OOMError::MaxStorageReached); + } + + assert( + statusBits_.find_first_unset() == firstFreeBit_ && + "firstFreeBit_ should always be the first unset bit"); + void *storage; - if (!freelist_.empty()) { - storage = freelist_.back(); - freelist_.pop_back(); - } else if (level_ < start_ + size_) { - storage = - std::exchange(level_, level_ + AlignedHeapSegment::storageSize()); - } else { + int numUnits = sz / kSegmentUnitSize; + int nextUsedBit = statusBits_.find_next(firstFreeBit_); + int curFreeBit = firstFreeBit_; + // Search for a large enough continuous bit range. + while (nextUsedBit != -1 && (nextUsedBit - curFreeBit < numUnits)) { + curFreeBit = statusBits_.find_next_unset(nextUsedBit); + if (curFreeBit == -1) { + return make_error_code(OOMError::MaxStorageReached); + } + nextUsedBit = statusBits_.find_next(curFreeBit); + } + // nextUsedBit could be -1, so check if there is enough space left. + if (nextUsedBit == -1 && curFreeBit + numUnits > (int)statusBits_.size()) { return make_error_code(OOMError::MaxStorageReached); } - auto res = oscompat::vm_commit(storage, AlignedHeapSegment::storageSize()); + + storage = start_ + curFreeBit * kSegmentUnitSize; + statusBits_.set(curFreeBit, curFreeBit + numUnits); + // Reset it to the new leftmost free bit. 
+ firstFreeBit_ = statusBits_.find_next_unset(firstFreeBit_); + + auto res = oscompat::vm_commit(storage, sz); if (res) { - oscompat::vm_name(storage, AlignedHeapSegment::storageSize(), name); + oscompat::vm_name(storage, sz, name); } return res; } - void deleteStorageImpl(void *storage) override { + void deleteStorageImpl(void *storage, size_t sz) override { assert( - !llvh::alignmentAdjustment( - storage, AlignedHeapSegment::storageSize()) && + !llvh::alignmentAdjustment(storage, kSegmentUnitSize) && "Storage not aligned"); - assert(storage >= start_ && storage < level_ && "Storage not in region"); - oscompat::vm_name( - storage, AlignedHeapSegment::storageSize(), kFreeRegionName); - oscompat::vm_uncommit(storage, AlignedHeapSegment::storageSize()); - freelist_.push_back(storage); + assert( + storage >= start_ && storage < start_ + size_ && + "Storage not in region"); + oscompat::vm_name(storage, sz, kFreeRegionName); + oscompat::vm_uncommit(storage, sz); + size_t numUnits = sz / kSegmentUnitSize; + // Reset all bits for this storage. + int startIndex = (static_cast(storage) - start_) / kSegmentUnitSize; + statusBits_.reset(startIndex, startIndex + numUnits); + if (startIndex < firstFreeBit_) + firstFreeBit_ = startIndex; } private: static constexpr const char *kFreeRegionName = "hermes-free-heap"; size_t size_; char *start_; - char *level_; - llvh::SmallVector freelist_; + /// First free bit in \c statusBits_. We always make new allocation from the + /// leftmost free bit, based on heuristics: + /// 1. Usually the reserved address space is not full. + /// 2. Storage with size kSegmentUnitSize is allocated and deleted more + /// frequently than larger storage. + /// 3. Likely small storage will find space available from leftmost free bit, + /// leaving enough space at the right side for large storage. + int firstFreeBit_{0}; + /// One bit for each kSegmentUnitSize space in the entire reserved virtual + /// address space. 
A bit is set if the corresponding space is used. + llvh::BitVector statusBits_; }; class MallocStorageProvider final : public StorageProvider { public: - llvh::ErrorOr newStorageImpl(const char *name) override; - void deleteStorageImpl(void *storage) override; + llvh::ErrorOr newStorageImpl(size_t sz, const char *name) override; + void deleteStorageImpl(void *storage, size_t sz) override; private: /// Map aligned starts to actual starts for freeing. @@ -148,46 +189,48 @@ class MallocStorageProvider final : public StorageProvider { }; llvh::ErrorOr VMAllocateStorageProvider::newStorageImpl( + size_t sz, const char *name) { - assert(AlignedHeapSegment::storageSize() % oscompat::page_size() == 0); + assert(kSegmentUnitSize % oscompat::page_size() == 0); // Allocate the space, hoping it will be the correct alignment. - auto result = oscompat::vm_allocate_aligned( - AlignedHeapSegment::storageSize(), - AlignedHeapSegment::storageSize(), - getMmapHint()); + auto result = + oscompat::vm_allocate_aligned(sz, kSegmentUnitSize, getMmapHint()); if (!result) { return result; } void *mem = *result; assert(isAligned(mem)); (void)&isAligned; -#ifdef HERMESVM_ALLOW_HUGE_PAGES - oscompat::vm_hugepage(mem, AlignedHeapSegment::storageSize()); -#endif - + oscompat::vm_hugepage(mem, sz); // Name the memory region on platforms that support naming. - oscompat::vm_name(mem, AlignedHeapSegment::storageSize(), name); + oscompat::vm_name(mem, sz, name); return mem; } -void VMAllocateStorageProvider::deleteStorageImpl(void *storage) { +void VMAllocateStorageProvider::deleteStorageImpl(void *storage, size_t sz) { if (!storage) { return; } - oscompat::vm_free_aligned(storage, AlignedHeapSegment::storageSize()); + oscompat::vm_free_aligned(storage, sz); } -llvh::ErrorOr MallocStorageProvider::newStorageImpl(const char *name) { +llvh::ErrorOr MallocStorageProvider::newStorageImpl( + size_t sz, + const char *name) { // name is unused, can't name malloc memory. 
(void)name; - void *mem = checkedMalloc2(AlignedHeapSegment::storageSize(), 2u); + // Allocate size of sz + kSegmentUnitSize so that we could get an address + // aligned to kSegmentUnitSize. + void *mem = checkedMalloc2(/*count*/ 1u, sz + kSegmentUnitSize); void *lowLim = alignAlloc(mem); assert(isAligned(lowLim) && "New storage should be aligned"); lowLimToAllocHandle_[lowLim] = mem; return lowLim; } -void MallocStorageProvider::deleteStorageImpl(void *storage) { +void MallocStorageProvider::deleteStorageImpl(void *storage, size_t sz) { + // free() does not need the memory size. + (void)sz; if (!storage) { return; } @@ -217,8 +260,11 @@ std::unique_ptr StorageProvider::mallocProvider() { return std::unique_ptr(new MallocStorageProvider); } -llvh::ErrorOr StorageProvider::newStorage(const char *name) { - auto res = newStorageImpl(name); +llvh::ErrorOr StorageProvider::newStorage(size_t sz, const char *name) { + assert( + sz && (sz % kSegmentUnitSize == 0) && + "Allocated storage size must be multiples of kSegmentUnitSize"); + auto res = newStorageImpl(sz, name); if (res) { numSucceededAllocs_++; @@ -229,13 +275,17 @@ llvh::ErrorOr StorageProvider::newStorage(const char *name) { return res; } -void StorageProvider::deleteStorage(void *storage) { +void StorageProvider::deleteStorage(void *storage, size_t sz) { if (!storage) { return; } + assert( + sz && (sz % kSegmentUnitSize == 0) && + "Allocated storage size must be multiples of kSegmentUnitSize"); + numDeletedAllocs_++; - deleteStorageImpl(storage); + return deleteStorageImpl(storage, sz); } llvh::ErrorOr> diff --git a/lib/VM/gcs/AlignedHeapSegment.cpp b/lib/VM/gcs/AlignedHeapSegment.cpp index 1509168194d..14a8a37ba98 100644 --- a/lib/VM/gcs/AlignedHeapSegment.cpp +++ b/lib/VM/gcs/AlignedHeapSegment.cpp @@ -22,6 +22,17 @@ namespace hermes { namespace vm { +#ifndef NDEBUG +/// Set the given range [start, end) to a dead value. 
+static void clearRange(char *start, char *end) { +#if LLVM_ADDRESS_SANITIZER_BUILD + __asan_poison_memory_region(start, end - start); +#else + std::memset(start, kInvalidHeapValue, end - start); +#endif +} +#endif + void AlignedHeapSegment::Contents::protectGuardPage( oscompat::ProtectMode mode) { char *begin = &paddedGuardPage_[kGuardPagePadding]; @@ -33,23 +44,21 @@ void AlignedHeapSegment::Contents::protectGuardPage( } } -llvh::ErrorOr AlignedHeapSegment::create( - StorageProvider *provider) { - return create(provider, nullptr); -} - -llvh::ErrorOr AlignedHeapSegment::create( +llvh::ErrorOr FixedSizeHeapSegment::create( StorageProvider *provider, const char *name) { - auto result = provider->newStorage(name); + auto result = provider->newStorage(storageSize(), name); if (!result) { return result.getError(); } - return AlignedHeapSegment{provider, *result}; + assert(*result && "Heap segment storage allocation failure"); + return FixedSizeHeapSegment{provider, *result}; } -AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim) - : lowLim_(static_cast(lowLim)), provider_(provider) { +FixedSizeHeapSegment::FixedSizeHeapSegment( + StorageProvider *provider, + void *lowLim) + : AlignedHeapSegment(lowLim, kSize), provider_(provider) { assert( storageStart(lowLim_) == lowLim_ && "The lower limit of this storage must be aligned"); @@ -58,35 +67,33 @@ AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim) assert( reinterpret_cast(hiLim()) % oscompat::page_size() == 0 && "The higher limit must be page aligned"); - if (*this) { - new (contents()) Contents(); - contents()->protectGuardPage(oscompat::ProtectMode::None); #ifndef NDEBUG - clear(); + clear(); #endif - } } -void swap(AlignedHeapSegment &a, AlignedHeapSegment &b) { +void swap(FixedSizeHeapSegment &a, FixedSizeHeapSegment &b) { // Field lowLim_ and provider_ need to be swapped to make sure the storage of // a is not deleted when b is destroyed. 
std::swap(a.lowLim_, b.lowLim_); std::swap(a.provider_, b.provider_); std::swap(a.level_, b.level_); + std::swap(a.segmentSize_, b.segmentSize_); std::swap(a.effectiveEnd_, b.effectiveEnd_); } -AlignedHeapSegment::AlignedHeapSegment(AlignedHeapSegment &&other) - : AlignedHeapSegment() { +FixedSizeHeapSegment::FixedSizeHeapSegment(FixedSizeHeapSegment &&other) + : FixedSizeHeapSegment() { swap(*this, other); } -AlignedHeapSegment &AlignedHeapSegment::operator=(AlignedHeapSegment &&other) { +FixedSizeHeapSegment &FixedSizeHeapSegment::operator=( + FixedSizeHeapSegment &&other) { swap(*this, other); return *this; } -AlignedHeapSegment::~AlignedHeapSegment() { +FixedSizeHeapSegment::~FixedSizeHeapSegment() { if (lowLim() == nullptr) { return; } @@ -95,11 +102,11 @@ AlignedHeapSegment::~AlignedHeapSegment() { __asan_unpoison_memory_region(start(), end() - start()); if (provider_) { - provider_->deleteStorage(lowLim_); + provider_->deleteStorage(lowLim_, storageSize()); } } -void AlignedHeapSegment::markUnused(char *start, char *end) { +void FixedSizeHeapSegment::markUnused(char *start, char *end) { assert( !llvh::alignmentAdjustment(start, oscompat::page_size()) && !llvh::alignmentAdjustment(end, oscompat::page_size())); @@ -116,11 +123,11 @@ void AlignedHeapSegment::markUnused(char *start, char *end) { } template -void AlignedHeapSegment::setLevel(char *lvl) { +void FixedSizeHeapSegment::setLevel(char *lvl) { assert(dbgContainsLevel(lvl)); if (lvl < level_) { #ifndef NDEBUG - clear(lvl, level_); + clearRange(lvl, level_); #else if (MU == AdviseUnused::Yes) { const size_t PS = oscompat::page_size(); @@ -137,19 +144,19 @@ void AlignedHeapSegment::setLevel(char *lvl) { } /// Explicit template instantiations for setLevel -template void AlignedHeapSegment::setLevel(char *lvl); -template void AlignedHeapSegment::setLevel(char *lvl); +template void FixedSizeHeapSegment::setLevel(char *lvl); +template void FixedSizeHeapSegment::setLevel(char *lvl); template -void 
AlignedHeapSegment::resetLevel() { +void FixedSizeHeapSegment::resetLevel() { setLevel(start()); } /// Explicit template instantiations for resetLevel -template void AlignedHeapSegment::resetLevel(); -template void AlignedHeapSegment::resetLevel(); +template void FixedSizeHeapSegment::resetLevel(); +template void FixedSizeHeapSegment::resetLevel(); -void AlignedHeapSegment::setEffectiveEnd(char *effectiveEnd) { +void FixedSizeHeapSegment::setEffectiveEnd(char *effectiveEnd) { assert( start() <= effectiveEnd && effectiveEnd <= end() && "Must be valid end for segment."); @@ -157,33 +164,25 @@ void AlignedHeapSegment::setEffectiveEnd(char *effectiveEnd) { effectiveEnd_ = effectiveEnd; } -void AlignedHeapSegment::clearExternalMemoryCharge() { +void FixedSizeHeapSegment::clearExternalMemoryCharge() { setEffectiveEnd(end()); } #ifndef NDEBUG -bool AlignedHeapSegment::dbgContainsLevel(const void *lvl) const { +bool FixedSizeHeapSegment::dbgContainsLevel(const void *lvl) const { return contains(lvl) || lvl == hiLim(); } -bool AlignedHeapSegment::validPointer(const void *p) const { +bool FixedSizeHeapSegment::validPointer(const void *p) const { return start() <= p && p < level() && static_cast(p)->isValid(); } -void AlignedHeapSegment::clear() { - clear(start(), end()); -} - -/* static */ void AlignedHeapSegment::clear(char *start, char *end) { -#if LLVM_ADDRESS_SANITIZER_BUILD - __asan_poison_memory_region(start, end - start); -#else - std::memset(start, kInvalidHeapValue, end - start); -#endif +void FixedSizeHeapSegment::clear() { + clearRange(start(), end()); } -/* static */ void AlignedHeapSegment::checkUnwritten(char *start, char *end) { +/* static */ void FixedSizeHeapSegment::checkUnwritten(char *start, char *end) { #if !LLVM_ADDRESS_SANITIZER_BUILD && defined(HERMES_SLOW_DEBUG) // Check that the space was not written into. 
std::for_each( diff --git a/lib/VM/gcs/CardTableNC.cpp b/lib/VM/gcs/CardTableNC.cpp index ec94d5e5710..018ff2c5a06 100644 --- a/lib/VM/gcs/CardTableNC.cpp +++ b/lib/VM/gcs/CardTableNC.cpp @@ -20,12 +20,6 @@ namespace hermes { namespace vm { -#ifndef NDEBUG -/* static */ void *CardTable::storageEnd(const void *ptr) { - return AlignedHeapSegment::storageEnd(ptr); -} -#endif - void CardTable::dirtyCardsForAddressRange(const void *low, const void *high) { // If high is in the middle of some card, ensure that we dirty that card. high = reinterpret_cast(high) + kCardSize - 1; @@ -37,26 +31,26 @@ OptValue CardTable::findNextCardWithStatus( size_t fromIndex, size_t endIndex) const { for (size_t idx = fromIndex; idx < endIndex; idx++) - if (cards_[idx].load(std::memory_order_relaxed) == status) + if (cards()[idx].load(std::memory_order_relaxed) == status) return idx; return llvh::None; } void CardTable::clear() { - cleanRange(kFirstUsedIndex, kValidIndices); + cleanRange(kFirstUsedIndex, getEndIndex()); } void CardTable::updateAfterCompaction(const void *newLevel) { const char *newLevelPtr = static_cast(newLevel); size_t firstCleanCardIndex = addressToIndex(newLevelPtr + kCardSize - 1); assert( - firstCleanCardIndex <= kValidIndices && + firstCleanCardIndex <= getEndIndex() && firstCleanCardIndex >= kFirstUsedIndex && "Invalid index."); // Dirty the occupied cards (below the level), and clean the cards above the // level. 
dirtyRange(kFirstUsedIndex, firstCleanCardIndex); - cleanRange(firstCleanCardIndex, kValidIndices); + cleanRange(firstCleanCardIndex, getEndIndex()); } void CardTable::cleanRange(size_t from, size_t to) { @@ -72,7 +66,7 @@ void CardTable::cleanOrDirtyRange( size_t to, CardStatus cleanOrDirty) { for (size_t index = from; index < to; index++) { - cards_[index].store(cleanOrDirty, std::memory_order_relaxed); + cards()[index].store(cleanOrDirty, std::memory_order_relaxed); } } @@ -93,7 +87,7 @@ void CardTable::updateBoundaries( "Precondition: must have crossed boundary."); // The object may be large, and may cross multiple cards, but first // handle the first card. - boundaries_[boundary->index()] = + boundaries()[boundary->index()] = (boundary->address() - start) >> LogHeapAlign; boundary->bump(); @@ -106,7 +100,7 @@ void CardTable::updateBoundaries( unsigned currentIndexDelta = 1; unsigned numWithCurrentExp = 0; while (boundary->address() < end) { - boundaries_[boundary->index()] = encodeExp(currentExp); + boundaries()[boundary->index()] = encodeExp(currentExp); numWithCurrentExp++; if (numWithCurrentExp == currentIndexDelta) { numWithCurrentExp = 0; @@ -120,14 +114,14 @@ void CardTable::updateBoundaries( } GCCell *CardTable::firstObjForCard(unsigned index) const { - int8_t val = boundaries_[index]; + int8_t val = boundaries()[index]; // If val is negative, it means skip backwards some number of cards. // In general, for an object crossing 2^N cards, a query for one of // those cards will examine at most N entries in the table. 
while (val < 0) { index -= 1 << decodeExp(val); - val = boundaries_[index]; + val = boundaries()[index]; } char *boundary = const_cast(indexToAddress(index)); @@ -147,12 +141,12 @@ protectBoundaryTableWork(void *table, size_t sz, oscompat::ProtectMode mode) { void CardTable::protectBoundaryTable() { protectBoundaryTableWork( - &boundaries_[0], kValidIndices, oscompat::ProtectMode::None); + boundaries(), getEndIndex(), oscompat::ProtectMode::None); } void CardTable::unprotectBoundaryTable() { protectBoundaryTableWork( - &boundaries_[0], kValidIndices, oscompat::ProtectMode::ReadWrite); + boundaries(), getEndIndex(), oscompat::ProtectMode::ReadWrite); } #endif // HERMES_EXTRA_DEBUG @@ -160,7 +154,7 @@ void CardTable::unprotectBoundaryTable() { void CardTable::verifyBoundaries(char *start, char *level) const { // Start should be card-aligned. assert(isCardAligned(start)); - for (unsigned index = addressToIndex(start); index < kValidIndices; index++) { + for (unsigned index = addressToIndex(start); index < getEndIndex(); index++) { const char *boundary = indexToAddress(index); if (level <= boundary) { break; @@ -178,6 +172,13 @@ void CardTable::verifyBoundaries(char *start, char *level) const { "Card object boundary is broken: first obj doesn't extend into card"); } } + +GCCell *CardTable::findObjectContaining(const void *loc) const { + GCCell *obj = firstObjForCard(addressToIndex(loc)); + while (obj->nextCell() < loc) + obj = obj->nextCell(); + return obj; +} #endif // HERMES_SLOW_DEBUG } // namespace vm diff --git a/lib/VM/gcs/HadesGC.cpp b/lib/VM/gcs/HadesGC.cpp index e9bf33f4b28..decfe4be618 100644 --- a/lib/VM/gcs/HadesGC.cpp +++ b/lib/VM/gcs/HadesGC.cpp @@ -13,6 +13,7 @@ #include "hermes/VM/CheckHeapWellFormedAcceptor.h" #include "hermes/VM/FillerCell.h" #include "hermes/VM/GCBase-inline.h" +#include "hermes/VM/GCCell.h" #include "hermes/VM/GCPointer.h" #include "hermes/VM/HermesValue-inline.h" #include "hermes/VM/RootAndSlotAcceptorDefault.h" @@ -36,7 +37,7 @@ 
static constexpr size_t kTargetMaxPauseMs = 50; // Assert that it is always safe to construct a cell that is as large as the // entire segment. This lets us always assume that contiguous regions in a // segment can be safely turned into a single FreelistCell. -static_assert(AlignedHeapSegment::maxSize() <= HadesGC::maxAllocationSize()); +static_assert(FixedSizeHeapSegment::maxSize() <= HadesGC::maxAllocationSize()); // A free list cell is always variable-sized. const VTable HadesGC::OldGen::FreelistCell::vt{ @@ -172,7 +173,7 @@ HadesGC::OldGen::FreelistCell *HadesGC::OldGen::removeCellFromFreelist( /* static */ template void HadesGC::forAllObjsInSegment( - hermes::vm::AlignedHeapSegment &seg, + FixedSizeHeapSegment &seg, CallbackFunction callback) { for (GCCell *cell : seg.cells()) { // Skip free-list entries. @@ -185,7 +186,7 @@ void HadesGC::forAllObjsInSegment( /* static */ template void HadesGC::forCompactedObjsInSegment( - AlignedHeapSegment &seg, + FixedSizeHeapSegment &seg, CallbackFunction callback, PointerBase &base) { void *const stop = seg.level(); @@ -413,10 +414,11 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, return forwardCell(ptr); } if (CompactionEnabled && gc.compactee_.contains(ptr)) { + assert(currentCell_ && "currentCell_ must be set for compaction"); // If a compaction is about to take place, dirty the card for any newly // evacuated cells, since the marker may miss them. - AlignedHeapSegment::cardTableCovering(heapLoc)->dirtyCardForAddress( - heapLoc); + AlignedHeapSegment::cardTableCovering(currentCell_) + ->dirtyCardForAddressInLargeObj(heapLoc); } return ptr; } @@ -431,10 +433,11 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, return forwardCell(ptr); } if (CompactionEnabled && gc.compactee_.contains(cptr)) { + assert(currentCell_ && "currentCell_ must be set for compaction"); // If a compaction is about to take place, dirty the card for any newly // evacuated cells, since the marker may miss them. 
- AlignedHeapSegment::cardTableCovering(heapLoc)->dirtyCardForAddress( - heapLoc); + AlignedHeapSegment::cardTableCovering(currentCell_) + ->dirtyCardForAddressInLargeObj(heapLoc); } return cptr; } @@ -554,6 +557,12 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, } } + /// Set the current cell being visited. + void startCell(const GCCell *cell) { + if (CompactionEnabled) + currentCell_ = cell; + } + private: HadesGC &gc; PointerBase &pointerBase_; @@ -562,6 +571,10 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, const bool isTrackingIDs_; uint64_t evacuatedBytes_{0}; + /// Current GCCell being visited. For heap locations that could be from a + /// large object, we need this pointer to get the correct card table. + const GCCell *currentCell_{nullptr}; + void push(CopyListCell *cell) { cell->next_ = copyListHead_; copyListHead_ = CompressedPointer::encodeNonNull(cell, pointerBase_); @@ -643,11 +656,12 @@ class HadesGC::MarkAcceptor final : public RootAndSlotAcceptor { void acceptHeap(GCCell *cell, const void *heapLoc) { assert(cell && "Cannot pass null pointer to acceptHeap"); assert(!gc.inYoungGen(heapLoc) && "YG slot found in OG marking"); + assert(currentCell_ && "There must be a cell being visited"); if (gc.compactee_.contains(cell) && !gc.compactee_.contains(heapLoc)) { // This is a pointer in the heap pointing into the compactee, dirty the // corresponding card. - AlignedHeapSegment::cardTableCovering(heapLoc)->dirtyCardForAddress( - heapLoc); + AlignedHeapSegment::cardTableCovering(currentCell_) + ->dirtyCardForAddressInLargeObj(heapLoc); } if (AlignedHeapSegment::getCellMarkBit(cell)) { // Points to an already marked object, do nothing. 
@@ -816,6 +830,7 @@ class HadesGC::MarkAcceptor final : public RootAndSlotAcceptor { gc.dbgContains(cell) && "Non-heap object discovered during marking"); const auto sz = cell->getAllocatedSize(); numMarkedBytes += sz; + startCell(cell); gc.markCell(*this, cell); } markedBytes_ += numMarkedBytes; @@ -843,10 +858,19 @@ class HadesGC::MarkAcceptor final : public RootAndSlotAcceptor { return markedSymbols_; } + /// Set the current cell being visited. + void startCell(const GCCell *cell) { + currentCell_ = cell; + } + private: HadesGC &gc; PointerBase &pointerBase_; + /// Current GCCell being visited. For heap locations that could be from a + /// large object, we need this pointer to get the correct card table. + const GCCell *currentCell_{nullptr}; + /// A worklist local to the marking thread, that is only pushed onto by the /// marking thread. If this is empty, the global worklist must be consulted /// to ensure that pointers modified in write barriers are handled. @@ -1191,7 +1215,7 @@ size_t HadesGC::OldGen::sweepSegmentsRemaining() const { } size_t HadesGC::OldGen::getMemorySize() const { - size_t memorySize = segments_.size() * sizeof(AlignedHeapSegment); + size_t memorySize = segments_.size() * sizeof(FixedSizeHeapSegment); memorySize += segmentBuckets_.size() * sizeof(SegmentBuckets); return memorySize; } @@ -1217,7 +1241,7 @@ HadesGC::HadesGC( maxHeapSize_{std::max( gcConfig.getMaxHeapSize(), // At least one YG segment and one OG segment. 
- 2 * AlignedHeapSegment::storageSize())}, + 2 * FixedSizeHeapSegment::storageSize())}, provider_(std::move(provider)), oldGen_{*this}, backgroundExecutor_{ @@ -1228,22 +1252,22 @@ HadesGC::HadesGC( occupancyTarget_(gcConfig.getOccupancyTarget()), ygAverageSurvivalBytes_{ /*weight*/ 0.5, - /*init*/ kYGInitialSizeFactor * AlignedHeapSegment::maxSize() * + /*init*/ kYGInitialSizeFactor * FixedSizeHeapSegment::maxSize() * kYGInitialSurvivalRatio} { (void)vmExperimentFlags; std::lock_guard lk(gcMutex_); crashMgr_->setCustomData("HermesGC", getKindAsStr().c_str()); // createSegment relies on member variables and should not be called until // they are initialised. - llvh::ErrorOr newYoungGen = createSegment(); + llvh::ErrorOr newYoungGen = createSegment(); if (!newYoungGen) hermes_fatal("Failed to initialize the young gen", newYoungGen.getError()); setYoungGen(std::move(newYoungGen.get())); const size_t initHeapSize = std::max( {gcConfig.getMinHeapSize(), gcConfig.getInitHeapSize(), - AlignedHeapSegment::maxSize()}); - oldGen_.setTargetSizeBytes(initHeapSize - AlignedHeapSegment::maxSize()); + FixedSizeHeapSegment::maxSize()}); + oldGen_.setTargetSizeBytes(initHeapSize - FixedSizeHeapSegment::maxSize()); } HadesGC::~HadesGC() { @@ -1260,7 +1284,7 @@ void HadesGC::getHeapInfo(HeapInfo &info) { info.allocatedBytes = allocatedBytes(); // Heap size includes fragmentation, which means every segment is fully used. info.heapSize = - (oldGen_.numSegments() + 1) * AlignedHeapSegment::storageSize(); + (oldGen_.numSegments() + 1) * FixedSizeHeapSegment::storageSize(); // If YG isn't empty, its bytes haven't been accounted for yet, add them here. info.totalAllocatedBytes = totalAllocatedBytes_ + youngGen().used(); info.va = info.heapSize; @@ -1490,7 +1514,7 @@ void HadesGC::oldGenCollection(std::string cause, bool forceCompaction) { // First, clear any mark bits that were set by a previous collection or // direct-to-OG allocation, they aren't needed anymore. 
- for (AlignedHeapSegment &seg : oldGen_) + for (FixedSizeHeapSegment &seg : oldGen_) seg.markBitArray().reset(); // Unmark all symbols in the identifier table, as Symbol liveness will be @@ -1661,13 +1685,13 @@ void HadesGC::prepareCompactee(bool forceCompaction) { // from the heap, we only want to compact if there are at least 2 segments in // the OG. uint64_t buffer = std::max( - oldGen_.targetSizeBytes() / 20, AlignedHeapSegment::maxSize()); + oldGen_.targetSizeBytes() / 20, FixedSizeHeapSegment::maxSize()); uint64_t threshold = oldGen_.targetSizeBytes() + buffer; uint64_t totalBytes = oldGen_.size() + oldGen_.externalBytes(); if ((forceCompaction || totalBytes > threshold) && oldGen_.numSegments() > 1) { compactee_.segment = - std::make_shared(oldGen_.popSegment()); + std::make_shared(oldGen_.popSegment()); addSegmentExtentToCrashManager( *compactee_.segment, kCompacteeNameForCrashMgr); compactee_.start = compactee_.segment->lowLim(); @@ -1706,7 +1730,7 @@ void HadesGC::finalizeCompactee() { // allocated in the compactee. oldGen_.incrementAllocatedBytes(-preAllocated); - const size_t segIdx = AlignedHeapSegment::getSegmentIndexFromStart( + const size_t segIdx = FixedSizeHeapSegment::getSegmentIndexFromStart( compactee_.segment->lowLim()); segmentIndices_.push_back(segIdx); removeSegmentExtentFromCrashManager(std::to_string(segIdx)); @@ -1864,7 +1888,7 @@ void HadesGC::finalizeAll() { forCompactedObjsInSegment( *compactee_.segment, finalizeCallback, getPointerBase()); - for (AlignedHeapSegment &seg : oldGen_) + for (FixedSizeHeapSegment &seg : oldGen_) forAllObjsInSegment(seg, finalizeCallback); } @@ -1900,6 +1924,18 @@ void HadesGC::debitExternalMemory(GCCell *cell, uint32_t sz) { } void HadesGC::writeBarrierSlow(const GCHermesValue *loc, HermesValue value) { +#ifdef HERMES_SLOW_DEBUG + // Don't run the check when background thread is sweeping, which may merge + // cells and poison memory outside of FreeListCell. 
+ if (!kConcurrentGC || concurrentPhase_ != Phase::Sweep) { + auto *obj = + FixedSizeHeapSegment::cardTableCovering(loc)->findObjectContaining(loc); + assert( + !VTable::getVTable(obj->getKind())->allowLargeAlloc && + "writeBarrierSlow() only works for GCCell that does not support large allocation"); + } +#endif + if (ogMarkingBarriers_) { snapshotWriteBarrierInternal(*loc); } @@ -1909,9 +1945,35 @@ void HadesGC::writeBarrierSlow(const GCHermesValue *loc, HermesValue value) { relocationWriteBarrier(loc, value.getPointer()); } +void HadesGC::writeBarrierSlowForLargeObj( + const GCCell *owningObj, + const GCHermesValue *loc, + HermesValue value) { + if (ogMarkingBarriers_) { + snapshotWriteBarrierInternal(*loc); + } + if (!value.isPointer()) { + return; + } + relocationWriteBarrierForLargeObj( + owningObj, loc, static_cast(value.getPointer())); +} + void HadesGC::writeBarrierSlow( const GCSmallHermesValue *loc, SmallHermesValue value) { +#ifdef HERMES_SLOW_DEBUG + // Don't run the check when background thread is sweeping, which may merge + // cells and poison memory outside of FreeListCell. 
+ if (!kConcurrentGC || concurrentPhase_ != Phase::Sweep) { + auto *obj = + FixedSizeHeapSegment::cardTableCovering(loc)->findObjectContaining(loc); + assert( + !VTable::getVTable(obj->getKind())->allowLargeAlloc && + "writeBarrierSlow() only works for GCCell that does not support large allocation"); + } +#endif + if (ogMarkingBarriers_) { snapshotWriteBarrierInternal(*loc); } @@ -1921,7 +1983,33 @@ void HadesGC::writeBarrierSlow( relocationWriteBarrier(loc, value.getPointer(getPointerBase())); } +void HadesGC::writeBarrierSlowForLargeObj( + const GCCell *owningObj, + const GCSmallHermesValue *loc, + SmallHermesValue value) { + if (ogMarkingBarriers_) { + snapshotWriteBarrierInternal(*loc); + } + if (!value.isPointer()) { + return; + } + relocationWriteBarrierForLargeObj( + owningObj, loc, value.getPointer(getPointerBase())); +} + void HadesGC::writeBarrierSlow(const GCPointerBase *loc, const GCCell *value) { +#ifdef HERMES_SLOW_DEBUG + // Don't run the check when background thread is sweeping, which may merge + // cells and poison memory outside of FreeListCell. + if (!kConcurrentGC || concurrentPhase_ != Phase::Sweep) { + auto *obj = + FixedSizeHeapSegment::cardTableCovering(loc)->findObjectContaining(loc); + assert( + !VTable::getVTable(obj->getKind())->allowLargeAlloc && + "writeBarrierSlow() only works for GCCell that does not support large allocation"); + } +#endif + if (*loc && ogMarkingBarriers_) snapshotWriteBarrierInternal(*loc); // Always do the non-snapshot write barrier in order for YG to be able to @@ -1929,9 +2017,33 @@ void HadesGC::writeBarrierSlow(const GCPointerBase *loc, const GCCell *value) { relocationWriteBarrier(loc, value); } +void HadesGC::writeBarrierSlowForLargeObj( + const GCCell *owningObj, + const GCPointerBase *loc, + const GCCell *value) { + if (*loc && ogMarkingBarriers_) + snapshotWriteBarrierInternal(*loc); + // Always do the non-snapshot write barrier in order for YG to be able to + // scan cards. 
+ relocationWriteBarrierForLargeObj(owningObj, loc, value); +} + void HadesGC::constructorWriteBarrierSlow( const GCHermesValue *loc, HermesValue value) { + // If constructorWriteBarrier() is called when the owning object is still + // being constructed, its CellKind is not set yet, so we can't assert that + // the owning object does not support large allocation. Instead, we check + // the size of segment covering this object. GCCells that support large + // allocation but get allocated in a normal segment would still work here. +#ifdef HERMES_SLOW_DEBUG + auto segmentSize = + FixedSizeHeapSegment::cardTableCovering(loc)->getSegmentSize(); + assert( + (segmentSize == FixedSizeHeapSegment::kSize) && + "constructorWriteBarrierSlow() does not work for objects larger than the size of FixedSizeHeapSegment"); +#endif + // A constructor never needs to execute a SATB write barrier, since its // previous value was definitely not live. if (!value.isPointer()) { @@ -1940,9 +2052,35 @@ void HadesGC::constructorWriteBarrierSlow( relocationWriteBarrier(loc, value.getPointer()); } +void HadesGC::constructorWriteBarrierSlowForLargeObj( + const GCCell *owningObj, + const GCHermesValue *loc, + HermesValue value) { + // A constructor never needs to execute a SATB write barrier, since its + // previous value was definitely not live. + if (!value.isPointer()) { + return; + } + relocationWriteBarrierForLargeObj( + owningObj, loc, static_cast(value.getPointer())); +} + void HadesGC::constructorWriteBarrierSlow( const GCSmallHermesValue *loc, SmallHermesValue value) { + // If constructorWriteBarrier() is called when the owning object is still + // being constructed, its CellKind is not set yet, so we can't assert that + // the owning object does not support large allocation. Instead, we check + // the size of segment covering this object. GCCells that support large + // allocation but get allocated in a normal segment would still work here. 
+#ifdef HERMES_SLOW_DEBUG + auto segmentSize = + FixedSizeHeapSegment::cardTableCovering(loc)->getSegmentSize(); + assert( + (segmentSize == FixedSizeHeapSegment::kSize) && + "constructorWriteBarrierSlow() does not work for objects larger than the size of FixedSizeHeapSegment"); +#endif + // A constructor never needs to execute a SATB write barrier, since its // previous value was definitely not live. if (!value.isPointer()) { @@ -1951,11 +2089,26 @@ void HadesGC::constructorWriteBarrierSlow( relocationWriteBarrier(loc, value.getPointer(getPointerBase())); } +void HadesGC::constructorWriteBarrierSlowForLargeObj( + const GCCell *owningObj, + const GCSmallHermesValue *loc, + SmallHermesValue value) { + // A constructor never needs to execute a SATB write barrier, since its + // previous value was definitely not live. + if (!value.isPointer()) { + return; + } + relocationWriteBarrierForLargeObj( + owningObj, loc, value.getPointer(getPointerBase())); +} + void HadesGC::constructorWriteBarrierRangeSlow( + const GCCell *owningObj, const GCHermesValue *start, uint32_t numHVs) { assert( - AlignedHeapSegment::containedInSame(start, start + numHVs) && + reinterpret_cast(start + numHVs) < + AlignedHeapSegment::storageEnd(owningObj) && "Range must start and end within a heap segment."); // Most constructors should be running in the YG, so in the common case, we @@ -1963,17 +2116,19 @@ void HadesGC::constructorWriteBarrierRangeSlow( // then just dirty all the cards corresponding to it, and we can scan them for // pointers later. This is less precise but makes the write barrier faster. 
- AlignedHeapSegment::cardTableCovering(start)->dirtyCardsForAddressRange( + FixedSizeHeapSegment::cardTableCovering(owningObj)->dirtyCardsForAddressRange( start, start + numHVs); } void HadesGC::constructorWriteBarrierRangeSlow( + const GCCell *owningObj, const GCSmallHermesValue *start, uint32_t numHVs) { assert( - AlignedHeapSegment::containedInSame(start, start + numHVs) && + reinterpret_cast(start + numHVs) < + AlignedHeapSegment::storageEnd(owningObj) && "Range must start and end within a heap segment."); - AlignedHeapSegment::cardTableCovering(start)->dirtyCardsForAddressRange( + FixedSizeHeapSegment::cardTableCovering(owningObj)->dirtyCardsForAddressRange( start, start + numHVs); } @@ -2048,7 +2203,7 @@ void HadesGC::relocationWriteBarrier(const void *loc, const void *value) { // Do not dirty cards for compactee->compactee, yg->yg, or yg->compactee // pointers. But do dirty cards for compactee->yg pointers, since compaction // may not happen in the next YG. - if (AlignedHeapSegment::containedInSame(loc, value)) { + if (FixedSizeHeapSegment::containedInSame(loc, value)) { return; } if (inYoungGen(value) || compactee_.contains(value)) { @@ -2057,7 +2212,21 @@ void HadesGC::relocationWriteBarrier(const void *loc, const void *value) { // allocation. // Note that this *only* applies since the boundaries are updated separately // from the card table being marked itself. 
- AlignedHeapSegment::cardTableCovering(loc)->dirtyCardForAddress(loc); + FixedSizeHeapSegment::cardTableCovering(loc)->dirtyCardForAddress(loc); + } +} + +void HadesGC::relocationWriteBarrierForLargeObj( + const GCCell *owningObj, + const void *loc, + const GCCell *value) { + assert(!inYoungGen(loc) && "Pre-condition from other callers"); + if (AlignedHeapSegment::containedInSame(owningObj, value)) { + return; + } + if (inYoungGen(value) || compactee_.contains(value)) { + AlignedHeapSegment::cardTableCovering(owningObj) + ->dirtyCardForAddressInLargeObj(loc); } } @@ -2098,7 +2267,7 @@ void HadesGC::forAllObjs(const std::function &callback) { callback(cell); } }; - for (AlignedHeapSegment &seg : oldGen_) { + for (FixedSizeHeapSegment &seg : oldGen_) { if (concurrentPhase_ != Phase::Sweep) forAllObjsInSegment(seg, callback); else @@ -2216,7 +2385,7 @@ GCCell *HadesGC::OldGen::alloc(uint32_t sz) { // Before waiting for a collection to finish, check if we're below the max // heap size and can simply allocate another segment. This will prevent // blocking the YG unnecessarily. - llvh::ErrorOr seg = gc_.createSegment(); + llvh::ErrorOr seg = gc_.createSegment(); if (seg) { // Complete this allocation using a bump alloc. AllocResult res = seg->alloc(sz); @@ -2389,6 +2558,7 @@ void HadesGC::youngGenEvacuateImpl(Acceptor &acceptor, bool doCompaction) { // object is only there for the forwarding pointer. GCCell *const cell = copyCell->getMarkedForwardingPointer().getNonNull(getPointerBase()); + acceptor.startCell(cell); markCell(acceptor, cell); } @@ -2517,7 +2687,7 @@ void HadesGC::youngGenCollection( // 3. The duration of this collection may not have met our pause time goals. youngGen().setEffectiveEnd( youngGen().start() + - static_cast(ygSizeFactor_ * AlignedHeapSegment::maxSize())); + static_cast(ygSizeFactor_ * FixedSizeHeapSegment::maxSize())); // We have to set these after the collection, in case a compaction took // place and updated these metrics. 
@@ -2598,7 +2768,7 @@ bool HadesGC::promoteYoungGenToOldGen() { // TODO: Add more stringent criteria for turning off this flag, for instance, // once the heap reaches a certain size. That would avoid growing the heap to // the maximum possible size before stopping promotions. - llvh::ErrorOr newYoungGen = createSegment(); + llvh::ErrorOr newYoungGen = createSegment(); if (!newYoungGen) { promoteYGToOG_ = false; return false; @@ -2624,7 +2794,7 @@ bool HadesGC::promoteYoungGenToOldGen() { return true; } -AlignedHeapSegment HadesGC::setYoungGen(AlignedHeapSegment seg) { +FixedSizeHeapSegment HadesGC::setYoungGen(FixedSizeHeapSegment seg) { addSegmentExtentToCrashManager(seg, "YG"); youngGenFinalizables_.clear(); std::swap(youngGen_, seg); @@ -2678,7 +2848,7 @@ void HadesGC::updateYoungGenSizeFactor() { template void HadesGC::scanDirtyCardsForSegment( EvacAcceptor &acceptor, - AlignedHeapSegment &seg) { + FixedSizeHeapSegment &seg) { const auto &cardTable = seg.cardTable(); // Use level instead of end in case the OG segment is still in bump alloc // mode. @@ -2705,8 +2875,8 @@ void HadesGC::scanDirtyCardsForSegment( const auto iEnd = oiEnd ? *oiEnd : to; assert( - (iEnd == to || !cardTable.isCardForIndexDirty(iEnd)) && - cardTable.isCardForIndexDirty(iEnd - 1) && + (iEnd == to || !cardTable.isCardForIndexDirtyInLargeObj(iEnd)) && + cardTable.isCardForIndexDirtyInLargeObj(iEnd - 1) && "end should either be the end of the card table, or the first " "non-dirty card after a sequence of dirty cards"); assert(iBegin < iEnd && "Indices must be apart by at least one"); @@ -2730,8 +2900,10 @@ void HadesGC::scanDirtyCardsForSegment( // expensive. // Mark the first object with respect to the dirty card boundaries. 
- if (visitUnmarked || AlignedHeapSegment::getCellMarkBit(obj)) + if (visitUnmarked || AlignedHeapSegment::getCellMarkBit(obj)) { + acceptor.startCell(obj); markCellWithinRange(acceptor, obj, begin, end); + } obj = obj->nextCell(); // If there are additional objects in this card, scan them. @@ -2742,8 +2914,10 @@ void HadesGC::scanDirtyCardsForSegment( // object where next is within the card. for (GCCell *next = obj->nextCell(); next < boundary; next = next->nextCell()) { - if (visitUnmarked || AlignedHeapSegment::getCellMarkBit(obj)) + if (visitUnmarked || AlignedHeapSegment::getCellMarkBit(obj)) { + acceptor.startCell(obj); markCell(acceptor, obj); + } obj = next; } @@ -2752,8 +2926,10 @@ void HadesGC::scanDirtyCardsForSegment( assert( obj < boundary && obj->nextCell() >= boundary && "Last object in card must touch or cross cross the card boundary"); - if (visitUnmarked || AlignedHeapSegment::getCellMarkBit(obj)) + if (visitUnmarked || AlignedHeapSegment::getCellMarkBit(obj)) { + acceptor.startCell(obj); markCellWithinRange(acceptor, obj, begin, end); + } } from = iEnd; @@ -2774,7 +2950,7 @@ void HadesGC::scanDirtyCards(EvacAcceptor &acceptor) { for (size_t i = 0; i < segEnd; ++i) { // It is safe to hold this reference across a push_back into // oldGen_.segments_ since references into a deque are not invalidated. - AlignedHeapSegment &seg = oldGen_[i]; + FixedSizeHeapSegment &seg = oldGen_[i]; scanDirtyCardsForSegment(acceptor, seg); // Do not clear the card table if the OG thread is currently marking to // prepare for a compaction. Note that we should clear the card tables if @@ -2810,7 +2986,7 @@ uint64_t HadesGC::externalBytes() const { uint64_t HadesGC::segmentFootprint() const { size_t totalSegments = oldGen_.numSegments() + (youngGen_ ? 1 : 0) + (compactee_.segment ? 
1 : 0); - return totalSegments * AlignedHeapSegment::storageSize(); + return totalSegments * FixedSizeHeapSegment::storageSize(); } uint64_t HadesGC::heapFootprint() const { @@ -2833,7 +3009,7 @@ uint64_t HadesGC::OldGen::externalBytes() const { uint64_t HadesGC::OldGen::size() const { size_t totalSegments = numSegments() + (gc_.compactee_.segment ? 1 : 0); - return totalSegments * AlignedHeapSegment::maxSize(); + return totalSegments * FixedSizeHeapSegment::maxSize(); } uint64_t HadesGC::OldGen::targetSizeBytes() const { @@ -2864,7 +3040,7 @@ llvh::ErrorOr HadesGC::getVMFootprintForTest() const { return ygFootprint; // Add each OG segment. - for (const AlignedHeapSegment &seg : oldGen_) { + for (const FixedSizeHeapSegment &seg : oldGen_) { auto segFootprint = hermes::oscompat::vm_footprint(seg.start(), seg.hiLim()); if (!segFootprint) @@ -2874,19 +3050,20 @@ llvh::ErrorOr HadesGC::getVMFootprintForTest() const { return footprint; } -std::deque::iterator HadesGC::OldGen::begin() { +std::deque::iterator HadesGC::OldGen::begin() { return segments_.begin(); } -std::deque::iterator HadesGC::OldGen::end() { +std::deque::iterator HadesGC::OldGen::end() { return segments_.end(); } -std::deque::const_iterator HadesGC::OldGen::begin() const { +std::deque::const_iterator HadesGC::OldGen::begin() + const { return segments_.begin(); } -std::deque::const_iterator HadesGC::OldGen::end() const { +std::deque::const_iterator HadesGC::OldGen::end() const { return segments_.end(); } @@ -2894,21 +3071,20 @@ size_t HadesGC::OldGen::numSegments() const { return segments_.size(); } -AlignedHeapSegment &HadesGC::OldGen::operator[](size_t i) { +FixedSizeHeapSegment &HadesGC::OldGen::operator[](size_t i) { return segments_[i]; } -llvh::ErrorOr HadesGC::createSegment() { +llvh::ErrorOr HadesGC::createSegment() { // No heap size limit when Handle-SAN is on, to allow the heap enough room to // keep moving things around. 
if (!sanitizeRate_ && heapFootprint() >= maxHeapSize_) return make_error_code(OOMError::MaxHeapReached); - - auto res = AlignedHeapSegment::create(provider_.get(), "hades-segment"); + auto res = FixedSizeHeapSegment::create(provider_.get(), "hades-segment"); if (!res) { return res.getError(); } - AlignedHeapSegment seg(std::move(res.get())); + FixedSizeHeapSegment seg(std::move(res.get())); // Even if compressed pointers are off, we still use the segment index for // crash manager indices. size_t segIdx; @@ -2921,12 +3097,12 @@ llvh::ErrorOr HadesGC::createSegment() { gcCallbacks_.registerHeapSegment(segIdx, seg.lowLim()); addSegmentExtentToCrashManager(seg, std::to_string(segIdx)); seg.markBitArray().set(); - return llvh::ErrorOr(std::move(seg)); + return llvh::ErrorOr(std::move(seg)); } -void HadesGC::OldGen::addSegment(AlignedHeapSegment seg) { +void HadesGC::OldGen::addSegment(FixedSizeHeapSegment seg) { segments_.emplace_back(std::move(seg)); - AlignedHeapSegment &newSeg = segments_.back(); + FixedSizeHeapSegment &newSeg = segments_.back(); incrementAllocatedBytes(newSeg.used()); // Add a set of freelist buckets for this segment. segmentBuckets_.emplace_back(); @@ -2947,7 +3123,7 @@ void HadesGC::OldGen::addSegment(AlignedHeapSegment seg) { gc_.addSegmentExtentToCrashManager(newSeg, std::to_string(numSegments())); } -AlignedHeapSegment HadesGC::OldGen::popSegment() { +FixedSizeHeapSegment HadesGC::OldGen::popSegment() { const auto &segBuckets = segmentBuckets_.back(); for (size_t bucket = 0; bucket < kNumFreelistBuckets; ++bucket) { if (segBuckets[bucket].head) { @@ -2969,13 +3145,13 @@ void HadesGC::OldGen::setTargetSizeBytes(size_t targetSizeBytes) { } bool HadesGC::inOldGen(const void *p) const { - // If it isn't in any OG segment or the compactee, then this pointer is not in - // the OG. + // If it isn't in any OG segment or the compactee, then this pointer is not + // in the OG. 
return compactee_.contains(p) || std::any_of( oldGen_.begin(), oldGen_.end(), - [p](const AlignedHeapSegment &seg) { return seg.contains(p); }); + [p](const FixedSizeHeapSegment &seg) { return seg.contains(p); }); } void HadesGC::yieldToOldGen() { @@ -3013,12 +3189,13 @@ size_t HadesGC::getDrainRate() { assert(!kConcurrentGC); // Set a fixed floor on the mark rate, regardless of the pause time budget. - // yieldToOldGen may operate in multiples of this drain rate if it fits in the - // budget. Pinning the mark rate in this way helps us keep the dynamically - // computed OG collection threshold in a reasonable range. On a slow device, - // where we can only do one iteration of this drain rate, the OG threshold - // will be ~75%. And by not increasing the drain rate when the threshold is - // high, we avoid having a one-way ratchet effect that hurts pause times. + // yieldToOldGen may operate in multiples of this drain rate if it fits in + // the budget. Pinning the mark rate in this way helps us keep the + // dynamically computed OG collection threshold in a reasonable range. On a + // slow device, where we can only do one iteration of this drain rate, the + // OG threshold will be ~75%. And by not increasing the drain rate when the + // threshold is high, we avoid having a one-way ratchet effect that hurts + // pause times. 
constexpr size_t baseMarkRate = 3; uint64_t drainRate = baseMarkRate * ygAverageSurvivalBytes_; // In case the allocation rate is extremely low, set a lower bound to ensure @@ -3028,7 +3205,7 @@ size_t HadesGC::getDrainRate() { } void HadesGC::addSegmentExtentToCrashManager( - const AlignedHeapSegment &seg, + const FixedSizeHeapSegment &seg, const std::string &extraName) { assert(!extraName.empty() && "extraName can't be empty"); if (!crashMgr_) { @@ -3088,6 +3265,7 @@ void HadesGC::verifyCardTable() { assert(inGC() && "Must be in GC to call verifyCardTable"); struct VerifyCardDirtyAcceptor final : public SlotAcceptor { HadesGC &gc; + const GCCell *currentCell{nullptr}; explicit VerifyCardDirtyAcceptor(HadesGC &gc) : gc(gc) {} @@ -3097,8 +3275,8 @@ void HadesGC::verifyCardTable() { gc.compactee_.evacContains(valuePtr); if (!gc.inYoungGen(locPtr) && (gc.inYoungGen(valuePtr) || crossRegionCompacteePtr)) { - assert(AlignedHeapSegment::cardTableCovering(locPtr) - ->isCardForAddressDirty(locPtr)); + assert(AlignedHeapSegment::cardTableCovering(currentCell) + ->isCardForAddressDirtyInLargeObj(locPtr)); } } @@ -3117,11 +3295,13 @@ void HadesGC::verifyCardTable() { void accept(const GCSymbolID &hv) override {} }; - VerifyCardDirtyAcceptor acceptor{*this}; - forAllObjs([this, &acceptor](GCCell *cell) { markCell(acceptor, cell); }); + forAllObjs([this, &acceptor](GCCell *cell) { + acceptor.currentCell = cell; + markCell(acceptor, cell); + }); - for (const AlignedHeapSegment &seg : oldGen_) { + for (const FixedSizeHeapSegment &seg : oldGen_) { seg.cardTable().verifyBoundaries(seg.start(), seg.level()); } } diff --git a/unittests/VMRuntime/AlignedHeapSegmentTest.cpp b/unittests/VMRuntime/AlignedHeapSegmentTest.cpp index 6362b80d6f1..a087dd26904 100644 --- a/unittests/VMRuntime/AlignedHeapSegmentTest.cpp +++ b/unittests/VMRuntime/AlignedHeapSegmentTest.cpp @@ -37,18 +37,18 @@ static char *alignPointer(char *p, size_t align) { struct AlignedHeapSegmentTest : public 
::testing::Test { AlignedHeapSegmentTest() : provider_(StorageProvider::mmapProvider()), - s(std::move(AlignedHeapSegment::create(provider_.get()).get())) {} + s(std::move(FixedSizeHeapSegment::create(provider_.get()).get())) {} ~AlignedHeapSegmentTest() = default; std::unique_ptr provider_; - AlignedHeapSegment s; + FixedSizeHeapSegment s; }; #ifndef NDEBUG TEST_F(AlignedHeapSegmentTest, FailedAllocation) { LimitedStorageProvider limitedProvider{StorageProvider::mmapProvider(), 0}; - auto result = AlignedHeapSegment::create(&limitedProvider); + auto result = FixedSizeHeapSegment::create(&limitedProvider); EXPECT_FALSE(result); } #endif // !NDEBUG @@ -57,48 +57,48 @@ TEST_F(AlignedHeapSegmentTest, Start) { char *lo = s.lowLim(); char *hi = s.hiLim(); - EXPECT_EQ(lo, AlignedHeapSegment::storageStart(lo)); + EXPECT_EQ(lo, FixedSizeHeapSegment::storageStart(lo)); EXPECT_EQ( lo, - AlignedHeapSegment::storageStart( - lo + AlignedHeapSegment::storageSize() / 2)); - EXPECT_EQ(lo, AlignedHeapSegment::storageStart(hi - 1)); + FixedSizeHeapSegment::storageStart( + lo + FixedSizeHeapSegment::storageSize() / 2)); + EXPECT_EQ(lo, FixedSizeHeapSegment::storageStart(hi - 1)); // `hi` is the first address in the storage following \c storage (if // such a storage existed). - EXPECT_EQ(hi, AlignedHeapSegment::storageStart(hi)); + EXPECT_EQ(hi, FixedSizeHeapSegment::storageStart(hi)); } TEST_F(AlignedHeapSegmentTest, End) { char *lo = s.lowLim(); char *hi = s.hiLim(); - EXPECT_EQ(hi, AlignedHeapSegment::storageEnd(lo)); + EXPECT_EQ(hi, FixedSizeHeapSegment::storageEnd(lo)); EXPECT_EQ( hi, - AlignedHeapSegment::storageEnd( - lo + AlignedHeapSegment::storageSize() / 2)); - EXPECT_EQ(hi, AlignedHeapSegment::storageEnd(hi - 1)); + FixedSizeHeapSegment::storageEnd( + lo + FixedSizeHeapSegment::storageSize() / 2)); + EXPECT_EQ(hi, FixedSizeHeapSegment::storageEnd(hi - 1)); // `hi` is the first address in the storage following \c storage (if // such a storage existed). 
EXPECT_EQ( - hi + AlignedHeapSegment::storageSize(), - AlignedHeapSegment::storageEnd(hi)); + hi + FixedSizeHeapSegment::storageSize(), + FixedSizeHeapSegment::storageEnd(hi)); } TEST_F(AlignedHeapSegmentTest, Offset) { char *lo = s.lowLim(); char *hi = s.hiLim(); - const size_t size = AlignedHeapSegment::storageSize(); + const size_t size = FixedSizeHeapSegment::storageSize(); - EXPECT_EQ(0, AlignedHeapSegment::offset(lo)); - EXPECT_EQ(size / 2, AlignedHeapSegment::offset(lo + size / 2)); - EXPECT_EQ(size - 1, AlignedHeapSegment::offset(hi - 1)); + EXPECT_EQ(0, FixedSizeHeapSegment::offset(lo)); + EXPECT_EQ(size / 2, FixedSizeHeapSegment::offset(lo + size / 2)); + EXPECT_EQ(size - 1, FixedSizeHeapSegment::offset(hi - 1)); // `hi` is the first address in the storage following \c storage (if // such a storage existed). - EXPECT_EQ(0, AlignedHeapSegment::offset(hi)); + EXPECT_EQ(0, FixedSizeHeapSegment::offset(hi)); } TEST_F(AlignedHeapSegmentTest, AdviseUnused) { @@ -108,16 +108,17 @@ TEST_F(AlignedHeapSegmentTest, AdviseUnused) { #if !defined(_WINDOWS) && !defined(HERMESVM_ALLOW_HUGE_PAGES) const size_t PG_SIZE = oscompat::page_size(); - ASSERT_EQ(0, AlignedHeapSegment::storageSize() % PG_SIZE); + ASSERT_EQ(0, FixedSizeHeapSegment::storageSize() % PG_SIZE); - const size_t TOTAL_PAGES = AlignedHeapSegment::storageSize() / PG_SIZE; + const size_t TOTAL_PAGES = FixedSizeHeapSegment::storageSize() / PG_SIZE; const size_t FREED_PAGES = TOTAL_PAGES / 2; // We can't use the storage of s here since it contains guard pages and also // s.start() may not align to actual page boundary. 
- void *storage = provider_->newStorage().get(); + void *storage = + provider_->newStorage(FixedSizeHeapSegment::storageSize()).get(); char *start = reinterpret_cast(storage); - char *end = start + AlignedHeapSegment::storageSize(); + char *end = start + FixedSizeHeapSegment::storageSize(); // On some platforms, the mapping containing [start, end) can be larger than // [start, end) itself, and the extra space may already contribute to the @@ -139,7 +140,7 @@ TEST_F(AlignedHeapSegmentTest, AdviseUnused) { EXPECT_EQ(*initial + TOTAL_PAGES, *touched); EXPECT_EQ(*touched - FREED_PAGES, *marked); - provider_->deleteStorage(storage); + provider_->deleteStorage(storage, FixedSizeHeapSegment::storageSize()); #endif } @@ -151,15 +152,15 @@ TEST_F(AlignedHeapSegmentTest, Containment) { EXPECT_FALSE(s.contains(s.hiLim())); // Interior - EXPECT_TRUE(s.contains(s.lowLim() + AlignedHeapSegment::storageSize() / 2)); + EXPECT_TRUE(s.contains(s.lowLim() + FixedSizeHeapSegment::storageSize() / 2)); } TEST_F(AlignedHeapSegmentTest, Alignment) { /** - * This test alternates between allocating an AlignedHeapSegment, and an + * This test alternates between allocating a FixedSizeHeapSegment, and an * anonymous "spacer" mapping such that the i-th spacer has size: * - * AlignedHeapSegment::storageSize() + i MB + * FixedSizeHeapSegment::storageSize() + i MB * * In the worst case the anonymous mappings are perfectly interleaved with the * aligned storage, and we must be intentional about aligning the storage @@ -175,7 +176,7 @@ TEST_F(AlignedHeapSegmentTest, Alignment) { * - A box's width includes its left boundary and excludes its right boundary. * - A / boundary indicates 1MB belongs to the previous box and 1MB to the * next. - * - Boxes labeled with `A` are AlignedHeapSegment. + * - Boxes labeled with `A` are FixedSizeHeapSegment. * - Boxes labeled with `S` are spacers. * - Boxes with no label are unmapped.
* @@ -183,16 +184,16 @@ TEST_F(AlignedHeapSegmentTest, Alignment) { * allocation pattern we (might) get from allocating in a tight loop. */ - std::vector segments; + std::vector segments; std::vector spacers; const size_t MB = 1 << 20; - const size_t SIZE = AlignedHeapSegment::storageSize(); + const size_t SIZE = FixedSizeHeapSegment::storageSize(); for (size_t space = SIZE + MB; space < 2 * SIZE; space += MB) { segments.emplace_back( - std::move(AlignedHeapSegment::create(provider_.get()).get())); - AlignedHeapSegment &seg = segments.back(); + std::move(FixedSizeHeapSegment::create(provider_.get()).get())); + FixedSizeHeapSegment &seg = segments.back(); EXPECT_EQ(seg.lowLim(), alignPointer(seg.lowLim(), SIZE)); @@ -200,7 +201,7 @@ TEST_F(AlignedHeapSegmentTest, Alignment) { } { // When \c storages goes out of scope, it will correctly destruct the \c - // AlignedHeapSegment instances it holds. \c spacers, on the other hand, + // FixedSizeHeapSegment instances it holds. \c spacers, on the other hand, // holds only raw pointers, so we must clean them up manually: size_t space = SIZE + MB; for (void *spacer : spacers) { @@ -252,7 +253,7 @@ TEST_F(AlignedHeapSegmentTest, AllocTest) { } TEST_F(AlignedHeapSegmentTest, FullSize) { - EXPECT_EQ(s.size(), AlignedHeapSegment::maxSize()); + EXPECT_EQ(s.size(), FixedSizeHeapSegment::maxSize()); EXPECT_EQ(s.size(), s.available()); EXPECT_EQ(s.size(), s.hiLim() - s.start()); @@ -279,7 +280,7 @@ using AlignedHeapSegmentDeathTest = AlignedHeapSegmentTest; // Allocating into a null segment causes an assertion failure on !NDEBUG builds. 
TEST_F(AlignedHeapSegmentDeathTest, NullAlloc) { - AlignedHeapSegment s; + FixedSizeHeapSegment s; constexpr uint32_t SIZE = heapAlignSize(sizeof(GCCell)); EXPECT_DEATH_IF_SUPPORTED({ s.alloc(SIZE); }, "null segment"); } diff --git a/unittests/VMRuntime/CardObjectBoundaryNCTest.cpp b/unittests/VMRuntime/CardObjectBoundaryNCTest.cpp index e763c37f122..2bb1e31f2ba 100644 --- a/unittests/VMRuntime/CardObjectBoundaryNCTest.cpp +++ b/unittests/VMRuntime/CardObjectBoundaryNCTest.cpp @@ -33,7 +33,7 @@ struct CardObjectBoundaryNCTest : public ::testing::Test { } std::unique_ptr provider; - AlignedHeapSegment segment; + FixedSizeHeapSegment segment; CardTable::Boundary boundary; size_t segStartIndex; @@ -41,7 +41,7 @@ struct CardObjectBoundaryNCTest : public ::testing::Test { CardObjectBoundaryNCTest::CardObjectBoundaryNCTest() : provider(StorageProvider::mmapProvider()), - segment(std::move(AlignedHeapSegment::create(provider.get()).get())), + segment(std::move(FixedSizeHeapSegment::create(provider.get()).get())), boundary(segment.cardTable().nextBoundary(segment.start())), segStartIndex(boundary.index()) {} diff --git a/unittests/VMRuntime/CardTableNCTest.cpp b/unittests/VMRuntime/CardTableNCTest.cpp index adaffe0651d..682ba412515 100644 --- a/unittests/VMRuntime/CardTableNCTest.cpp +++ b/unittests/VMRuntime/CardTableNCTest.cpp @@ -36,9 +36,9 @@ struct CardTableNCTest : public ::testing::Test { protected: std::unique_ptr provider{StorageProvider::mmapProvider()}; - AlignedHeapSegment seg{ - std::move(AlignedHeapSegment::create(provider.get()).get())}; - CardTable *table{new (seg.lowLim()) CardTable()}; + FixedSizeHeapSegment seg{ + std::move(FixedSizeHeapSegment::create(provider.get()).get())}; + CardTable *table{new (seg.lowLim()) CardTable(FixedSizeHeapSegment::kSize)}; // Addresses in the aligned storage to interact with during the tests. 
std::vector addrs; @@ -52,15 +52,16 @@ void CardTableNCTest::dirtyRangeTest( table->dirtyCardsForAddressRange(dirtyStart, dirtyEnd); for (char *p = expectedStart; p < expectedEnd; p += CardTable::kCardSize) { - EXPECT_TRUE(table->isCardForAddressDirty(p)); + EXPECT_TRUE(table->isCardForAddressDirtyInLargeObj(p)); } } CardTableNCTest::CardTableNCTest() { // For purposes of this test, we'll assume the first writeable byte of - // the segment comes just after the card table (which is at the - // start of the segment). - auto first = seg.lowLim() + sizeof(CardTable); + // the segment comes just after the memory region that can be mapped by + // kFirstUsedIndex bytes. + auto first = seg.lowLim() + + CardTable::kFirstUsedIndex * CardTable::kHeapBytesPerCardByte; auto last = reinterpret_cast(llvh::alignDown( reinterpret_cast(seg.hiLim() - 1), CardTable::kCardSize)); @@ -79,7 +80,7 @@ CardTableNCTest::CardTableNCTest() { TEST_F(CardTableNCTest, AddressToIndex) { // Expected indices in the card table corresponding to the probe // addresses into the storage. - const size_t lastIx = CardTable::kValidIndices - 1; + const size_t lastIx = table->getEndIndex() - 1; std::vector indices{ CardTable::kFirstUsedIndex, CardTable::kFirstUsedIndex + 1, @@ -104,29 +105,30 @@ TEST_F(CardTableNCTest, AddressToIndexBoundary) { // the storage. 
ASSERT_EQ(seg.lowLim(), reinterpret_cast(table)); - const size_t hiLim = CardTable::kValidIndices; + const size_t hiLim = table->getEndIndex(); EXPECT_EQ(0, table->addressToIndex(seg.lowLim())); EXPECT_EQ(hiLim, table->addressToIndex(seg.hiLim())); } TEST_F(CardTableNCTest, DirtyAddress) { - const size_t lastIx = CardTable::kValidIndices - 1; + const size_t lastIx = table->getEndIndex() - 1; for (char *addr : addrs) { size_t ind = table->addressToIndex(addr); - EXPECT_FALSE(ind > 0 && table->isCardForIndexDirty(ind - 1)) + EXPECT_FALSE(ind > 0 && table->isCardForIndexDirtyInLargeObj(ind - 1)) << "initial " << ind << " - 1"; - EXPECT_FALSE(table->isCardForIndexDirty(ind)) << "initial " << ind; - EXPECT_FALSE(ind < lastIx && table->isCardForIndexDirty(ind + 1)) + EXPECT_FALSE(table->isCardForIndexDirtyInLargeObj(ind)) + << "initial " << ind; + EXPECT_FALSE(ind < lastIx && table->isCardForIndexDirtyInLargeObj(ind + 1)) << "initial " << ind << " + 1"; - table->dirtyCardForAddress(addr); + table->dirtyCardForAddressInLargeObj(addr); - EXPECT_FALSE(ind > 0 && table->isCardForIndexDirty(ind - 1)) + EXPECT_FALSE(ind > 0 && table->isCardForIndexDirtyInLargeObj(ind - 1)) << "dirty " << ind << " - 1"; - EXPECT_TRUE(table->isCardForIndexDirty(ind)) << "dirty " << ind; - EXPECT_FALSE(ind < lastIx && table->isCardForIndexDirty(ind + 1)) + EXPECT_TRUE(table->isCardForIndexDirtyInLargeObj(ind)) << "dirty " << ind; + EXPECT_FALSE(ind < lastIx && table->isCardForIndexDirtyInLargeObj(ind + 1)) << "dirty " << ind << " + 1"; table->clear(); @@ -137,7 +139,8 @@ TEST_F(CardTableNCTest, DirtyAddress) { TEST_F(CardTableNCTest, DirtyAddressRangeEmpty) { char *addr = addrs.at(0); table->dirtyCardsForAddressRange(addr, addr); - EXPECT_FALSE(table->findNextDirtyCard(0, CardTable::kValidIndices)); + EXPECT_FALSE(table->findNextDirtyCard( + CardTable::kFirstUsedIndex, table->getEndIndex())); } /// Dirty an address range smaller than a single card. 
@@ -186,26 +189,26 @@ TEST_F(CardTableNCTest, DirtyAddressRangeLarge) { TEST_F(CardTableNCTest, Initial) { for (char *addr : addrs) { - EXPECT_FALSE(table->isCardForAddressDirty(addr)); + EXPECT_FALSE(table->isCardForAddressDirtyInLargeObj(addr)); } } TEST_F(CardTableNCTest, Clear) { for (char *addr : addrs) { - ASSERT_FALSE(table->isCardForAddressDirty(addr)); + ASSERT_FALSE(table->isCardForAddressDirtyInLargeObj(addr)); } for (char *addr : addrs) { - table->dirtyCardForAddress(addr); + table->dirtyCardForAddressInLargeObj(addr); } for (char *addr : addrs) { - ASSERT_TRUE(table->isCardForAddressDirty(addr)); + ASSERT_TRUE(table->isCardForAddressDirtyInLargeObj(addr)); } table->clear(); for (char *addr : addrs) { - EXPECT_FALSE(table->isCardForAddressDirty(addr)); + EXPECT_FALSE(table->isCardForAddressDirtyInLargeObj(addr)); } } @@ -213,8 +216,8 @@ TEST_F(CardTableNCTest, NextDirtyCardImmediate) { char *addr = addrs.at(addrs.size() / 2); size_t ind = table->addressToIndex(addr); - table->dirtyCardForAddress(addr); - auto dirty = table->findNextDirtyCard(ind, CardTable::kValidIndices); + table->dirtyCardForAddressInLargeObj(addr); + auto dirty = table->findNextDirtyCard(ind, table->getEndIndex()); ASSERT_TRUE(dirty); EXPECT_EQ(ind, *dirty); @@ -222,17 +225,18 @@ TEST_F(CardTableNCTest, NextDirtyCardImmediate) { TEST_F(CardTableNCTest, NextDirtyCard) { /// Empty case: No dirty cards - EXPECT_FALSE(table->findNextDirtyCard(0, CardTable::kValidIndices)); + EXPECT_FALSE(table->findNextDirtyCard( + CardTable::kFirstUsedIndex, table->getEndIndex())); - size_t from = 0; + size_t from = CardTable::kFirstUsedIndex; for (char *addr : addrs) { - table->dirtyCardForAddress(addr); + table->dirtyCardForAddressInLargeObj(addr); auto ind = table->addressToIndex(addr); EXPECT_FALSE(table->findNextDirtyCard(from, ind)); auto atEnd = table->findNextDirtyCard(from, ind + 1); - auto inMiddle = table->findNextDirtyCard(from, CardTable::kValidIndices); + auto inMiddle = 
table->findNextDirtyCard(from, table->getEndIndex()); ASSERT_TRUE(atEnd); EXPECT_EQ(ind, *atEnd); diff --git a/unittests/VMRuntime/CrashManagerTest.cpp b/unittests/VMRuntime/CrashManagerTest.cpp index 8201eb4247e..84f98055b19 100644 --- a/unittests/VMRuntime/CrashManagerTest.cpp +++ b/unittests/VMRuntime/CrashManagerTest.cpp @@ -31,7 +31,7 @@ using ::testing::MatchesRegex; namespace { // We make this not FixedSize, to allow direct allocation in the old generation. -using SegmentCell = EmptyCell; +using SegmentCell = EmptyCell; class TestCrashManager : public CrashManager { public: @@ -107,8 +107,8 @@ TEST(CrashManagerTest, HeapExtentsCorrect) { uint32_t numHeapSegmentsNumbered = 0; int32_t keyNum; for (const auto &[key, payload] : contextualCustomData) { - // Keeps track whether key represents an AlignedHeapSegment so that payload - // can be validated below. + // Keeps track whether key represents a FixedSizeHeapSegment so that + // payload can be validated below. bool validatePayload = false; if (key == ygSegmentName) { validatePayload = true; diff --git a/unittests/VMRuntime/GCBasicsTest.cpp b/unittests/VMRuntime/GCBasicsTest.cpp index 2c71a106be5..7c6c074731a 100644 --- a/unittests/VMRuntime/GCBasicsTest.cpp +++ b/unittests/VMRuntime/GCBasicsTest.cpp @@ -356,9 +356,9 @@ TEST(GCCallbackTest, TestCallbackInvoked) { } #ifndef HERMESVM_GC_MALLOC -using SegmentCell = EmptyCell; +using SegmentCell = EmptyCell; TEST(GCBasicsTestNCGen, TestIDPersistsAcrossMultipleCollections) { - constexpr size_t kHeapSizeHint = AlignedHeapSegment::maxSize() * 10; + constexpr size_t kHeapSizeHint = FixedSizeHeapSegment::maxSize() * 10; const GCConfig kGCConfig = TestGCConfigFixedSize(kHeapSizeHint); auto runtime = DummyRuntime::create(kGCConfig); diff --git a/unittests/VMRuntime/GCFragmentationTest.cpp b/unittests/VMRuntime/GCFragmentationTest.cpp index b554dcea6d4..714b1213c4f 100644 --- a/unittests/VMRuntime/GCFragmentationTest.cpp +++ b/unittests/VMRuntime/GCFragmentationTest.cpp
@@ -30,15 +30,15 @@ TEST(GCFragmentationTest, TestCoalescing) { // allocate. static const size_t kNumAvailableSegments = kNumSegments + 1; static const size_t kHeapSize = - AlignedHeapSegment::maxSize() * kNumAvailableSegments; + FixedSizeHeapSegment::maxSize() * kNumAvailableSegments; static const GCConfig kGCConfig = TestGCConfigFixedSize(kHeapSize); auto runtime = DummyRuntime::create(kGCConfig); DummyRuntime &rt = *runtime; - using SixteenthCell = EmptyCell; - using EighthCell = EmptyCell; - using QuarterCell = EmptyCell; + using SixteenthCell = EmptyCell; + using EighthCell = EmptyCell; + using QuarterCell = EmptyCell; { GCScope scope(rt); diff --git a/unittests/VMRuntime/GCLazySegmentNCTest.cpp b/unittests/VMRuntime/GCLazySegmentNCTest.cpp index c90af85c5ad..26c3a2eba7c 100644 --- a/unittests/VMRuntime/GCLazySegmentNCTest.cpp +++ b/unittests/VMRuntime/GCLazySegmentNCTest.cpp @@ -27,13 +27,13 @@ struct GCLazySegmentNCTest : public ::testing::Test {}; using GCLazySegmentNCDeathTest = GCLazySegmentNCTest; -using SegmentCell = EmptyCell; +using SegmentCell = EmptyCell; -constexpr size_t kHeapSizeHint = AlignedHeapSegment::maxSize() * 10; +constexpr size_t kHeapSizeHint = FixedSizeHeapSegment::maxSize() * 10; const GCConfig kGCConfig = TestGCConfigFixedSize(kHeapSizeHint); -constexpr size_t kHeapVA = AlignedHeapSegment::storageSize() * 10; +constexpr size_t kHeapVA = FixedSizeHeapSegment::storageSize() * 10; constexpr size_t kHeapVALimited = - kHeapVA / 2 + AlignedHeapSegment::storageSize() - 1; + kHeapVA / 2 + FixedSizeHeapSegment::storageSize() - 1; /// We are able to materialize every segment. 
TEST_F(GCLazySegmentNCTest, MaterializeAll) { diff --git a/unittests/VMRuntime/GCOOMTest.cpp b/unittests/VMRuntime/GCOOMTest.cpp index 87f521074ff..22e7b048cf0 100644 --- a/unittests/VMRuntime/GCOOMTest.cpp +++ b/unittests/VMRuntime/GCOOMTest.cpp @@ -34,10 +34,10 @@ static void exceedMaxHeap( GCConfig::Builder baseConfig = kTestGCConfigBaseBuilder) { static constexpr size_t kSegments = 10; static constexpr size_t kHeapSizeHint = - AlignedHeapSegment::maxSize() * kSegments; + FixedSizeHeapSegment::maxSize() * kSegments; // Only one of these cells will fit into a segment, with the maximum amount of // space wasted in the segment. - using AwkwardCell = EmptyCell; + using AwkwardCell = EmptyCell; auto runtime = DummyRuntime::create(TestGCConfigFixedSize(kHeapSizeHint, baseConfig)); diff --git a/unittests/VMRuntime/GCReturnUnusedMemoryTest.cpp b/unittests/VMRuntime/GCReturnUnusedMemoryTest.cpp index 022373a9b1a..c257078971b 100644 --- a/unittests/VMRuntime/GCReturnUnusedMemoryTest.cpp +++ b/unittests/VMRuntime/GCReturnUnusedMemoryTest.cpp @@ -32,7 +32,7 @@ TEST(GCReturnUnusedMemoryTest, CollectReturnsFreeMemory) { DummyRuntime &rt = *runtime; auto &gc = rt.getHeap(); - using SemiCell = EmptyCell; + using SemiCell = EmptyCell; llvh::ErrorOr before = 0; { diff --git a/unittests/VMRuntime/MarkBitArrayNCTest.cpp b/unittests/VMRuntime/MarkBitArrayNCTest.cpp index 455c1996fb1..efbd975c651 100644 --- a/unittests/VMRuntime/MarkBitArrayNCTest.cpp +++ b/unittests/VMRuntime/MarkBitArrayNCTest.cpp @@ -20,16 +20,24 @@ #include using namespace hermes::vm; -using MarkBitArray = AlignedHeapSegment::Contents::MarkBitArray; +using MarkBitArray = FixedSizeHeapSegment::Contents::MarkBitArray; namespace { struct MarkBitArrayTest : public ::testing::Test { MarkBitArrayTest(); + size_t addressToMarkBitArrayIndex(const void *addr) { + // Since we only test FixedSizeHeapSegment in this file, it's safe to cast + // address in the segment to a GCCell pointer (i.e., we can always compute + // the 
correct segment start address from this pointer). + auto *cell = reinterpret_cast(addr); + return seg.addressToMarkBitArrayIndex(cell); + } + protected: std::unique_ptr provider; - AlignedHeapSegment seg; + FixedSizeHeapSegment seg; MarkBitArray &mba; // Addresses in the aligned storage to interact w ith during the tests. @@ -38,7 +46,7 @@ struct MarkBitArrayTest : public ::testing::Test { MarkBitArrayTest::MarkBitArrayTest() : provider(StorageProvider::mmapProvider()), - seg{std::move(AlignedHeapSegment::create(provider.get()).get())}, + seg{std::move(FixedSizeHeapSegment::create(provider.get()).get())}, mba(seg.markBitArray()) { auto first = seg.lowLim(); auto last = reinterpret_cast( @@ -66,7 +74,7 @@ TEST_F(MarkBitArrayTest, AddressToIndex) { char *addr = addrs.at(i); size_t ind = indices.at(i); - EXPECT_EQ(ind, AlignedHeapSegment::addressToMarkBitArrayIndex(addr)) + EXPECT_EQ(ind, addressToMarkBitArrayIndex(addr)) << "0x" << std::hex << (void *)addr << " -> " << ind; char *toAddr = seg.lowLim() + (ind << LogHeapAlign); EXPECT_EQ(toAddr, addr) @@ -78,7 +86,7 @@ TEST_F(MarkBitArrayTest, MarkGet) { const size_t lastIx = mba.size() - 1; for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(ind > 0 && mba.at(ind - 1)) << "initial " << ind << " - 1"; EXPECT_FALSE(mba.at(ind)) << "initial " << ind; @@ -97,37 +105,37 @@ TEST_F(MarkBitArrayTest, MarkGet) { TEST_F(MarkBitArrayTest, Initial) { for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(mba.at(ind)); } } TEST_F(MarkBitArrayTest, Clear) { for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); ASSERT_FALSE(mba.at(ind)); } for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + 
size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); } for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); ASSERT_TRUE(mba.at(ind)); } mba.reset(); for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(mba.at(ind)); } } TEST_F(MarkBitArrayTest, NextMarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); EXPECT_EQ(ind, mba.findNextSetBitFrom(ind)); @@ -140,7 +148,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) { EXPECT_EQ(FOUND_NONE, mba.findNextSetBitFrom(0)); std::queue indices; for (char *addr : addrs) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + auto ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); indices.push(ind); } @@ -154,7 +162,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) { TEST_F(MarkBitArrayTest, NextUnmarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(); mba.set(ind, false); EXPECT_EQ(ind, mba.findNextZeroBitFrom(ind)); @@ -167,7 +175,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) { EXPECT_EQ(FOUND_NONE, mba.findNextZeroBitFrom(0)); std::queue indices; for (char *addr : addrs) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + auto ind = addressToMarkBitArrayIndex(addr); mba.set(ind, false); indices.push(ind); } @@ -182,7 +190,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) { TEST_F(MarkBitArrayTest, PrevMarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); EXPECT_EQ(ind, 
mba.findPrevSetBitFrom(ind + 1)); } @@ -196,7 +204,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) { std::queue indices; size_t addrIdx = addrs.size(); while (addrIdx-- > 0) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]); + auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]); mba.set(ind, true); indices.push(ind); } @@ -209,7 +217,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) { TEST_F(MarkBitArrayTest, PrevUnmarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(); mba.set(ind, false); EXPECT_EQ(ind, mba.findPrevZeroBitFrom(ind + 1)); @@ -225,7 +233,7 @@ TEST_F(MarkBitArrayTest, PrevUnmarkedBit) { std::queue indices; size_t addrIdx = addrs.size(); while (addrIdx-- > 0) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]); + auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]); mba.set(ind, false); indices.push(ind); } diff --git a/unittests/VMRuntime/StorageProviderTest.cpp b/unittests/VMRuntime/StorageProviderTest.cpp index e189bcabce0..3a5f8b05ece 100644 --- a/unittests/VMRuntime/StorageProviderTest.cpp +++ b/unittests/VMRuntime/StorageProviderTest.cpp @@ -12,8 +12,6 @@ #include "hermes/VM/AlignedHeapSegment.h" #include "hermes/VM/LimitedStorageProvider.h" -#include "llvh/ADT/STLExtras.h" - using namespace hermes; using namespace hermes::vm; @@ -24,8 +22,8 @@ struct NullStorageProvider : public StorageProvider { static std::unique_ptr create(); protected: - llvh::ErrorOr newStorageImpl(const char *) override; - void deleteStorageImpl(void *) override; + llvh::ErrorOr newStorageImpl(size_t sz, const char *) override; + void deleteStorageImpl(void *, size_t sz) override; }; /* static */ @@ -33,7 +31,9 @@ std::unique_ptr NullStorageProvider::create() { return std::make_unique(); } -llvh::ErrorOr NullStorageProvider::newStorageImpl(const char *) { +llvh::ErrorOr 
NullStorageProvider::newStorageImpl( + size_t sz, + const char *) { // Doesn't matter what code is returned here. return make_error_code(OOMError::TestVMLimitReached); } @@ -43,33 +43,43 @@ enum StorageProviderType { ContiguousVAProvider, }; +struct StorageProviderParam { + StorageProviderType providerType; + size_t storageSize; + size_t vaSize; +}; + static std::unique_ptr GetStorageProvider( - StorageProviderType type) { + StorageProviderType type, + size_t vaSize) { switch (type) { case MmapProvider: return StorageProvider::mmapProvider(); case ContiguousVAProvider: - return StorageProvider::contiguousVAProvider( - AlignedHeapSegment::storageSize()); + return StorageProvider::contiguousVAProvider(vaSize); default: return nullptr; } } class StorageProviderTest - : public ::testing::TestWithParam {}; + : public ::testing::TestWithParam {}; -void NullStorageProvider::deleteStorageImpl(void *) {} +void NullStorageProvider::deleteStorageImpl(void *, size_t sz) {} + +/// Minimum segment storage size. 
+static constexpr size_t SIZE = FixedSizeHeapSegment::storageSize(); TEST_P(StorageProviderTest, StorageProviderSucceededAllocsLogCount) { - auto provider{GetStorageProvider(GetParam())}; + auto ¶ms = GetParam(); + auto provider{GetStorageProvider(params.providerType, params.vaSize)}; ASSERT_EQ(0, provider->numSucceededAllocs()); ASSERT_EQ(0, provider->numFailedAllocs()); ASSERT_EQ(0, provider->numDeletedAllocs()); ASSERT_EQ(0, provider->numLiveAllocs()); - auto result = provider->newStorage("Test"); + auto result = provider->newStorage(params.storageSize, "Test"); ASSERT_TRUE(result); void *s = result.get(); @@ -78,7 +88,7 @@ TEST_P(StorageProviderTest, StorageProviderSucceededAllocsLogCount) { EXPECT_EQ(0, provider->numDeletedAllocs()); EXPECT_EQ(1, provider->numLiveAllocs()); - provider->deleteStorage(s); + provider->deleteStorage(s, params.storageSize); EXPECT_EQ(1, provider->numSucceededAllocs()); EXPECT_EQ(0, provider->numFailedAllocs()); @@ -94,7 +104,7 @@ TEST(StorageProviderTest, StorageProviderFailedAllocsLogCount) { ASSERT_EQ(0, provider->numDeletedAllocs()); ASSERT_EQ(0, provider->numLiveAllocs()); - auto result = provider->newStorage("Test"); + auto result = provider->newStorage(SIZE, "Test"); ASSERT_FALSE(result); EXPECT_EQ(0, provider->numSucceededAllocs()); @@ -107,20 +117,20 @@ TEST(StorageProviderTest, LimitedStorageProviderEnforce) { constexpr size_t LIM = 2; LimitedStorageProvider provider{ StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM, + SIZE * LIM, }; void *live[LIM]; for (size_t i = 0; i < LIM; ++i) { - auto result = provider.newStorage("Live"); + auto result = provider.newStorage(SIZE, "Live"); ASSERT_TRUE(result); live[i] = result.get(); } - EXPECT_FALSE(provider.newStorage("Dead")); + EXPECT_FALSE(provider.newStorage(SIZE, "Dead")); // Clean-up for (auto s : live) { - provider.deleteStorage(s); + provider.deleteStorage(s, SIZE); } } @@ -128,16 +138,16 @@ TEST(StorageProviderTest, 
LimitedStorageProviderTrackDelete) { constexpr size_t LIM = 2; LimitedStorageProvider provider{ StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM, + SIZE * LIM, }; // If the storage gets deleted, we should be able to re-allocate it, even if // the total number of allocations exceeds the limit. for (size_t i = 0; i < LIM + 1; ++i) { - auto result = provider.newStorage("Live"); + auto result = provider.newStorage(SIZE, "Live"); ASSERT_TRUE(result); auto *s = result.get(); - provider.deleteStorage(s); + provider.deleteStorage(s, SIZE); } } @@ -145,13 +155,13 @@ TEST(StorageProviderTest, LimitedStorageProviderDeleteNull) { constexpr size_t LIM = 2; LimitedStorageProvider provider{ StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM, + SIZE * LIM, }; void *live[LIM]; for (size_t i = 0; i < LIM; ++i) { - auto result = provider.newStorage("Live"); + auto result = provider.newStorage(SIZE, "Live"); ASSERT_TRUE(result); live[i] = result.get(); } @@ -159,27 +169,25 @@ TEST(StorageProviderTest, LimitedStorageProviderDeleteNull) { // The allocations should fail because we have hit the limit, and the // deletions should not affect the limit, because they are of null storages. 
for (size_t i = 0; i < 2; ++i) { - auto result = provider.newStorage("Live"); + auto result = provider.newStorage(SIZE, "Live"); EXPECT_FALSE(result); } // Clean-up for (auto s : live) { - provider.deleteStorage(s); + provider.deleteStorage(s, SIZE); } } TEST(StorageProviderTest, StorageProviderAllocsCount) { constexpr size_t LIM = 2; - auto provider = - std::unique_ptr<StorageProvider>{new LimitedStorageProvider{ - StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM}}; + auto provider = std::unique_ptr<StorageProvider>{ + new LimitedStorageProvider{StorageProvider::mmapProvider(), SIZE * LIM}}; constexpr size_t FAILS = 3; void *storages[LIM]; for (size_t i = 0; i < LIM; ++i) { - auto result = provider->newStorage(); + auto result = provider->newStorage(SIZE); ASSERT_TRUE(result); storages[i] = result.get(); } @@ -188,7 +196,7 @@ TEST(StorageProviderTest, StorageProviderAllocsCount) { EXPECT_EQ(LIM, provider->numLiveAllocs()); for (size_t i = 0; i < FAILS; ++i) { - auto result = provider->newStorage(); + auto result = provider->newStorage(SIZE); ASSERT_FALSE(result); } @@ -196,21 +204,66 @@ TEST(StorageProviderTest, StorageProviderAllocsCount) { // Clean-up for (auto s : storages) { - provider->deleteStorage(s); + provider->deleteStorage(s, SIZE); } EXPECT_EQ(0, provider->numLiveAllocs()); EXPECT_EQ(LIM, provider->numDeletedAllocs()); } +/// Testing that the ContiguousProvider allocates and deallocates as intended, +/// which is to always allocate at lowest-address free space and correctly free +/// the space when the storage is deleted.
+TEST(StorageProviderTest, ContiguousProviderTest) { + auto provider = + GetStorageProvider(StorageProviderType::ContiguousVAProvider, SIZE * 10); + + size_t sz1 = SIZE * 5; + auto result = provider->newStorage(sz1); + ASSERT_TRUE(result); + auto *s1 = *result; + + size_t sz2 = SIZE * 3; + result = provider->newStorage(sz2); + ASSERT_TRUE(result); + auto *s2 = *result; + + size_t sz3 = SIZE * 3; + result = provider->newStorage(sz3); + ASSERT_FALSE(result); + + provider->deleteStorage(s1, sz1); + + result = provider->newStorage(sz3); + ASSERT_TRUE(result); + auto *s3 = *result; + + size_t sz4 = SIZE * 2; + result = provider->newStorage(sz4); + ASSERT_TRUE(result); + auto *s4 = *result; + + result = provider->newStorage(sz4); + ASSERT_TRUE(result); + auto *s5 = *result; + + provider->deleteStorage(s2, sz2); + provider->deleteStorage(s3, sz3); + provider->deleteStorage(s4, sz4); + provider->deleteStorage(s5, sz4); +} + /// StorageGuard will free storage on scope exit. class StorageGuard final { public: - StorageGuard(std::shared_ptr<StorageProvider> provider, void *storage) - : provider_(std::move(provider)), storage_(storage) {} + StorageGuard( + std::shared_ptr<StorageProvider> provider, + void *storage, + size_t sz) + : provider_(std::move(provider)), storage_(storage), sz_(sz) {} ~StorageGuard() { - provider_->deleteStorage(storage_); + provider_->deleteStorage(storage_, sz_); } void *raw() const { @@ -220,6 +273,7 @@ class StorageGuard final { private: std::shared_ptr<StorageProvider> provider_; void *storage_; + size_t sz_; }; #ifndef NDEBUG @@ -235,8 +289,8 @@ class SetVALimit final { } }; -static const size_t KB = 1 << 10; -static const size_t MB = KB * KB; +static constexpr size_t KB = 1 << 10; +static constexpr size_t MB = KB * KB; TEST(StorageProviderTest, SucceedsWithoutReducing) { // Should succeed without reducing the size at all.
@@ -261,16 +315,13 @@ TEST(StorageProviderTest, SucceedsAfterReducing) { } { // Test using the aligned storage alignment - SetVALimit limit{50 * AlignedHeapSegment::storageSize()}; - auto result = vmAllocateAllowLess( - 100 * AlignedHeapSegment::storageSize(), - 30 * AlignedHeapSegment::storageSize(), - AlignedHeapSegment::storageSize()); + SetVALimit limit{50 * SIZE}; + auto result = vmAllocateAllowLess(100 * SIZE, 30 * SIZE, SIZE); ASSERT_TRUE(result); auto memAndSize = result.get(); EXPECT_TRUE(memAndSize.first != nullptr); - EXPECT_GE(memAndSize.second, 30 * AlignedHeapSegment::storageSize()); - EXPECT_LE(memAndSize.second, 50 * AlignedHeapSegment::storageSize()); + EXPECT_GE(memAndSize.second, 30 * SIZE); + EXPECT_LE(memAndSize.second, 50 * SIZE); } } @@ -282,11 +333,14 @@ TEST(StorageProviderTest, FailsDueToLimitLowerThanMin) { } TEST_P(StorageProviderTest, VirtualMemoryFreed) { - SetVALimit limit{10 * MB}; + SetVALimit limit{25 * MB}; + auto &params = GetParam(); for (size_t i = 0; i < 20; i++) { - std::shared_ptr<StorageProvider> sp = GetStorageProvider(GetParam()); - StorageGuard sg{sp, *sp->newStorage()}; + std::shared_ptr<StorageProvider> sp = + GetStorageProvider(params.providerType, params.vaSize); + StorageGuard sg{ + sp, *sp->newStorage(params.storageSize), params.storageSize}; } } @@ -295,6 +349,17 @@ TEST_P(StorageProviderTest, VirtualMemoryFreed) { INSTANTIATE_TEST_CASE_P( StorageProviderTests, StorageProviderTest, - ::testing::Values(MmapProvider, ContiguousVAProvider)); + ::testing::Values( + StorageProviderParam{ + MmapProvider, + SIZE, + 0, + }, + StorageProviderParam{ + ContiguousVAProvider, + SIZE, + SIZE, + }, + StorageProviderParam{ContiguousVAProvider, SIZE * 5, SIZE * 5})); } // namespace