Update Files

2025-01-22 17:22:38 +01:00
parent 89b9349629
commit 4c5e729485
5132 changed files with 1195369 additions and 0 deletions

include/cppgc/internal/api-constants.h View File

@@ -0,0 +1,65 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_API_CONSTANTS_H_
#define INCLUDE_CPPGC_INTERNAL_API_CONSTANTS_H_
#include <cstddef>
#include <cstdint>
#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
// Embedders should not rely on this code!
// Internal constants to avoid exposing internal types on the API surface.
namespace api_constants {
constexpr size_t kKB = 1024;
constexpr size_t kMB = kKB * 1024;
constexpr size_t kGB = kMB * 1024;
// Offset of the uint16_t bitfield containing the in-construction bit from
// the payload. This is subtracted from the payload pointer to get to the
// right bitfield.
static constexpr size_t kFullyConstructedBitFieldOffsetFromPayload =
2 * sizeof(uint16_t);
// Mask for in-construction bit.
static constexpr uint16_t kFullyConstructedBitMask = uint16_t{1};
static constexpr size_t kPageSize = size_t{1} << 17;
#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS)
constexpr size_t kGuardPageSize = 0;
#else
constexpr size_t kGuardPageSize = 4096;
#endif
static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
#if defined(CPPGC_CAGED_HEAP)
#if defined(CPPGC_2GB_CAGE)
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(2) * kGB;
#else // !defined(CPPGC_2GB_CAGE)
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
#endif // !defined(CPPGC_2GB_CAGE)
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
#endif // defined(CPPGC_CAGED_HEAP)
static constexpr size_t kDefaultAlignment = sizeof(void*);
// Maximum supported alignment for a type as in `alignof(T)`.
static constexpr size_t kMaxSupportedAlignment = 2 * kDefaultAlignment;
// Granularity of heap allocations.
constexpr size_t kAllocationGranularity = sizeof(void*);
} // namespace api_constants
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_API_CONSTANTS_H_
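
A minimal standalone sketch that checks the arithmetic relationships between these constants, assuming the default configuration (non-ARM64-macOS guard pages, 4 GiB cage); the names mirror the header but the snippet does not include it:

#include <cstddef>

namespace sketch {
constexpr size_t kKB = 1024;
constexpr size_t kMB = kKB * 1024;
constexpr size_t kGB = kMB * 1024;
constexpr size_t kPageSize = size_t{1} << 17;

static_assert(kPageSize == 128 * kKB, "normal pages are 128 KiB");
static_assert(kPageSize / 2 == 64 * kKB,
              "objects of 64 KiB and larger go to large pages");
// A power-of-two reservation means masking with (alignment - 1) yields the
// offset within the cage; see CagedHeapBase::OffsetFromAddress().
static_assert(((4 * kGB) & (4 * kGB - 1)) == 0,
              "cage reservation is a power of two");
}  // namespace sketch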

include/cppgc/internal/atomic-entry-flag.h View File

@@ -0,0 +1,48 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_ATOMIC_ENTRY_FLAG_H_
#define INCLUDE_CPPGC_INTERNAL_ATOMIC_ENTRY_FLAG_H_
#include <atomic>
namespace cppgc {
namespace internal {
// A flag which provides a fast check whether a scope may be entered on the
// current thread, without needing to access thread-local storage or a mutex.
// Can
// have false positives (i.e., spuriously report that it might be entered), so
// it is expected that this will be used in tandem with a precise check that the
// scope is in fact entered on that thread.
//
// Example:
// g_frobnicating_flag.MightBeEntered() &&
// ThreadLocalFrobnicator().IsFrobnicating()
//
// Relaxed atomic operations are sufficient, since:
// - all accesses remain atomic
// - each thread must observe its own operations in order
// - no thread ever exits the flag more times than it enters (if used correctly)
// And so if a thread observes zero, it must be because it has observed an equal
// number of exits as entries.
class AtomicEntryFlag final {
public:
void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); }
void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); }
// Returns false only if the current thread is not between a call to Enter
// and a call to Exit. Returns true if this thread or another thread may
// currently be in the scope guarded by this flag.
bool MightBeEntered() const {
return entries_.load(std::memory_order_relaxed) != 0;
}
private:
std::atomic_int entries_{0};
};
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_ATOMIC_ENTRY_FLAG_H_
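
A standalone usage sketch of the two-step pattern from the comment above; the scope and thread-local names (FrobnicationScope, t_is_frobnicating) are hypothetical stand-ins, not cppgc API:

#include <atomic>
#include <cassert>

class AtomicEntryFlag final {
 public:
  void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); }
  void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); }
  bool MightBeEntered() const {
    return entries_.load(std::memory_order_relaxed) != 0;
  }

 private:
  std::atomic_int entries_{0};
};

AtomicEntryFlag g_frobnicating_flag;          // fast, imprecise global check
thread_local bool t_is_frobnicating = false;  // precise per-thread state

class FrobnicationScope final {
 public:
  FrobnicationScope() {
    g_frobnicating_flag.Enter();
    t_is_frobnicating = true;
  }
  ~FrobnicationScope() {
    t_is_frobnicating = false;
    g_frobnicating_flag.Exit();
  }
};

bool IsFrobnicatingOnThisThread() {
  // Cheap global check first; consult thread-local state only when the scope
  // might be entered somewhere.
  return g_frobnicating_flag.MightBeEntered() && t_is_frobnicating;
}

int main() {
  assert(!IsFrobnicatingOnThisThread());
  {
    FrobnicationScope scope;
    assert(IsFrobnicatingOnThisThread());
  }
  assert(!IsFrobnicatingOnThisThread());
}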

include/cppgc/internal/base-page-handle.h View File

@@ -0,0 +1,45 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_BASE_PAGE_HANDLE_H_
#define INCLUDE_CPPGC_INTERNAL_BASE_PAGE_HANDLE_H_
#include "cppgc/heap-handle.h"
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/logging.h"
#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
// The class is needed in the header to allow for fast access to HeapHandle in
// the write barrier.
class BasePageHandle {
public:
static V8_INLINE BasePageHandle* FromPayload(void* payload) {
return reinterpret_cast<BasePageHandle*>(
(reinterpret_cast<uintptr_t>(payload) &
~(api_constants::kPageSize - 1)) +
api_constants::kGuardPageSize);
}
static V8_INLINE const BasePageHandle* FromPayload(const void* payload) {
return FromPayload(const_cast<void*>(payload));
}
HeapHandle& heap_handle() { return heap_handle_; }
const HeapHandle& heap_handle() const { return heap_handle_; }
protected:
explicit BasePageHandle(HeapHandle& heap_handle) : heap_handle_(heap_handle) {
CPPGC_DCHECK(reinterpret_cast<uintptr_t>(this) % api_constants::kPageSize ==
api_constants::kGuardPageSize);
}
HeapHandle& heap_handle_;
};
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_BASE_PAGE_HANDLE_H_
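
A standalone sketch of the FromPayload() address arithmetic under the default constants (128 KiB pages, 4 KiB guard pages): rounding the payload address down to the page boundary and stepping over the guard page lands on the page header.

#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 17;  // 128 KiB
constexpr uintptr_t kGuardPageSize = 4096;

constexpr uintptr_t PageHandleFromPayload(uintptr_t payload) {
  // Round down to the page boundary, then skip the leading guard page to
  // reach the page header, exactly as FromPayload() does.
  return (payload & ~(kPageSize - 1)) + kGuardPageSize;
}

// Any payload address inside the page maps to the same header address.
static_assert(PageHandleFromPayload(0x40000 + 0x5000) == 0x40000 + 0x1000);
static_assert(PageHandleFromPayload(0x40000 + kPageSize - 1) ==
              0x40000 + kGuardPageSize);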

include/cppgc/internal/caged-heap-local-data.h View File

@@ -0,0 +1,111 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_
#define INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_
#include <array>
#include <cstddef>
#include <cstdint>
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/caged-heap.h"
#include "cppgc/internal/logging.h"
#include "cppgc/platform.h"
#include "v8config.h" // NOLINT(build/include_directory)
#if __cpp_lib_bitopts
#include <bit>
#endif // __cpp_lib_bitopts
#if defined(CPPGC_CAGED_HEAP)
namespace cppgc {
namespace internal {
class HeapBase;
class HeapBaseHandle;
#if defined(CPPGC_YOUNG_GENERATION)
// AgeTable is the bytemap needed for the fast generation check in the write
// barrier. AgeTable contains entries that correspond to 4096-byte memory
// regions (cards). Each entry in the table represents the generation of the
// objects that reside on the corresponding card (young, old, or mixed).
class V8_EXPORT AgeTable final {
static constexpr size_t kRequiredSize = 1 * api_constants::kMB;
static constexpr size_t kAllocationGranularity =
api_constants::kAllocationGranularity;
public:
// Represents age of the objects living on a single card.
enum class Age : uint8_t { kOld, kYoung, kMixed };
// When setting age for a range, consider or ignore ages of the adjacent
// cards.
enum class AdjacentCardsPolicy : uint8_t { kConsider, kIgnore };
static constexpr size_t kCardSizeInBytes =
api_constants::kCagedHeapReservationSize / kRequiredSize;
void SetAge(uintptr_t cage_offset, Age age) {
table_[card(cage_offset)] = age;
}
V8_INLINE Age GetAge(uintptr_t cage_offset) const {
return table_[card(cage_offset)];
}
void SetAgeForRange(uintptr_t cage_offset_begin, uintptr_t cage_offset_end,
Age age, AdjacentCardsPolicy adjacent_cards_policy);
Age GetAgeForRange(uintptr_t cage_offset_begin,
uintptr_t cage_offset_end) const;
void ResetForTesting();
private:
V8_INLINE size_t card(uintptr_t offset) const {
constexpr size_t kGranularityBits =
#if __cpp_lib_bitopts
std::countr_zero(static_cast<uint32_t>(kCardSizeInBytes));
#elif V8_HAS_BUILTIN_CTZ
__builtin_ctz(static_cast<uint32_t>(kCardSizeInBytes));
#else // !V8_HAS_BUILTIN_CTZ
// Hardcode and check with assert.
#if defined(CPPGC_2GB_CAGE)
11;
#else // !defined(CPPGC_2GB_CAGE)
12;
#endif // !defined(CPPGC_2GB_CAGE)
#endif // !V8_HAS_BUILTIN_CTZ
static_assert((1 << kGranularityBits) == kCardSizeInBytes);
const size_t entry = offset >> kGranularityBits;
CPPGC_DCHECK(table_.size() > entry);
return entry;
}
std::array<Age, kRequiredSize> table_;
};
static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
"Size of AgeTable is 1MB");
#endif // CPPGC_YOUNG_GENERATION
struct CagedHeapLocalData final {
V8_INLINE static CagedHeapLocalData& Get() {
return *reinterpret_cast<CagedHeapLocalData*>(CagedHeapBase::GetBase());
}
#if defined(CPPGC_YOUNG_GENERATION)
AgeTable age_table;
#endif
};
} // namespace internal
} // namespace cppgc
#endif // defined(CPPGC_CAGED_HEAP)
#endif // INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_
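
A standalone sketch of the card computation, assuming the default 4 GiB cage: a 1 MiB table over a 4 GiB cage yields 4096-byte cards, i.e. the hardcoded 12 granularity bits in the fallback above.

#include <cstddef>
#include <cstdint>

constexpr size_t kRequiredSize = size_t{1} << 20;              // 1 MiB table
constexpr size_t kCagedHeapReservationSize = size_t{4} << 30;  // 4 GiB cage
constexpr size_t kCardSizeInBytes =
    kCagedHeapReservationSize / kRequiredSize;                 // 4096

constexpr size_t Card(uintptr_t cage_offset) {
  constexpr size_t kGranularityBits = 12;  // the hardcoded fallback above
  static_assert((size_t{1} << kGranularityBits) == kCardSizeInBytes);
  return cage_offset >> kGranularityBits;
}

static_assert(Card(0) == 0 && Card(4095) == 0 && Card(4096) == 1,
              "offsets within the same 4096-byte card map to one entry");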

include/cppgc/internal/caged-heap.h View File

@@ -0,0 +1,61 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_H_
#define INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_H_
#include <climits>
#include <cstddef>
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/base-page-handle.h"
#include "v8config.h" // NOLINT(build/include_directory)
#if defined(CPPGC_CAGED_HEAP)
namespace cppgc {
namespace internal {
class V8_EXPORT CagedHeapBase {
public:
V8_INLINE static uintptr_t OffsetFromAddress(const void* address) {
return reinterpret_cast<uintptr_t>(address) &
(api_constants::kCagedHeapReservationAlignment - 1);
}
V8_INLINE static bool IsWithinCage(const void* address) {
CPPGC_DCHECK(g_heap_base_);
return (reinterpret_cast<uintptr_t>(address) &
~(api_constants::kCagedHeapReservationAlignment - 1)) ==
g_heap_base_;
}
V8_INLINE static bool AreWithinCage(const void* addr1, const void* addr2) {
#if defined(CPPGC_2GB_CAGE)
static constexpr size_t kHalfWordShift = sizeof(uint32_t) * CHAR_BIT - 1;
#else // !defined(CPPGC_2GB_CAGE)
static constexpr size_t kHalfWordShift = sizeof(uint32_t) * CHAR_BIT;
#endif // !defined(CPPGC_2GB_CAGE)
static_assert((static_cast<size_t>(1) << kHalfWordShift) ==
api_constants::kCagedHeapReservationSize);
CPPGC_DCHECK(g_heap_base_);
return !(((reinterpret_cast<uintptr_t>(addr1) ^ g_heap_base_) |
(reinterpret_cast<uintptr_t>(addr2) ^ g_heap_base_)) >>
kHalfWordShift);
}
V8_INLINE static uintptr_t GetBase() { return g_heap_base_; }
private:
friend class CagedHeap;
static uintptr_t g_heap_base_;
};
} // namespace internal
} // namespace cppgc
#endif // defined(CPPGC_CAGED_HEAP)
#endif // INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_H_
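
A standalone sketch of the AreWithinCage() bit trick, assuming a 64-bit platform and a 4 GiB-aligned cage base: XOR-ing an in-cage address with the base clears all bits above the half word, so one OR and one shift test both addresses with a single branch.

#include <cstdint>

constexpr unsigned kHalfWordShift = 32;  // log2 of the 4 GiB reservation

constexpr bool AreWithinCage(uint64_t base, uint64_t a, uint64_t b) {
  // Both XORs have zero upper bits iff both addresses share the cage's
  // upper half; OR-ing them tests the two addresses together.
  return !(((a ^ base) | (b ^ base)) >> kHalfWordShift);
}

constexpr uint64_t kBase = uint64_t{7} << 32;  // some 4 GiB-aligned base
static_assert(AreWithinCage(kBase, kBase + 0x1000, kBase + 0xFFFF0000u));
static_assert(
    !AreWithinCage(kBase, kBase + 0x1000, kBase + (uint64_t{4} << 30)));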

include/cppgc/internal/compiler-specific.h View File

@@ -0,0 +1,38 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_COMPILER_SPECIFIC_H_
#define INCLUDE_CPPGC_INTERNAL_COMPILER_SPECIFIC_H_
namespace cppgc {
#if defined(__has_attribute)
#define CPPGC_HAS_ATTRIBUTE(FEATURE) __has_attribute(FEATURE)
#else
#define CPPGC_HAS_ATTRIBUTE(FEATURE) 0
#endif
#if defined(__has_cpp_attribute)
#define CPPGC_HAS_CPP_ATTRIBUTE(FEATURE) __has_cpp_attribute(FEATURE)
#else
#define CPPGC_HAS_CPP_ATTRIBUTE(FEATURE) 0
#endif
// [[no_unique_address]] is standard from C++20 on, but clang supports it with
// -std >= c++11.
#if CPPGC_HAS_CPP_ATTRIBUTE(no_unique_address)
#define CPPGC_NO_UNIQUE_ADDRESS [[no_unique_address]]
#else
#define CPPGC_NO_UNIQUE_ADDRESS
#endif
#if CPPGC_HAS_ATTRIBUTE(unused)
#define CPPGC_UNUSED __attribute__((unused))
#else
#define CPPGC_UNUSED
#endif
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_COMPILER_SPECIFIC_H_
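
A standalone sketch of what CPPGC_NO_UNIQUE_ADDRESS buys, assuming a compiler that honors the attribute (clang or GCC in C++20 mode; MSVC ignores the standard spelling): an empty policy member stops costing storage. The type names are hypothetical.

#include <cstdint>

struct EmptyCheckingPolicy {
  void Check(const void*) {}
};

struct PointerWithPolicy {
  [[no_unique_address]] EmptyCheckingPolicy policy;  // occupies no storage
  uintptr_t ptr;
};

static_assert(sizeof(PointerWithPolicy) == sizeof(uintptr_t),
              "the empty policy member is folded into the pointer's storage");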

include/cppgc/internal/finalizer-trait.h View File

@@ -0,0 +1,93 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_FINALIZER_TRAIT_H_
#define INCLUDE_CPPGC_INTERNAL_FINALIZER_TRAIT_H_
#include <type_traits>
#include "cppgc/type-traits.h"
namespace cppgc {
namespace internal {
using FinalizationCallback = void (*)(void*);
template <typename T, typename = void>
struct HasFinalizeGarbageCollectedObject : std::false_type {};
template <typename T>
struct HasFinalizeGarbageCollectedObject<
T,
std::void_t<decltype(std::declval<T>().FinalizeGarbageCollectedObject())>>
: std::true_type {};
// The FinalizerTraitImpl specifies how to finalize objects.
template <typename T, bool isFinalized>
struct FinalizerTraitImpl;
template <typename T>
struct FinalizerTraitImpl<T, true> {
private:
// Dispatch to custom FinalizeGarbageCollectedObject().
struct Custom {
static void Call(void* obj) {
static_cast<T*>(obj)->FinalizeGarbageCollectedObject();
}
};
// Dispatch to regular destructor.
struct Destructor {
static void Call(void* obj) { static_cast<T*>(obj)->~T(); }
};
using FinalizeImpl =
std::conditional_t<HasFinalizeGarbageCollectedObject<T>::value, Custom,
Destructor>;
public:
static void Finalize(void* obj) {
static_assert(sizeof(T), "T must be fully defined");
FinalizeImpl::Call(obj);
}
};
template <typename T>
struct FinalizerTraitImpl<T, false> {
static void Finalize(void* obj) {
static_assert(sizeof(T), "T must be fully defined");
}
};
// The FinalizerTrait is used to determine if a type requires finalization and
// what finalization means.
template <typename T>
struct FinalizerTrait {
private:
// Object has a finalizer if it has
// - a custom FinalizeGarbageCollectedObject method, or
// - a destructor.
static constexpr bool kNonTrivialFinalizer =
internal::HasFinalizeGarbageCollectedObject<T>::value ||
!std::is_trivially_destructible<typename std::remove_cv<T>::type>::value;
static void Finalize(void* obj) {
internal::FinalizerTraitImpl<T, kNonTrivialFinalizer>::Finalize(obj);
}
public:
static constexpr bool HasFinalizer() { return kNonTrivialFinalizer; }
// The callback used to finalize an object of type T.
static constexpr FinalizationCallback kCallback =
kNonTrivialFinalizer ? Finalize : nullptr;
};
template <typename T>
constexpr FinalizationCallback FinalizerTrait<T>::kCallback;
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_FINALIZER_TRAIT_H_
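
A standalone sketch of the detection idiom driving FinalizerTrait, using a simplified copy of HasFinalizeGarbageCollectedObject: a custom finalizer hook or a non-trivial destructor is what makes a type require finalization.

#include <type_traits>
#include <utility>

template <typename T, typename = void>
struct HasFinalizeGarbageCollectedObject : std::false_type {};
template <typename T>
struct HasFinalizeGarbageCollectedObject<
    T,
    std::void_t<decltype(std::declval<T>().FinalizeGarbageCollectedObject())>>
    : std::true_type {};

struct WithCustomFinalizer {
  void FinalizeGarbageCollectedObject() {}
};
struct Trivial {};
struct WithDestructor {
  ~WithDestructor() {}
};

static_assert(HasFinalizeGarbageCollectedObject<WithCustomFinalizer>::value);
static_assert(!HasFinalizeGarbageCollectedObject<Trivial>::value);
// Trivially destructible and no custom hook: no finalizer gets registered.
static_assert(std::is_trivially_destructible<Trivial>::value);
// A user-provided destructor makes the finalizer non-trivial.
static_assert(!std::is_trivially_destructible<WithDestructor>::value);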

include/cppgc/internal/gc-info.h View File

@@ -0,0 +1,155 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
#define INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
#include <atomic>
#include <cstdint>
#include <type_traits>
#include "cppgc/internal/finalizer-trait.h"
#include "cppgc/internal/name-trait.h"
#include "cppgc/trace-trait.h"
#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
using GCInfoIndex = uint16_t;
struct V8_EXPORT EnsureGCInfoIndexTrait final {
// Acquires a new GC info object and returns its index, additionally updating
// `registered_index` atomically.
template <typename T>
V8_INLINE static GCInfoIndex EnsureIndex(
std::atomic<GCInfoIndex>& registered_index) {
return EnsureGCInfoIndexTraitDispatch<T>{}(registered_index);
}
private:
template <typename T, bool = std::is_polymorphic<T>::value,
bool = FinalizerTrait<T>::HasFinalizer(),
bool = NameTrait<T>::HasNonHiddenName()>
struct EnsureGCInfoIndexTraitDispatch;
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback,
NameCallback);
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback);
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback, NameCallback);
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback,
NameCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
NameCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback);
};
#define DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function) \
template <typename T> \
struct EnsureGCInfoIndexTrait::EnsureGCInfoIndexTraitDispatch< \
T, is_polymorphic, has_finalizer, has_non_hidden_name> { \
V8_INLINE GCInfoIndex \
operator()(std::atomic<GCInfoIndex>& registered_index) { \
return function; \
} \
};
// --------------------------------------------------------------------- //
// DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function)
// --------------------------------------------------------------------- //
DISPATCH(true, true, true, //
EnsureGCInfoIndexPolymorphic(registered_index, //
TraceTrait<T>::Trace, //
FinalizerTrait<T>::kCallback, //
NameTrait<T>::GetName)) //
DISPATCH(true, true, false, //
EnsureGCInfoIndexPolymorphic(registered_index, //
TraceTrait<T>::Trace, //
FinalizerTrait<T>::kCallback)) //
DISPATCH(true, false, true, //
EnsureGCInfoIndexPolymorphic(registered_index, //
TraceTrait<T>::Trace, //
NameTrait<T>::GetName)) //
DISPATCH(true, false, false, //
EnsureGCInfoIndexPolymorphic(registered_index, //
TraceTrait<T>::Trace)) //
DISPATCH(false, true, true, //
EnsureGCInfoIndexNonPolymorphic(registered_index, //
TraceTrait<T>::Trace, //
FinalizerTrait<T>::kCallback, //
NameTrait<T>::GetName)) //
DISPATCH(false, true, false, //
EnsureGCInfoIndexNonPolymorphic(registered_index, //
TraceTrait<T>::Trace, //
FinalizerTrait<T>::kCallback)) //
DISPATCH(false, false, true, //
EnsureGCInfoIndexNonPolymorphic(registered_index, //
TraceTrait<T>::Trace, //
NameTrait<T>::GetName)) //
DISPATCH(false, false, false, //
EnsureGCInfoIndexNonPolymorphic(registered_index, //
TraceTrait<T>::Trace)) //
#undef DISPATCH
// Fold types based on finalizer behavior. Note that finalizer characteristics
// align with trace behavior, i.e., destructors are virtual when trace methods
// are and vice versa.
template <typename T, typename ParentMostGarbageCollectedType>
struct GCInfoFolding {
static constexpr bool kHasVirtualDestructorAtBase =
std::has_virtual_destructor<ParentMostGarbageCollectedType>::value;
static constexpr bool kBothTypesAreTriviallyDestructible =
std::is_trivially_destructible<ParentMostGarbageCollectedType>::value &&
std::is_trivially_destructible<T>::value;
static constexpr bool kHasCustomFinalizerDispatchAtBase =
internal::HasFinalizeGarbageCollectedObject<
ParentMostGarbageCollectedType>::value;
#ifdef CPPGC_SUPPORTS_OBJECT_NAMES
static constexpr bool kWantsDetailedObjectNames = true;
#else // !CPPGC_SUPPORTS_OBJECT_NAMES
static constexpr bool kWantsDetailedObjectNames = false;
#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
// Folding would regress name resolution when deriving names from C++ class
// names, as it would just fold a name to the base class name.
using ResultType = std::conditional_t<(kHasVirtualDestructorAtBase ||
kBothTypesAreTriviallyDestructible ||
kHasCustomFinalizerDispatchAtBase) &&
!kWantsDetailedObjectNames,
ParentMostGarbageCollectedType, T>;
};
// Trait determines how the garbage collector treats objects with respect to
// traversal, finalization, and naming.
template <typename T>
struct GCInfoTrait final {
V8_INLINE static GCInfoIndex Index() {
static_assert(sizeof(T), "T must be fully defined");
static std::atomic<GCInfoIndex>
registered_index; // Uses zero initialization.
const GCInfoIndex index = registered_index.load(std::memory_order_acquire);
return index ? index
: EnsureGCInfoIndexTrait::EnsureIndex<T>(registered_index);
}
};
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
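
A standalone sketch of the GCInfoFolding decision with the naming dimension left out: a virtual destructor at the base (or trivial destructibility of both types) allows folding into the parent-most type; otherwise the concrete type keeps its own GCInfo.

#include <type_traits>

template <typename T, typename ParentMost>
using FoldedType = std::conditional_t<
    std::has_virtual_destructor<ParentMost>::value ||
        (std::is_trivially_destructible<ParentMost>::value &&
         std::is_trivially_destructible<T>::value),
    ParentMost, T>;

struct Base {
  virtual ~Base() = default;
};
struct Derived : Base {};

struct PlainBase {
  ~PlainBase() {}  // non-virtual and non-trivial
};
struct PlainDerived : PlainBase {};

static_assert(std::is_same_v<FoldedType<Derived, Base>, Base>,
              "virtual destructor at the base: fold into the base");
static_assert(std::is_same_v<FoldedType<PlainDerived, PlainBase>, PlainDerived>,
              "no safe dispatch from the base: keep the concrete type");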

include/cppgc/internal/logging.h View File

@@ -0,0 +1,50 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_LOGGING_H_
#define INCLUDE_CPPGC_INTERNAL_LOGGING_H_
#include "cppgc/source-location.h"
#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
void V8_EXPORT DCheckImpl(const char*,
const SourceLocation& = SourceLocation::Current());
[[noreturn]] void V8_EXPORT
FatalImpl(const char*, const SourceLocation& = SourceLocation::Current());
// Used to ignore -Wunused-variable.
template <typename>
struct EatParams {};
#if defined(DEBUG)
#define CPPGC_DCHECK_MSG(condition, message) \
do { \
if (V8_UNLIKELY(!(condition))) { \
::cppgc::internal::DCheckImpl(message); \
} \
} while (false)
#else // !defined(DEBUG)
#define CPPGC_DCHECK_MSG(condition, message) \
(static_cast<void>(::cppgc::internal::EatParams<decltype( \
static_cast<void>(condition), message)>{}))
#endif // !defined(DEBUG)
#define CPPGC_DCHECK(condition) CPPGC_DCHECK_MSG(condition, #condition)
#define CPPGC_CHECK_MSG(condition, message) \
do { \
if (V8_UNLIKELY(!(condition))) { \
::cppgc::internal::FatalImpl(message); \
} \
} while (false)
#define CPPGC_CHECK(condition) CPPGC_CHECK_MSG(condition, #condition)
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_LOGGING_H_
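
A standalone sketch of the release-mode CPPGC_DCHECK_MSG expansion above: because decltype's operand is unevaluated, the condition and message are type-checked but never executed. SKETCH_DCHECK is a hypothetical stand-in.

#include <cstdio>

template <typename>
struct EatParams {};

// Mirrors the release branch of CPPGC_DCHECK_MSG.
#define SKETCH_DCHECK(condition, message)    \
  (static_cast<void>(::EatParams<decltype(   \
      static_cast<void>(condition), message)>{}))

int main() {
  int calls = 0;
  auto expensive = [&calls] {
    ++calls;
    return true;
  };
  SKETCH_DCHECK(expensive(), "never evaluated");
  std::printf("calls = %d\n", calls);  // prints 0: decltype never evaluates
}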

include/cppgc/internal/member-storage.h View File

@@ -0,0 +1,236 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_
#define INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_
#include <atomic>
#include <cstddef>
#include <type_traits>
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/logging.h"
#include "cppgc/sentinel-pointer.h"
#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
#if defined(CPPGC_POINTER_COMPRESSION)
#if defined(__clang__)
// Attribute const allows the compiler to assume that CageBaseGlobal::g_base_
// doesn't change (e.g. across calls) and thereby avoid redundant loads.
#define CPPGC_CONST __attribute__((const))
#define CPPGC_REQUIRE_CONSTANT_INIT \
__attribute__((require_constant_initialization))
#else // defined(__clang__)
#define CPPGC_CONST
#define CPPGC_REQUIRE_CONSTANT_INIT
#endif // defined(__clang__)
class CageBaseGlobal final {
public:
V8_INLINE CPPGC_CONST static uintptr_t Get() {
CPPGC_DCHECK(IsBaseConsistent());
return g_base_;
}
V8_INLINE CPPGC_CONST static bool IsSet() {
CPPGC_DCHECK(IsBaseConsistent());
return (g_base_ & ~kLowerHalfWordMask) != 0;
}
private:
// We keep the lower halfword as ones to speed up decompression.
static constexpr uintptr_t kLowerHalfWordMask =
(api_constants::kCagedHeapReservationAlignment - 1);
static V8_EXPORT uintptr_t g_base_ CPPGC_REQUIRE_CONSTANT_INIT;
CageBaseGlobal() = delete;
V8_INLINE static bool IsBaseConsistent() {
return kLowerHalfWordMask == (g_base_ & kLowerHalfWordMask);
}
friend class CageBaseGlobalUpdater;
};
#undef CPPGC_REQUIRE_CONSTANT_INIT
#undef CPPGC_CONST
class V8_TRIVIAL_ABI CompressedPointer final {
public:
using IntegralType = uint32_t;
V8_INLINE CompressedPointer() : value_(0u) {}
V8_INLINE explicit CompressedPointer(const void* ptr)
: value_(Compress(ptr)) {}
V8_INLINE explicit CompressedPointer(std::nullptr_t) : value_(0u) {}
V8_INLINE explicit CompressedPointer(SentinelPointer)
: value_(kCompressedSentinel) {}
V8_INLINE const void* Load() const { return Decompress(value_); }
V8_INLINE const void* LoadAtomic() const {
return Decompress(
reinterpret_cast<const std::atomic<IntegralType>&>(value_).load(
std::memory_order_relaxed));
}
V8_INLINE void Store(const void* ptr) { value_ = Compress(ptr); }
V8_INLINE void StoreAtomic(const void* value) {
reinterpret_cast<std::atomic<IntegralType>&>(value_).store(
Compress(value), std::memory_order_relaxed);
}
V8_INLINE void Clear() { value_ = 0u; }
V8_INLINE bool IsCleared() const { return !value_; }
V8_INLINE bool IsSentinel() const { return value_ == kCompressedSentinel; }
V8_INLINE uint32_t GetAsInteger() const { return value_; }
V8_INLINE friend bool operator==(CompressedPointer a, CompressedPointer b) {
return a.value_ == b.value_;
}
V8_INLINE friend bool operator!=(CompressedPointer a, CompressedPointer b) {
return a.value_ != b.value_;
}
V8_INLINE friend bool operator<(CompressedPointer a, CompressedPointer b) {
return a.value_ < b.value_;
}
V8_INLINE friend bool operator<=(CompressedPointer a, CompressedPointer b) {
return a.value_ <= b.value_;
}
V8_INLINE friend bool operator>(CompressedPointer a, CompressedPointer b) {
return a.value_ > b.value_;
}
V8_INLINE friend bool operator>=(CompressedPointer a, CompressedPointer b) {
return a.value_ >= b.value_;
}
static V8_INLINE IntegralType Compress(const void* ptr) {
static_assert(
SentinelPointer::kSentinelValue == 0b10,
"The compression scheme relies on the sentinel encoded as 0b10");
static constexpr size_t kGigaCageMask =
~(api_constants::kCagedHeapReservationAlignment - 1);
CPPGC_DCHECK(CageBaseGlobal::IsSet());
const uintptr_t base = CageBaseGlobal::Get();
CPPGC_DCHECK(!ptr || ptr == kSentinelPointer ||
(base & kGigaCageMask) ==
(reinterpret_cast<uintptr_t>(ptr) & kGigaCageMask));
#if defined(CPPGC_2GB_CAGE)
// Truncate the pointer.
auto compressed =
static_cast<IntegralType>(reinterpret_cast<uintptr_t>(ptr));
#else // !defined(CPPGC_2GB_CAGE)
const auto uptr = reinterpret_cast<uintptr_t>(ptr);
// Shift the pointer by one and truncate.
auto compressed = static_cast<IntegralType>(uptr >> 1);
#endif // !defined(CPPGC_2GB_CAGE)
// Normal compressed pointers must have the MSB set.
CPPGC_DCHECK((!compressed || compressed == kCompressedSentinel) ||
(compressed & (1 << 31)));
return compressed;
}
static V8_INLINE void* Decompress(IntegralType ptr) {
CPPGC_DCHECK(CageBaseGlobal::IsSet());
const uintptr_t base = CageBaseGlobal::Get();
// Treat compressed pointer as signed and cast it to uint64_t, which will
// sign-extend it.
#if defined(CPPGC_2GB_CAGE)
const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr));
#else // !defined(CPPGC_2GB_CAGE)
// Then, shift the result by one. It's important to shift the unsigned
// value, as otherwise it would result in undefined behavior.
const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr)) << 1;
#endif // !defined(CPPGC_2GB_CAGE)
return reinterpret_cast<void*>(mask & base);
}
private:
#if defined(CPPGC_2GB_CAGE)
static constexpr IntegralType kCompressedSentinel =
SentinelPointer::kSentinelValue;
#else // !defined(CPPGC_2GB_CAGE)
static constexpr IntegralType kCompressedSentinel =
SentinelPointer::kSentinelValue >> 1;
#endif // !defined(CPPGC_2GB_CAGE)
// All constructors initialize `value_`. Do not add a default value here as it
// results in a non-atomic write on some builds, even when the atomic version
// of the constructor is used.
IntegralType value_;
};
#endif // defined(CPPGC_POINTER_COMPRESSION)
class V8_TRIVIAL_ABI RawPointer final {
public:
using IntegralType = uintptr_t;
V8_INLINE RawPointer() : ptr_(nullptr) {}
V8_INLINE explicit RawPointer(const void* ptr) : ptr_(ptr) {}
V8_INLINE const void* Load() const { return ptr_; }
V8_INLINE const void* LoadAtomic() const {
return reinterpret_cast<const std::atomic<const void*>&>(ptr_).load(
std::memory_order_relaxed);
}
V8_INLINE void Store(const void* ptr) { ptr_ = ptr; }
V8_INLINE void StoreAtomic(const void* ptr) {
reinterpret_cast<std::atomic<const void*>&>(ptr_).store(
ptr, std::memory_order_relaxed);
}
V8_INLINE void Clear() { ptr_ = nullptr; }
V8_INLINE bool IsCleared() const { return !ptr_; }
V8_INLINE bool IsSentinel() const { return ptr_ == kSentinelPointer; }
V8_INLINE uintptr_t GetAsInteger() const {
return reinterpret_cast<uintptr_t>(ptr_);
}
V8_INLINE friend bool operator==(RawPointer a, RawPointer b) {
return a.ptr_ == b.ptr_;
}
V8_INLINE friend bool operator!=(RawPointer a, RawPointer b) {
return a.ptr_ != b.ptr_;
}
V8_INLINE friend bool operator<(RawPointer a, RawPointer b) {
return a.ptr_ < b.ptr_;
}
V8_INLINE friend bool operator<=(RawPointer a, RawPointer b) {
return a.ptr_ <= b.ptr_;
}
V8_INLINE friend bool operator>(RawPointer a, RawPointer b) {
return a.ptr_ > b.ptr_;
}
V8_INLINE friend bool operator>=(RawPointer a, RawPointer b) {
return a.ptr_ >= b.ptr_;
}
private:
// All constructors initialize `ptr_`. Do not add a default value here as it
// results in a non-atomic write on some builds, even when the atomic version
// of the constructor is used.
const void* ptr_;
};
#if defined(CPPGC_POINTER_COMPRESSION)
using MemberStorage = CompressedPointer;
#else // !defined(CPPGC_POINTER_COMPRESSION)
using MemberStorage = RawPointer;
#endif // !defined(CPPGC_POINTER_COMPRESSION)
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_
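
A standalone sketch of the default (non-2GB-cage) compression round trip, assuming a 64-bit platform and two's-complement narrowing (well-defined since C++20): compression halves and truncates the pointer; decompression sign-extends, doubles, and masks with a base whose lower half is all ones.

#include <cstdint>

// Cage base as CageBaseGlobal keeps it: 4 GiB-aligned upper half, lower
// half all ones, so decompression needs only one AND.
constexpr uint64_t kBase = (uint64_t{7} << 32) | 0xFFFFFFFFu;

constexpr uint32_t Compress(uint64_t ptr) {
  return static_cast<uint32_t>(ptr >> 1);  // shift by one, then truncate
}

constexpr uint64_t Decompress(uint32_t compressed) {
  // Sign-extend via int32_t, shift the unsigned value left (shifting a
  // negative signed value would be undefined), and mask with the base.
  const uint64_t mask =
      static_cast<uint64_t>(static_cast<int32_t>(compressed)) << 1;
  return mask & kBase;
}

constexpr uint64_t kPtr = (uint64_t{7} << 32) + 0x1000;
static_assert(Decompress(Compress(kPtr)) == kPtr);
static_assert(Decompress(Compress(0)) == 0);  // nullptr round-trips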

include/cppgc/internal/name-trait.h View File

@@ -0,0 +1,137 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_
#define INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include "cppgc/name-provider.h"
#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
#if CPPGC_SUPPORTS_OBJECT_NAMES && defined(__clang__)
#define CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME 1
// Provides constexpr C-string storage for a name of fixed |Size| characters.
// Automatically appends a terminating 0 byte.
template <size_t Size>
struct NameBuffer {
char name[Size + 1]{};
static constexpr NameBuffer FromCString(const char* str) {
NameBuffer result;
for (size_t i = 0; i < Size; ++i) result.name[i] = str[i];
result.name[Size] = 0;
return result;
}
};
template <typename T>
const char* GetTypename() {
static constexpr char kSelfPrefix[] =
"const char *cppgc::internal::GetTypename() [T =";
static_assert(__builtin_strncmp(__PRETTY_FUNCTION__, kSelfPrefix,
sizeof(kSelfPrefix) - 1) == 0,
"The prefix must match");
static constexpr const char* kTypenameStart =
__PRETTY_FUNCTION__ + sizeof(kSelfPrefix);
static constexpr size_t kTypenameSize =
__builtin_strlen(__PRETTY_FUNCTION__) - sizeof(kSelfPrefix) - 1;
// NameBuffer is an indirection that is needed to make sure that only a
// substring of __PRETTY_FUNCTION__ gets materialized in the binary.
static constexpr auto buffer =
NameBuffer<kTypenameSize>::FromCString(kTypenameStart);
return buffer.name;
}
#else
#define CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME 0
#endif
struct HeapObjectName {
const char* value;
bool name_was_hidden;
};
enum class HeapObjectNameForUnnamedObject : uint8_t {
kUseClassNameIfSupported,
kUseHiddenName,
};
class V8_EXPORT NameTraitBase {
protected:
static HeapObjectName GetNameFromTypeSignature(const char*);
};
// Trait that specifies how the garbage collector retrieves the name for a
// given object.
template <typename T>
class NameTrait final : public NameTraitBase {
public:
static constexpr bool HasNonHiddenName() {
#if CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME
return true;
#elif CPPGC_SUPPORTS_OBJECT_NAMES
return true;
#else // !CPPGC_SUPPORTS_OBJECT_NAMES
return std::is_base_of<NameProvider, T>::value;
#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
}
static HeapObjectName GetName(
const void* obj, HeapObjectNameForUnnamedObject name_retrieval_mode) {
return GetNameFor(static_cast<const T*>(obj), name_retrieval_mode);
}
private:
static HeapObjectName GetNameFor(const NameProvider* name_provider,
HeapObjectNameForUnnamedObject) {
// Objects inheriting from `NameProvider` are not considered unnamed as
// users already provided a name for them.
return {name_provider->GetHumanReadableName(), false};
}
static HeapObjectName GetNameFor(
const void*, HeapObjectNameForUnnamedObject name_retrieval_mode) {
if (name_retrieval_mode == HeapObjectNameForUnnamedObject::kUseHiddenName)
return {NameProvider::kHiddenName, true};
#if CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME
return {GetTypename<T>(), false};
#elif CPPGC_SUPPORTS_OBJECT_NAMES
#if defined(V8_CC_GNU)
#define PRETTY_FUNCTION_VALUE __PRETTY_FUNCTION__
#elif defined(V8_CC_MSVC)
#define PRETTY_FUNCTION_VALUE __FUNCSIG__
#else
#define PRETTY_FUNCTION_VALUE nullptr
#endif
static const HeapObjectName leaky_name =
GetNameFromTypeSignature(PRETTY_FUNCTION_VALUE);
return leaky_name;
#undef PRETTY_FUNCTION_VALUE
#else // !CPPGC_SUPPORTS_OBJECT_NAMES
return {NameProvider::kHiddenName, true};
#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
}
};
using NameCallback = HeapObjectName (*)(const void*,
HeapObjectNameForUnnamedObject);
} // namespace internal
} // namespace cppgc
#undef CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME
#endif // INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_
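
A standalone sketch of the NameBuffer indirection: copying just the type-name substring into a fixed-size constexpr buffer is what keeps the full signature string from being materialized in the binary. The signature literal and offsets here are made up for illustration.

#include <cstddef>

template <size_t Size>
struct NameBuffer {
  char name[Size + 1]{};
  static constexpr NameBuffer FromCString(const char* str) {
    NameBuffer result;
    for (size_t i = 0; i < Size; ++i) result.name[i] = str[i];
    result.name[Size] = 0;
    return result;
  }
};

// A made-up signature standing in for __PRETTY_FUNCTION__; "MyType" starts
// at offset 19 and is 6 characters long.
constexpr const char* kSignature = "GetTypename() [T = MyType]";
constexpr auto kTypeName = NameBuffer<6>::FromCString(kSignature + 19);

static_assert(kTypeName.name[0] == 'M' && kTypeName.name[5] == 'e' &&
              kTypeName.name[6] == 0);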

include/cppgc/internal/persistent-node.h View File

@@ -0,0 +1,214 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_PERSISTENT_NODE_H_
#define INCLUDE_CPPGC_INTERNAL_PERSISTENT_NODE_H_
#include <array>
#include <memory>
#include <vector>
#include "cppgc/internal/logging.h"
#include "cppgc/trace-trait.h"
#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
class CrossThreadPersistentRegion;
class FatalOutOfMemoryHandler;
class RootVisitor;
// PersistentNode represents a variant of two states:
// 1) traceable node with a back pointer to the Persistent object;
// 2) freelist entry.
class PersistentNode final {
public:
PersistentNode() = default;
PersistentNode(const PersistentNode&) = delete;
PersistentNode& operator=(const PersistentNode&) = delete;
void InitializeAsUsedNode(void* owner, TraceRootCallback trace) {
CPPGC_DCHECK(trace);
owner_ = owner;
trace_ = trace;
}
void InitializeAsFreeNode(PersistentNode* next) {
next_ = next;
trace_ = nullptr;
}
void UpdateOwner(void* owner) {
CPPGC_DCHECK(IsUsed());
owner_ = owner;
}
PersistentNode* FreeListNext() const {
CPPGC_DCHECK(!IsUsed());
return next_;
}
void Trace(RootVisitor& root_visitor) const {
CPPGC_DCHECK(IsUsed());
trace_(root_visitor, owner_);
}
bool IsUsed() const { return trace_; }
void* owner() const {
CPPGC_DCHECK(IsUsed());
return owner_;
}
private:
// PersistentNode acts as a designated union:
// If trace_ != nullptr, owner_ points to the corresponding Persistent handle.
// Otherwise, next_ points to the next freed PersistentNode.
union {
void* owner_ = nullptr;
PersistentNode* next_;
};
TraceRootCallback trace_ = nullptr;
};
class V8_EXPORT PersistentRegionBase {
using PersistentNodeSlots = std::array<PersistentNode, 256u>;
public:
// Clears Persistent fields to avoid stale pointers after heap teardown.
~PersistentRegionBase();
PersistentRegionBase(const PersistentRegionBase&) = delete;
PersistentRegionBase& operator=(const PersistentRegionBase&) = delete;
void Iterate(RootVisitor&);
size_t NodesInUse() const;
void ClearAllUsedNodes();
protected:
explicit PersistentRegionBase(const FatalOutOfMemoryHandler& oom_handler);
PersistentNode* TryAllocateNodeFromFreeList(void* owner,
TraceRootCallback trace) {
PersistentNode* node = nullptr;
if (V8_LIKELY(free_list_head_)) {
node = free_list_head_;
free_list_head_ = free_list_head_->FreeListNext();
CPPGC_DCHECK(!node->IsUsed());
node->InitializeAsUsedNode(owner, trace);
nodes_in_use_++;
}
return node;
}
void FreeNode(PersistentNode* node) {
CPPGC_DCHECK(node);
CPPGC_DCHECK(node->IsUsed());
node->InitializeAsFreeNode(free_list_head_);
free_list_head_ = node;
CPPGC_DCHECK(nodes_in_use_ > 0);
nodes_in_use_--;
}
PersistentNode* RefillFreeListAndAllocateNode(void* owner,
TraceRootCallback trace);
private:
template <typename PersistentBaseClass>
void ClearAllUsedNodes();
void RefillFreeList();
std::vector<std::unique_ptr<PersistentNodeSlots>> nodes_;
PersistentNode* free_list_head_ = nullptr;
size_t nodes_in_use_ = 0;
const FatalOutOfMemoryHandler& oom_handler_;
friend class CrossThreadPersistentRegion;
};
// Variant of PersistentRegionBase that checks whether the allocation and
// freeing happen only on the thread that created the region.
class V8_EXPORT PersistentRegion final : public PersistentRegionBase {
public:
explicit PersistentRegion(const FatalOutOfMemoryHandler&);
// Clears Persistent fields to avoid stale pointers after heap teardown.
~PersistentRegion() = default;
PersistentRegion(const PersistentRegion&) = delete;
PersistentRegion& operator=(const PersistentRegion&) = delete;
V8_INLINE PersistentNode* AllocateNode(void* owner, TraceRootCallback trace) {
CPPGC_DCHECK(IsCreationThread());
auto* node = TryAllocateNodeFromFreeList(owner, trace);
if (V8_LIKELY(node)) return node;
// Slow path allocation allows for checking thread correspondence.
CPPGC_CHECK(IsCreationThread());
return RefillFreeListAndAllocateNode(owner, trace);
}
V8_INLINE void FreeNode(PersistentNode* node) {
CPPGC_DCHECK(IsCreationThread());
PersistentRegionBase::FreeNode(node);
}
private:
bool IsCreationThread();
int creation_thread_id_;
};
// CrossThreadPersistent uses PersistentRegionBase but protects it using this
// lock when needed.
class V8_EXPORT PersistentRegionLock final {
public:
PersistentRegionLock();
~PersistentRegionLock();
static void AssertLocked();
};
// Variant of PersistentRegionBase that checks whether the PersistentRegionLock
// is locked.
class V8_EXPORT CrossThreadPersistentRegion final
: protected PersistentRegionBase {
public:
explicit CrossThreadPersistentRegion(const FatalOutOfMemoryHandler&);
// Clears Persistent fields to avoid stale pointers after heap teardown.
~CrossThreadPersistentRegion();
CrossThreadPersistentRegion(const CrossThreadPersistentRegion&) = delete;
CrossThreadPersistentRegion& operator=(const CrossThreadPersistentRegion&) =
delete;
V8_INLINE PersistentNode* AllocateNode(void* owner, TraceRootCallback trace) {
PersistentRegionLock::AssertLocked();
auto* node = TryAllocateNodeFromFreeList(owner, trace);
if (V8_LIKELY(node)) return node;
return RefillFreeListAndAllocateNode(owner, trace);
}
V8_INLINE void FreeNode(PersistentNode* node) {
PersistentRegionLock::AssertLocked();
PersistentRegionBase::FreeNode(node);
}
void Iterate(RootVisitor&);
size_t NodesInUse() const;
void ClearAllUsedNodes();
};
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_PERSISTENT_NODE_H_
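
A standalone sketch of the PersistentNode free-list mechanics: unused nodes chain through next_, allocation pops the head, and freeing pushes the node back (the used flag stands in for the trace_-based discrimination above).

#include <array>
#include <cassert>

struct Node {
  union {
    void* owner = nullptr;  // valid while the node is used
    Node* next;             // valid while the node is on the free list
  };
  bool used = false;  // stands in for the trace_ != nullptr discrimination
};

int main() {
  std::array<Node, 4> slots{};
  Node* free_head = nullptr;
  // Thread all slots onto the free list, as RefillFreeList() would.
  for (Node& n : slots) {
    n.next = free_head;
    free_head = &n;
  }
  // Allocate: pop the head and turn it into a used node.
  Node* node = free_head;
  free_head = node->next;
  node->owner = nullptr;  // would point back at the Persistent handle
  node->used = true;
  // Free: push the node back onto the free list.
  node->used = false;
  node->next = free_head;
  free_head = node;
  assert(free_head == &slots[3] && !free_head->used);
}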

include/cppgc/internal/pointer-policies.h View File

@@ -0,0 +1,207 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_POINTER_POLICIES_H_
#define INCLUDE_CPPGC_INTERNAL_POINTER_POLICIES_H_
#include <cstdint>
#include <type_traits>
#include "cppgc/internal/member-storage.h"
#include "cppgc/internal/write-barrier.h"
#include "cppgc/sentinel-pointer.h"
#include "cppgc/source-location.h"
#include "cppgc/type-traits.h"
#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
class HeapBase;
class PersistentRegion;
class CrossThreadPersistentRegion;
// Tags to distinguish between strong and weak member types.
class StrongMemberTag;
class WeakMemberTag;
class UntracedMemberTag;
struct DijkstraWriteBarrierPolicy {
V8_INLINE static void InitializingBarrier(const void*, const void*) {
// Since in initializing writes the source object is always white, having no
// barrier doesn't break the tri-color invariant.
}
V8_INLINE static void AssigningBarrier(const void* slot, const void* value) {
WriteBarrier::Params params;
const WriteBarrier::Type type =
WriteBarrier::GetWriteBarrierType(slot, value, params);
WriteBarrier(type, params, slot, value);
}
V8_INLINE static void AssigningBarrier(const void* slot,
MemberStorage storage) {
WriteBarrier::Params params;
const WriteBarrier::Type type =
WriteBarrier::GetWriteBarrierType(slot, storage, params);
WriteBarrier(type, params, slot, storage.Load());
}
private:
V8_INLINE static void WriteBarrier(WriteBarrier::Type type,
const WriteBarrier::Params& params,
const void* slot, const void* value) {
switch (type) {
case WriteBarrier::Type::kGenerational:
WriteBarrier::GenerationalBarrier<
WriteBarrier::GenerationalBarrierType::kPreciseSlot>(params, slot);
break;
case WriteBarrier::Type::kMarking:
WriteBarrier::DijkstraMarkingBarrier(params, value);
break;
case WriteBarrier::Type::kNone:
break;
}
}
};
struct NoWriteBarrierPolicy {
V8_INLINE static void InitializingBarrier(const void*, const void*) {}
V8_INLINE static void AssigningBarrier(const void*, const void*) {}
V8_INLINE static void AssigningBarrier(const void*, MemberStorage) {}
};
class V8_EXPORT SameThreadEnabledCheckingPolicyBase {
protected:
void CheckPointerImpl(const void* ptr, bool points_to_payload,
bool check_off_heap_assignments);
const HeapBase* heap_ = nullptr;
};
template <bool kCheckOffHeapAssignments>
class V8_EXPORT SameThreadEnabledCheckingPolicy
: private SameThreadEnabledCheckingPolicyBase {
protected:
template <typename T>
void CheckPointer(const T* ptr) {
if (!ptr || (kSentinelPointer == ptr)) return;
CheckPointersImplTrampoline<T>::Call(this, ptr);
}
private:
template <typename T, bool = IsCompleteV<T>>
struct CheckPointersImplTrampoline {
static void Call(SameThreadEnabledCheckingPolicy* policy, const T* ptr) {
policy->CheckPointerImpl(ptr, false, kCheckOffHeapAssignments);
}
};
template <typename T>
struct CheckPointersImplTrampoline<T, true> {
static void Call(SameThreadEnabledCheckingPolicy* policy, const T* ptr) {
policy->CheckPointerImpl(ptr, IsGarbageCollectedTypeV<T>,
kCheckOffHeapAssignments);
}
};
};
class DisabledCheckingPolicy {
protected:
V8_INLINE void CheckPointer(const void*) {}
};
#ifdef DEBUG
// Off-heap members are not connected to the object graph and thus cannot
// resurrect dead objects.
using DefaultMemberCheckingPolicy =
SameThreadEnabledCheckingPolicy<false /* kCheckOffHeapAssignments */>;
using DefaultPersistentCheckingPolicy =
SameThreadEnabledCheckingPolicy<true /* kCheckOffHeapAssignments */>;
#else // !DEBUG
using DefaultMemberCheckingPolicy = DisabledCheckingPolicy;
using DefaultPersistentCheckingPolicy = DisabledCheckingPolicy;
#endif // !DEBUG
// For CT(W)P neither marking information (for the value) nor the object start
// bitmap (for the slot) is guaranteed to be present, because there's no
// synchronization between heaps after marking.
using DefaultCrossThreadPersistentCheckingPolicy = DisabledCheckingPolicy;
class KeepLocationPolicy {
public:
constexpr const SourceLocation& Location() const { return location_; }
protected:
constexpr KeepLocationPolicy() = default;
constexpr explicit KeepLocationPolicy(const SourceLocation& location)
: location_(location) {}
// KeepLocationPolicy must not copy underlying source locations.
KeepLocationPolicy(const KeepLocationPolicy&) = delete;
KeepLocationPolicy& operator=(const KeepLocationPolicy&) = delete;
// Location of the original moved from object should be preserved.
KeepLocationPolicy(KeepLocationPolicy&&) = default;
KeepLocationPolicy& operator=(KeepLocationPolicy&&) = default;
private:
SourceLocation location_;
};
class IgnoreLocationPolicy {
public:
constexpr SourceLocation Location() const { return {}; }
protected:
constexpr IgnoreLocationPolicy() = default;
constexpr explicit IgnoreLocationPolicy(const SourceLocation&) {}
};
#if CPPGC_SUPPORTS_OBJECT_NAMES
using DefaultLocationPolicy = KeepLocationPolicy;
#else
using DefaultLocationPolicy = IgnoreLocationPolicy;
#endif
struct StrongPersistentPolicy {
using IsStrongPersistent = std::true_type;
static V8_EXPORT PersistentRegion& GetPersistentRegion(const void* object);
};
struct WeakPersistentPolicy {
using IsStrongPersistent = std::false_type;
static V8_EXPORT PersistentRegion& GetPersistentRegion(const void* object);
};
struct StrongCrossThreadPersistentPolicy {
using IsStrongPersistent = std::true_type;
static V8_EXPORT CrossThreadPersistentRegion& GetPersistentRegion(
const void* object);
};
struct WeakCrossThreadPersistentPolicy {
using IsStrongPersistent = std::false_type;
static V8_EXPORT CrossThreadPersistentRegion& GetPersistentRegion(
const void* object);
};
// Forward declarations setting up the default policies.
template <typename T, typename WeaknessPolicy,
typename LocationPolicy = DefaultLocationPolicy,
typename CheckingPolicy = DefaultCrossThreadPersistentCheckingPolicy>
class BasicCrossThreadPersistent;
template <typename T, typename WeaknessPolicy,
typename LocationPolicy = DefaultLocationPolicy,
typename CheckingPolicy = DefaultPersistentCheckingPolicy>
class BasicPersistent;
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy = DefaultMemberCheckingPolicy>
class BasicMember;
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_POINTER_POLICIES_H_
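
A standalone sketch of the policy-based composition used by the handle types declared above (BasicHandle is a hypothetical stand-in): checking and location policies are mixed in as empty bases, so disabled policies compile away to nothing.

#include <cstdio>

struct DisabledCheckingPolicy {
 protected:
  void CheckPointer(const void*) {}  // compiles away entirely
};

struct IgnoreLocationPolicy {};

template <typename T, typename CheckingPolicy = DisabledCheckingPolicy,
          typename LocationPolicy = IgnoreLocationPolicy>
class BasicHandle : private CheckingPolicy, private LocationPolicy {
 public:
  explicit BasicHandle(T* raw) : raw_(raw) {
    this->CheckPointer(raw);  // a debug policy would verify the heap here
  }
  T* Get() const { return raw_; }

 private:
  T* raw_;  // empty bases keep the handle pointer-sized
};

static_assert(sizeof(BasicHandle<int>) == sizeof(int*));

int main() {
  int object = 42;
  BasicHandle<int> handle(&object);
  std::printf("%d\n", *handle.Get());
}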

include/cppgc/internal/write-barrier.h View File

@@ -0,0 +1,477 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_CPPGC_INTERNAL_WRITE_BARRIER_H_
#define INCLUDE_CPPGC_INTERNAL_WRITE_BARRIER_H_
#include <cstddef>
#include <cstdint>
#include "cppgc/heap-handle.h"
#include "cppgc/heap-state.h"
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/atomic-entry-flag.h"
#include "cppgc/internal/base-page-handle.h"
#include "cppgc/internal/member-storage.h"
#include "cppgc/platform.h"
#include "cppgc/sentinel-pointer.h"
#include "cppgc/trace-trait.h"
#include "v8config.h" // NOLINT(build/include_directory)
#if defined(CPPGC_CAGED_HEAP)
#include "cppgc/internal/caged-heap-local-data.h"
#include "cppgc/internal/caged-heap.h"
#endif
namespace cppgc {
class HeapHandle;
namespace internal {
#if defined(CPPGC_CAGED_HEAP)
class WriteBarrierTypeForCagedHeapPolicy;
#else // !CPPGC_CAGED_HEAP
class WriteBarrierTypeForNonCagedHeapPolicy;
#endif // !CPPGC_CAGED_HEAP
class V8_EXPORT WriteBarrier final {
public:
enum class Type : uint8_t {
kNone,
kMarking,
kGenerational,
};
enum class GenerationalBarrierType : uint8_t {
kPreciseSlot,
kPreciseUncompressedSlot,
kImpreciseSlot,
};
struct Params {
HeapHandle* heap = nullptr;
#if V8_ENABLE_CHECKS
Type type = Type::kNone;
#endif // V8_ENABLE_CHECKS
#if defined(CPPGC_CAGED_HEAP)
uintptr_t slot_offset = 0;
uintptr_t value_offset = 0;
#endif // CPPGC_CAGED_HEAP
};
enum class ValueMode {
kValuePresent,
kNoValuePresent,
};
// Returns the required write barrier for a given `slot` and `value`.
static V8_INLINE Type GetWriteBarrierType(const void* slot, const void* value,
Params& params);
// Returns the required write barrier for a given `slot` and `value`.
static V8_INLINE Type GetWriteBarrierType(const void* slot, MemberStorage,
Params& params);
// Returns the required write barrier for a given `slot`.
template <typename HeapHandleCallback>
static V8_INLINE Type GetWriteBarrierType(const void* slot, Params& params,
HeapHandleCallback callback);
// Returns the required write barrier for a given `value`.
static V8_INLINE Type GetWriteBarrierType(const void* value, Params& params);
static V8_INLINE void DijkstraMarkingBarrier(const Params& params,
const void* object);
static V8_INLINE void DijkstraMarkingBarrierRange(
const Params& params, const void* first_element, size_t element_size,
size_t number_of_elements, TraceCallback trace_callback);
static V8_INLINE void SteeleMarkingBarrier(const Params& params,
const void* object);
#if defined(CPPGC_YOUNG_GENERATION)
template <GenerationalBarrierType>
static V8_INLINE void GenerationalBarrier(const Params& params,
const void* slot);
#else // !CPPGC_YOUNG_GENERATION
template <GenerationalBarrierType>
static V8_INLINE void GenerationalBarrier(const Params& params,
const void* slot) {}
#endif // CPPGC_YOUNG_GENERATION
#if V8_ENABLE_CHECKS
static void CheckParams(Type expected_type, const Params& params);
#else // !V8_ENABLE_CHECKS
static void CheckParams(Type expected_type, const Params& params) {}
#endif // !V8_ENABLE_CHECKS
// The FlagUpdater class allows cppgc internals to update
// |write_barrier_enabled_|.
class FlagUpdater;
static bool IsEnabled() { return write_barrier_enabled_.MightBeEntered(); }
private:
WriteBarrier() = delete;
#if defined(CPPGC_CAGED_HEAP)
using WriteBarrierTypePolicy = WriteBarrierTypeForCagedHeapPolicy;
#else // !CPPGC_CAGED_HEAP
using WriteBarrierTypePolicy = WriteBarrierTypeForNonCagedHeapPolicy;
#endif // !CPPGC_CAGED_HEAP
static void DijkstraMarkingBarrierSlow(const void* value);
static void DijkstraMarkingBarrierSlowWithSentinelCheck(const void* value);
static void DijkstraMarkingBarrierRangeSlow(HeapHandle& heap_handle,
const void* first_element,
size_t element_size,
size_t number_of_elements,
TraceCallback trace_callback);
static void SteeleMarkingBarrierSlow(const void* value);
static void SteeleMarkingBarrierSlowWithSentinelCheck(const void* value);
#if defined(CPPGC_YOUNG_GENERATION)
static CagedHeapLocalData& GetLocalData(HeapHandle&);
static void GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
const AgeTable& age_table,
const void* slot, uintptr_t value_offset,
HeapHandle* heap_handle);
static void GenerationalBarrierForUncompressedSlotSlow(
const CagedHeapLocalData& local_data, const AgeTable& age_table,
const void* slot, uintptr_t value_offset, HeapHandle* heap_handle);
static void GenerationalBarrierForSourceObjectSlow(
const CagedHeapLocalData& local_data, const void* object,
HeapHandle* heap_handle);
#endif // CPPGC_YOUNG_GENERATION
static AtomicEntryFlag write_barrier_enabled_;
};
template <WriteBarrier::Type type>
V8_INLINE WriteBarrier::Type SetAndReturnType(WriteBarrier::Params& params) {
if constexpr (type == WriteBarrier::Type::kNone)
return WriteBarrier::Type::kNone;
#if V8_ENABLE_CHECKS
params.type = type;
#endif // V8_ENABLE_CHECKS
return type;
}
#if defined(CPPGC_CAGED_HEAP)
class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
public:
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
return ValueModeDispatch<value_mode>::Get(slot, value, params, callback);
}
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, MemberStorage value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
return ValueModeDispatch<value_mode>::Get(slot, value, params, callback);
}
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
return GetNoSlot(value, params, callback);
}
private:
WriteBarrierTypeForCagedHeapPolicy() = delete;
template <typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type GetNoSlot(const void* value,
WriteBarrier::Params& params,
HeapHandleCallback) {
const bool within_cage = CagedHeapBase::IsWithinCage(value);
if (!within_cage) return WriteBarrier::Type::kNone;
// We know that |value| points either within the normal page or to the
// beginning of a large page, so extract the page header by bitmasking.
BasePageHandle* page =
BasePageHandle::FromPayload(const_cast<void*>(value));
HeapHandle& heap_handle = page->heap_handle();
if (V8_UNLIKELY(heap_handle.is_incremental_marking_in_progress())) {
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
template <WriteBarrier::ValueMode value_mode>
struct ValueModeDispatch;
};
template <>
struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
WriteBarrier::ValueMode::kValuePresent> {
template <typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot,
MemberStorage storage,
WriteBarrier::Params& params,
HeapHandleCallback) {
if (V8_LIKELY(!WriteBarrier::IsEnabled()))
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
return BarrierEnabledGet(slot, storage.Load(), params);
}
template <typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value,
WriteBarrier::Params& params,
HeapHandleCallback) {
if (V8_LIKELY(!WriteBarrier::IsEnabled()))
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
return BarrierEnabledGet(slot, value, params);
}
private:
static V8_INLINE WriteBarrier::Type BarrierEnabledGet(
const void* slot, const void* value, WriteBarrier::Params& params) {
const bool within_cage = CagedHeapBase::AreWithinCage(slot, value);
if (!within_cage) return WriteBarrier::Type::kNone;
// We know that |value| points either within the normal page or to the
// beginning of a large page, so extract the page header by bitmasking.
BasePageHandle* page =
BasePageHandle::FromPayload(const_cast<void*>(value));
HeapHandle& heap_handle = page->heap_handle();
if (V8_LIKELY(!heap_handle.is_incremental_marking_in_progress())) {
#if defined(CPPGC_YOUNG_GENERATION)
if (!heap_handle.is_young_generation_enabled())
return WriteBarrier::Type::kNone;
params.heap = &heap_handle;
params.slot_offset = CagedHeapBase::OffsetFromAddress(slot);
params.value_offset = CagedHeapBase::OffsetFromAddress(value);
return SetAndReturnType<WriteBarrier::Type::kGenerational>(params);
#else // !CPPGC_YOUNG_GENERATION
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
#endif // !CPPGC_YOUNG_GENERATION
}
// Use marking barrier.
params.heap = &heap_handle;
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
};
template <>
struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
WriteBarrier::ValueMode::kNoValuePresent> {
template <typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void*,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
if (V8_LIKELY(!WriteBarrier::IsEnabled()))
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
HeapHandle& handle = callback();
#if defined(CPPGC_YOUNG_GENERATION)
if (V8_LIKELY(!handle.is_incremental_marking_in_progress())) {
if (!handle.is_young_generation_enabled()) {
return WriteBarrier::Type::kNone;
}
params.heap = &handle;
// Check if slot is on stack.
if (V8_UNLIKELY(!CagedHeapBase::IsWithinCage(slot))) {
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
params.slot_offset = CagedHeapBase::OffsetFromAddress(slot);
return SetAndReturnType<WriteBarrier::Type::kGenerational>(params);
}
#else // !defined(CPPGC_YOUNG_GENERATION)
if (V8_UNLIKELY(!handle.is_incremental_marking_in_progress())) {
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
#endif // !defined(CPPGC_YOUNG_GENERATION)
params.heap = &handle;
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
};
#endif // CPPGC_CAGED_HEAP
class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final {
public:
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
return ValueModeDispatch<value_mode>::Get(slot, value, params, callback);
}
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, MemberStorage value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
// `MemberStorage` will always be `RawPointer` for non-caged heap builds.
// Just convert to `void*` in this case.
return ValueModeDispatch<value_mode>::Get(slot, value.Load(), params,
callback);
}
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
// The slot will never be used in `Get()` below.
return Get<WriteBarrier::ValueMode::kValuePresent>(nullptr, value, params,
callback);
}
private:
template <WriteBarrier::ValueMode value_mode>
struct ValueModeDispatch;
WriteBarrierTypeForNonCagedHeapPolicy() = delete;
};
template <>
struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
WriteBarrier::ValueMode::kValuePresent> {
template <typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void*, const void* object,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
// The following check covers nullptr as well as the sentinel pointer.
if (object <= static_cast<void*>(kSentinelPointer)) {
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
if (V8_LIKELY(!WriteBarrier::IsEnabled())) {
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
// We know that |object| is within the normal page or at the beginning of a
// large page, so extract the page header by bitmasking.
BasePageHandle* page =
BasePageHandle::FromPayload(const_cast<void*>(object));
HeapHandle& heap_handle = page->heap_handle();
if (V8_LIKELY(heap_handle.is_incremental_marking_in_progress())) {
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
};
template <>
struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
WriteBarrier::ValueMode::kNoValuePresent> {
template <typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void*, const void*,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
if (V8_UNLIKELY(WriteBarrier::IsEnabled())) {
HeapHandle& handle = callback();
if (V8_LIKELY(handle.is_incremental_marking_in_progress())) {
params.heap = &handle;
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
}
return WriteBarrier::Type::kNone;
}
};
// static
WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
const void* slot, const void* value, WriteBarrier::Params& params) {
return WriteBarrierTypePolicy::Get<ValueMode::kValuePresent>(slot, value,
params, []() {});
}
// static
WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
const void* slot, MemberStorage value, WriteBarrier::Params& params) {
return WriteBarrierTypePolicy::Get<ValueMode::kValuePresent>(slot, value,
params, []() {});
}
// static
template <typename HeapHandleCallback>
WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
const void* slot, WriteBarrier::Params& params,
HeapHandleCallback callback) {
return WriteBarrierTypePolicy::Get<ValueMode::kNoValuePresent>(
slot, nullptr, params, callback);
}
// static
WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
const void* value, WriteBarrier::Params& params) {
return WriteBarrierTypePolicy::Get<ValueMode::kValuePresent>(value, params,
[]() {});
}
// static
void WriteBarrier::DijkstraMarkingBarrier(const Params& params,
const void* object) {
CheckParams(Type::kMarking, params);
#if defined(CPPGC_CAGED_HEAP)
// Caged heap already filters out sentinels.
DijkstraMarkingBarrierSlow(object);
#else // !CPPGC_CAGED_HEAP
DijkstraMarkingBarrierSlowWithSentinelCheck(object);
#endif // !CPPGC_CAGED_HEAP
}
// static
void WriteBarrier::DijkstraMarkingBarrierRange(const Params& params,
const void* first_element,
size_t element_size,
size_t number_of_elements,
TraceCallback trace_callback) {
CheckParams(Type::kMarking, params);
DijkstraMarkingBarrierRangeSlow(*params.heap, first_element, element_size,
number_of_elements, trace_callback);
}
// static
void WriteBarrier::SteeleMarkingBarrier(const Params& params,
const void* object) {
CheckParams(Type::kMarking, params);
#if defined(CPPGC_CAGED_HEAP)
// Caged heap already filters out sentinels.
SteeleMarkingBarrierSlow(object);
#else // !CPPGC_CAGED_HEAP
SteeleMarkingBarrierSlowWithSentinelCheck(object);
#endif // !CPPGC_CAGED_HEAP
}
#if defined(CPPGC_YOUNG_GENERATION)
// static
template <WriteBarrier::GenerationalBarrierType type>
void WriteBarrier::GenerationalBarrier(const Params& params, const void* slot) {
CheckParams(Type::kGenerational, params);
const CagedHeapLocalData& local_data = CagedHeapLocalData::Get();
const AgeTable& age_table = local_data.age_table;
// Bail out if the slot (precise or imprecise) is in the young generation.
if (V8_LIKELY(age_table.GetAge(params.slot_offset) == AgeTable::Age::kYoung))
return;
// Dispatch between different types of barriers.
// TODO(chromium:1029379): Consider reloading local_data in the slow path to
// reduce register pressure.
if constexpr (type == GenerationalBarrierType::kPreciseSlot) {
GenerationalBarrierSlow(local_data, age_table, slot, params.value_offset,
params.heap);
} else if constexpr (type ==
GenerationalBarrierType::kPreciseUncompressedSlot) {
GenerationalBarrierForUncompressedSlotSlow(
local_data, age_table, slot, params.value_offset, params.heap);
} else {
GenerationalBarrierForSourceObjectSlow(local_data, slot, params.heap);
}
}
#endif // CPPGC_YOUNG_GENERATION
} // namespace internal
} // namespace cppgc
#endif // INCLUDE_CPPGC_INTERNAL_WRITE_BARRIER_H_
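
A standalone sketch of the overall dispatch shape (hypothetical stand-ins, not the real API): classify the write once on the fast path, then branch to the marking or generational slow path only when needed.

#include <cstdio>

enum class BarrierType { kNone, kMarking, kGenerational };

struct Heap {
  bool marking_in_progress = false;
  bool young_generation_enabled = false;
};

BarrierType GetWriteBarrierType(const Heap& heap) {
  if (heap.marking_in_progress) return BarrierType::kMarking;
  if (heap.young_generation_enabled) return BarrierType::kGenerational;
  return BarrierType::kNone;  // common case: no barrier work at all
}

void WriteBarrier(const Heap& heap, const void* slot, const void* value) {
  switch (GetWriteBarrierType(heap)) {
    case BarrierType::kMarking:
      std::printf("Dijkstra barrier: mark %p gray\n", value);
      break;
    case BarrierType::kGenerational:
      std::printf("generational barrier: record old-to-young slot %p\n", slot);
      break;
    case BarrierType::kNone:
      break;
  }
}

int main() {
  Heap heap;
  heap.marking_in_progress = true;
  int object = 0;
  int* field = &object;
  WriteBarrier(heap, &field, field);  // barrier after the pointer store
}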