mirror of
https://gitee.com/openharmony/arkcompiler_ets_runtime
synced 2024-10-07 16:13:49 +00:00
Rewrite shared barrier using IR
Change-Id: Ic09134927ec68a33caed71a1ea98487bfd3aa5a0 Signed-off-by: Artem Udovichenko <artem.udovichenko@huawei.com>
This commit is contained in:
parent
5c29f13508
commit
acb78a78d6
@ -2000,6 +2000,22 @@ DEF_CALL_SIGNATURE(InsertLocalToShareRSet)
|
||||
callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
|
||||
}
|
||||
|
||||
DEF_CALL_SIGNATURE(SetBitAtomic)
{
    // Signature of the SetBitAtomic runtime stub: void(word*, mask, oldValue).
    // 3 : 3 input parameters (bitset word address, bit mask, previously loaded word).
    constexpr size_t paramCount = 3;
    CallSignature signature("SetBitAtomic", 0, paramCount, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID());
    *callSign = signature;
    std::array<VariableType, paramCount> paramTypes = {
        VariableType::NATIVE_POINTER(),  // address of the GC-bitset word to update
        VariableType::INT32(),           // mask of the bit(s) to set
        VariableType::INT32()            // word value the caller already loaded
    };
    callSign->SetParameters(paramTypes.data());
    // Leaf stub: never triggers GC and is called without a GC-visible frame.
    callSign->SetGCLeafFunction(true);
    callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
}
|
||||
|
||||
#define DEF_FLOAT_UNARY_CALL_SIGNATURE_BY_NAME(NAME) \
|
||||
DEF_CALL_SIGNATURE(NAME) \
|
||||
{ \
|
||||
|
@ -466,6 +466,7 @@ private:
|
||||
V(GetActualArgvNoGC) \
|
||||
V(InsertOldToNewRSet) \
|
||||
V(InsertLocalToShareRSet) \
|
||||
V(SetBitAtomic) \
|
||||
V(DoubleToInt) \
|
||||
V(DoubleToLength) \
|
||||
V(FloatMod) \
|
||||
|
@ -1576,9 +1576,40 @@ void StubBuilder::SetValueWithBarrier(GateRef glue, GateRef obj, GateRef offset,
|
||||
BRANCH(BoolAnd(objectNotInShare, valueRegionInShare), &shareBarrier, &shareBarrierExit);
|
||||
Bind(&shareBarrier);
|
||||
{
|
||||
// todo(lukai) fastpath
|
||||
CallNGCRuntime(glue, RTSTUB_ID(InsertLocalToShareRSet), { glue, obj, offset });
|
||||
Jump(&shareBarrierExit);
|
||||
Label callSharedBarrier(env);
|
||||
Label storeToSharedRSet(env);
|
||||
GateRef loadOffset = IntPtr(Region::PackedData::GetLocalToShareSetOffset(env_->Is32Bit()));
|
||||
auto localToShareSet = Load(VariableType::NATIVE_POINTER(), objectRegion, loadOffset);
|
||||
BRANCH(IntPtrEqual(localToShareSet, IntPtr(0)), &callSharedBarrier, &storeToSharedRSet);
|
||||
Bind(&storeToSharedRSet);
|
||||
{
|
||||
// (slotAddr - this) >> TAGGED_TYPE_SIZE_LOG
|
||||
GateRef bitOffsetPtr = IntPtrLSR(PtrSub(slotAddr, objectRegion), IntPtr(TAGGED_TYPE_SIZE_LOG));
|
||||
GateRef bitOffset = TruncPtrToInt32(bitOffsetPtr);
|
||||
GateRef bitPerWordLog2 = Int32(GCBitset::BIT_PER_WORD_LOG2);
|
||||
GateRef bytePerWord = Int32(GCBitset::BYTE_PER_WORD);
|
||||
// bitOffset >> BIT_PER_WORD_LOG2
|
||||
GateRef index = Int32LSR(bitOffset, bitPerWordLog2);
|
||||
GateRef byteIndex = Int32Mul(index, bytePerWord);
|
||||
// bitset_[index] |= mask;
|
||||
GateRef bitsetData = PtrAdd(localToShareSet, IntPtr(RememberedSet::GCBITSET_DATA_OFFSET));
|
||||
GateRef oldsetValue = Load(VariableType::INT32(), bitsetData, byteIndex);
|
||||
GateRef mask = GetBitMask(bitOffset);
|
||||
GateRef flag = Int32And(oldsetValue, mask);
|
||||
// Load the bit using relaxed memory order.
|
||||
// If the bit is set, do nothing (local->shared barrier is done).
|
||||
// Else call runtime.
|
||||
Label atomicSet(env);
|
||||
BRANCH(Int32NotEqual(flag, Int32(0)), ¬ValidIndex, &atomicSet);
|
||||
Bind(&atomicSet);
|
||||
CallNGCRuntime(glue, RTSTUB_ID(SetBitAtomic), { PtrAdd(bitsetData, byteIndex), mask, oldsetValue });
|
||||
Jump(¬ValidIndex);
|
||||
}
|
||||
Bind(&callSharedBarrier);
|
||||
{
|
||||
CallNGCRuntime(glue, RTSTUB_ID(InsertLocalToShareRSet), { glue, obj, offset });
|
||||
Jump(¬ValidIndex);
|
||||
}
|
||||
}
|
||||
Bind(&shareBarrierExit);
|
||||
GateRef objectNotInYoung = BoolNot(InYoungGeneration(objectRegion));
|
||||
|
@ -62,13 +62,13 @@ inline RememberedSet *Region::GetOrCreateOldToNewRememberedSet()
|
||||
|
||||
inline RememberedSet *Region::GetOrCreateLocalToShareRememberedSet()
{
    // Lazily create the local->share remembered set, now stored in packedData_
    // so IR-generated barriers can reach it via a fixed offset.
    // Double-checked locking: the unlocked check keeps the common (already
    // created) path lock-free; the locked re-check prevents double creation.
    if (UNLIKELY(packedData_.localToShareSet_ == nullptr)) {
        LockHolder lock(*lock_);
        if (packedData_.localToShareSet_ == nullptr) {
            packedData_.localToShareSet_ = CreateRememberedSet();
        }
    }
    return packedData_.localToShareSet_;
}
|
||||
|
||||
inline void Region::MergeRSetForConcurrentSweeping()
|
||||
@ -130,10 +130,10 @@ inline bool Region::TestLocalToShare(uintptr_t addr)
|
||||
{
|
||||
ASSERT(InRange(addr));
|
||||
// Only used for heap verification, so donot need to use lock
|
||||
if (localToShareSet_ == nullptr) {
|
||||
if (packedData_.localToShareSet_ == nullptr) {
|
||||
return false;
|
||||
}
|
||||
return localToShareSet_->TestBit(ToUintPtr(this), addr);
|
||||
return packedData_.localToShareSet_->TestBit(ToUintPtr(this), addr);
|
||||
}
|
||||
|
||||
template <typename Visitor>
|
||||
@ -164,7 +164,7 @@ inline void Region::AtomicInsertCrossRegionRSet(uintptr_t addr)
|
||||
|
||||
inline bool Region::HasLocalToShareRememberedSet() const
|
||||
{
|
||||
return localToShareSet_ != nullptr;
|
||||
return packedData_.localToShareSet_ != nullptr;
|
||||
}
|
||||
|
||||
inline void Region::InsertLocalToShareRSet(uintptr_t addr)
|
||||
@ -181,24 +181,24 @@ inline void Region::AtomicInsertLocalToShareRSet(uintptr_t addr)
|
||||
|
||||
inline void Region::AtomicClearLocalToShareRSetInRange(uintptr_t start, uintptr_t end)
|
||||
{
|
||||
if (localToShareSet_ != nullptr) {
|
||||
localToShareSet_->AtomicClearRange(ToUintPtr(this), start, end);
|
||||
if (packedData_.localToShareSet_ != nullptr) {
|
||||
packedData_.localToShareSet_->AtomicClearRange(ToUintPtr(this), start, end);
|
||||
}
|
||||
}
|
||||
|
||||
inline void Region::DeleteLocalToShareRSet()
|
||||
{
|
||||
if (localToShareSet_ != nullptr) {
|
||||
nativeAreaAllocator_->Free(localToShareSet_, localToShareSet_->Size());
|
||||
localToShareSet_ = nullptr;
|
||||
if (packedData_.localToShareSet_ != nullptr) {
|
||||
nativeAreaAllocator_->Free(packedData_.localToShareSet_, packedData_.localToShareSet_->Size());
|
||||
packedData_.localToShareSet_ = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Visitor>
|
||||
inline void Region::AtomicIterateAllLocalToShareBits(Visitor visitor)
|
||||
{
|
||||
if (localToShareSet_ != nullptr) {
|
||||
localToShareSet_->AtomicIterateAllMarkedBits(ToUintPtr(this), visitor);
|
||||
if (packedData_.localToShareSet_ != nullptr) {
|
||||
packedData_.localToShareSet_->AtomicIterateAllMarkedBits(ToUintPtr(this), visitor);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -609,11 +609,13 @@ public:
|
||||
base::AlignedPointer,
|
||||
base::AlignedPointer,
|
||||
base::AlignedPointer,
|
||||
base::AlignedPointer,
|
||||
base::AlignedSize> {
|
||||
enum class Index : size_t {
|
||||
FlagIndex = 0,
|
||||
MarkGCBitSetIndex,
|
||||
OldToNewSetIndex,
|
||||
LocalToShareSetIndex,
|
||||
BeginIndex,
|
||||
BitSetSizeIndex,
|
||||
NumOfMembers
|
||||
@ -653,6 +655,11 @@ public:
|
||||
return GetOffset<static_cast<size_t>(Index::OldToNewSetIndex)>(isArch32);
|
||||
}
|
||||
|
||||
static size_t GetLocalToShareSetOffset(bool isArch32)
|
||||
{
|
||||
return GetOffset<static_cast<size_t>(Index::LocalToShareSetIndex)>(isArch32);
|
||||
}
|
||||
|
||||
static size_t GetBeginOffset(bool isArch32)
|
||||
{
|
||||
return GetOffset<static_cast<size_t>(Index::BeginIndex)>(isArch32);
|
||||
@ -661,6 +668,7 @@ public:
|
||||
alignas(EAS) PackedPtr flags_;
|
||||
alignas(EAS) GCBitset *markGCBitset_ {nullptr};
|
||||
alignas(EAS) RememberedSet *oldToNewSet_ {nullptr};
|
||||
alignas(EAS) RememberedSet *localToShareSet_ {nullptr};
|
||||
alignas(EAS) uintptr_t begin_ {0};
|
||||
alignas(EAS) size_t bitsetSize_ {0};
|
||||
};
|
||||
@ -690,7 +698,6 @@ private:
|
||||
|
||||
RememberedSet *crossRegionSet_ {nullptr};
|
||||
RememberedSet *sweepingRSet_ {nullptr};
|
||||
RememberedSet *localToShareSet_ {nullptr};
|
||||
Span<FreeObjectSet *> freeObjectSets_;
|
||||
Mutex *lock_ {nullptr};
|
||||
uint64_t wasted_;
|
||||
|
@ -3163,6 +3163,23 @@ void RuntimeStubs::InsertLocalToShareRSet([[maybe_unused]] uintptr_t argGlue,
|
||||
region->AtomicInsertLocalToShareRSet(slotAddr);
|
||||
}
|
||||
|
||||
void RuntimeStubs::SetBitAtomic(GCBitset::GCBitsetWord *word, GCBitset::GCBitsetWord mask,
                                GCBitset::GCBitsetWord oldValue)
{
    // Atomically OR `mask` into *word. `oldValue` is the word the caller already
    // loaded (relaxed), so the first CAS needs no extra load. The stub-side fast
    // path has already checked the bit, so the caller believes it is still clear.
    volatile auto atomicWord = reinterpret_cast<volatile std::atomic<GCBitset::GCBitsetWord> *>(word);
    // On CAS failure, compare_exchange writes the word's current value back into
    // oldValue, which both feeds the "already set?" test and seeds the retry.
    // compare_exchange_strong cannot fail spuriously, so the boolean result is
    // an exact "did we publish the bit" signal.
    while (!std::atomic_compare_exchange_strong_explicit(atomicWord, &oldValue, oldValue | mask,
                                                         std::memory_order_release, std::memory_order_relaxed)) {
        if (oldValue & mask) {
            // Another thread set the bit concurrently; nothing left to do.
            return;
        }
    }
}
|
||||
|
||||
void RuntimeStubs::MarkingBarrier([[maybe_unused]] uintptr_t argGlue,
|
||||
uintptr_t object, size_t offset, TaggedObject *value)
|
||||
{
|
||||
|
@ -128,6 +128,7 @@ using FastCallAotEntryType = JSTaggedValue (*)(uintptr_t glue, uint32_t argc, co
|
||||
V(GetActualArgvNoGC) \
|
||||
V(InsertOldToNewRSet) \
|
||||
V(InsertLocalToShareRSet) \
|
||||
V(SetBitAtomic) \
|
||||
V(MarkingBarrier) \
|
||||
V(StoreBarrier) \
|
||||
V(DoubleToInt) \
|
||||
@ -506,6 +507,8 @@ public:
|
||||
static JSTaggedType GetActualArgvNoGC(uintptr_t argGlue);
|
||||
static void InsertOldToNewRSet([[maybe_unused]] uintptr_t argGlue, uintptr_t object, size_t offset);
|
||||
static void InsertLocalToShareRSet([[maybe_unused]] uintptr_t argGlue, uintptr_t object, size_t offset);
|
||||
static void SetBitAtomic(GCBitset::GCBitsetWord *word, GCBitset::GCBitsetWord mask,
|
||||
GCBitset::GCBitsetWord oldValue);
|
||||
static int32_t DoubleToInt(double x, size_t bits);
|
||||
static JSTaggedType DoubleToLength(double x);
|
||||
static double FloatMod(double x, double y);
|
||||
|
Loading…
Reference in New Issue
Block a user