Bug 1470369 - Don't collect the nursery every GC slice during sweeping r=sfink

The main problem here is that we sweep weak caches off-thread, and when we finish sweeping a hash table, the Enum class's destructor can rehash or resize the table, causing store buffer entries to be added or removed (since the table may now contain nursery pointers).

To address this, the patch adds a store buffer lock and establishes that all off-thread store buffer access from inside the GC must take place with this lock held. The changes to GCHashSet/Map are a little gross; perhaps it would be better to add an explicit API to hash tables that lets us postpone the rehash/resize operations, but I haven't done that here.
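For illustration, the hazard looks roughly like this (a hypothetical sketch, not code from the patch; Table stands in for a barriered GCHashSet and IsAboutToBeFinalized for the real sweep predicate):

// Hypothetical sketch of the hazard, not code from this patch.
{
  typename Table::Enum e(table);
  for (; !e.empty(); e.popFront()) {
    if (IsAboutToBeFinalized(&e.front())) {
      e.removeFront();  // removal is fine; any rehash is deferred
    }
  }
}  // ~Enum() may rehash or shrink the table here. Moving barriered entries
   // runs post-barriers that add or remove store buffer entries, which is
   // not safe on a background thread without the new lock.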

Other complications are:

The TypeSetRef generic buffer entries can contain pointers into TI data that is moved during sweeping, so we do need to collect the nursery if any such entries are present. This was relatively rare in testing.
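The mechanism, condensed from the ConstraintTypeSet::postWriteBarrier and GCRuntime::shouldCollectNurseryForSlice hunks below:

// Condensed from this patch: buffering a TypeSetRef records that the store
// buffer holds pointers into TI data ...
sb->putGeneric(TypeSetRef(cx->zone(), this));
sb->setHasTypeSetPointers();

// ... and the slice logic then forces a nursery collection (emptying the
// buffer) before sweeping can start:
bool shouldCollectForSweeping = storeBuffer().hasTypeSetPointers() ||
                                storeBuffer().mayHavePointersToDeadCells();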

Finally, swapping objects can result in pointers into dying objects being put in the whole-cell store buffer (because we deliberately skip barriers when remapping wrappers, so as not to keep otherwise-dead wrappers alive). We need to collect the nursery if these are present, to prevent them from being accessed after the dying objects are finalized.
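This case is now flagged explicitly, condensed from the JSObject::swap hunk below:

// Condensed from this patch: after a swap, the whole-cell buffer may point
// at cells that die during sweeping if a GC is already underway.
storeBuffer.putWholeCell(a);
storeBuffer.putWholeCell(b);
if (a->zone()->wasGCStarted() || b->zone()->wasGCStarted()) {
  storeBuffer.setMayHavePointersToDeadCells();
}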

Differential Revision: https://phabricator.services.mozilla.com/D77831
Jon Coppeard 2020-06-03 08:10:45 +00:00
parent 70ed1b7b2a
commit 1975b4203f
16 changed files with 212 additions and 50 deletions

View File

@@ -76,7 +76,12 @@ class GCHashMap : public js::HashMap<Key, Value, HashPolicy, AllocPolicy> {
bool needsSweep() const { return !this->empty(); }
void sweep() {
for (typename Base::Enum e(*this); !e.empty(); e.popFront()) {
typename Base::Enum e(*this);
sweepEntries(e);
}
void sweepEntries(typename Base::Enum& e) {
for (; !e.empty(); e.popFront()) {
if (MapSweepPolicy::needsSweep(&e.front().mutableKey(),
&e.front().value())) {
e.removeFront();
@@ -270,7 +275,12 @@ class GCHashSet : public js::HashSet<T, HashPolicy, AllocPolicy> {
bool needsSweep() const { return !this->empty(); }
void sweep() {
for (typename Base::Enum e(*this); !e.empty(); e.popFront()) {
typename Base::Enum e(*this);
sweepEntries(e);
}
void sweepEntries(typename Base::Enum& e) {
for (; !e.empty(); e.popFront()) {
if (GCPolicy<T>::needsSweep(&e.mutableFront())) {
e.removeFront();
}
@@ -410,9 +420,22 @@ class WeakCache<GCHashMap<Key, Value, HashPolicy, AllocPolicy, MapSweepPolicy>>
bool needsSweep() override { return map.needsSweep(); }
size_t sweep() override {
size_t sweep(js::gc::StoreBuffer* sbToLock) override {
size_t steps = map.count();
map.sweep();
// Create an Enum and sweep the table entries.
mozilla::Maybe<typename Map::Enum> e;
e.emplace(map);
map.sweepEntries(e.ref());
// Take the store buffer lock, if one was provided, while the Enum's
// destructor runs, as this can rehash/resize the table and access the store
// buffer.
mozilla::Maybe<js::gc::AutoLockStoreBuffer> lock;
if (sbToLock) {
lock.emplace(sbToLock);
}
e.reset();
return steps;
}
@@ -593,9 +616,24 @@ class WeakCache<GCHashSet<T, HashPolicy, AllocPolicy>>
set(std::forward<Args>(args)...),
needsBarrier(false) {}
size_t sweep() override {
size_t sweep(js::gc::StoreBuffer* sbToLock) override {
size_t steps = set.count();
set.sweep();
// Create an Enum and sweep the table entries. It's not necessary to take
// the store buffer lock yet.
mozilla::Maybe<typename Set::Enum> e;
e.emplace(set);
set.sweepEntries(e.ref());
// Destroy the Enum, potentially rehashing or resizing the table. Since this
// can access the store buffer, we need to take a lock for this if we're
// called off main thread.
mozilla::Maybe<js::gc::AutoLockStoreBuffer> lock;
if (sbToLock) {
lock.emplace(sbToLock);
}
e.reset();
return steps;
}

View File

@@ -9,6 +9,29 @@
#include "js/HeapAPI.h"
namespace js {
namespace gc {
class StoreBuffer;
JS_PUBLIC_API void LockStoreBuffer(StoreBuffer* sb);
JS_PUBLIC_API void UnlockStoreBuffer(StoreBuffer* sb);
class AutoLockStoreBuffer {
StoreBuffer* sb;
public:
explicit AutoLockStoreBuffer(StoreBuffer* sb) : sb(sb) {
LockStoreBuffer(sb);
}
~AutoLockStoreBuffer() {
UnlockStoreBuffer(sb);
}
};
} // namespace gc
} // namespace js
namespace JS {
namespace detail {
class WeakCacheBase;
@@ -32,7 +55,7 @@ class WeakCacheBase : public mozilla::LinkedListElement<WeakCacheBase> {
WeakCacheBase(WeakCacheBase&& other) = default;
virtual ~WeakCacheBase() = default;
virtual size_t sweep() = 0;
virtual size_t sweep(js::gc::StoreBuffer* sbToLock) = 0;
virtual bool needsSweep() = 0;
virtual bool setNeedsIncrementalBarrier(bool needs) {
@@ -68,7 +91,7 @@ class WeakCache : protected detail::WeakCacheBase,
const T& get() const { return cache; }
T& get() { return cache; }
size_t sweep() override {
size_t sweep(js::gc::StoreBuffer* sbToLock) override {
GCPolicy<T>::sweep(&cache);
return 0;
}
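AutoLockStoreBuffer is plain RAII; a sketch of the intended usage pattern (hypothetical caller and cache type, assuming the sweep may run on a GC helper thread):

// Hypothetical caller: only the region that can touch the store buffer
// needs to hold the lock.
void sweepCacheOffThread(MyCache& cache, js::gc::StoreBuffer* sb) {
  cache.sweepEntries();  // removes dead entries; no store buffer access
  {
    js::gc::AutoLockStoreBuffer lock(sb);
    cache.finishSweep();  // may rehash the table and hit the store buffer
  }
}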

View File

@@ -2167,9 +2167,12 @@ void GCRuntime::sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone) {
sweepTypesAfterCompacting(zone);
sweepFinalizationRegistries(zone);
zone->weakRefMap().sweep();
zone->sweepWeakMaps();
for (auto* cache : zone->weakCaches()) {
cache->sweep();
{
zone->sweepWeakMaps();
for (auto* cache : zone->weakCaches()) {
cache->sweep(nullptr);
}
}
if (jit::JitZone* jitZone = zone->jitZone()) {
@@ -2365,7 +2368,7 @@ void GCRuntime::updateTypeDescrObjects(MovingTracer* trc, Zone* zone) {
// need to be updated. Do not update any non-reserved slots, since they might
// point back to unprocessed descriptor objects.
zone->typeDescrObjects().sweep();
zone->typeDescrObjects().sweep(nullptr);
for (auto r = zone->typeDescrObjects().all(); !r.empty(); r.popFront()) {
NativeObject* obj = &r.front()->as<NativeObject>();
@@ -2547,7 +2550,7 @@ void GCRuntime::updateRuntimePointersToRelocatedCells(AutoGCSession& session) {
DebugAPI::sweepAll(rt->defaultFreeOp());
jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt, &trc);
for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
cache->sweep();
cache->sweep(nullptr);
}
// Type inference may put more blocks here to free.
@@ -4424,7 +4427,7 @@ bool Compartment::findSweepGroupEdges() {
// zone is not still being marked when we start sweeping the wrapped zone.
// As an optimization, if the wrapped object is already marked black there
// is no danger of later marking and we can skip this.
if (key->asTenured().isMarkedBlack()) {
if (key->isMarkedBlack()) {
continue;
}
@@ -4960,7 +4963,7 @@ class ImmediateSweepWeakCacheTask : public GCParallelTask {
void run() override {
AutoSetThreadIsSweeping threadIsSweeping(zone);
cache.sweep();
cache.sweep(&gc->storeBuffer());
}
};
@@ -5040,6 +5043,9 @@ void GCRuntime::sweepWeakMaps() {
oomUnsafe.crash("clearing weak keys in beginSweepingSweepGroup()");
}
// Lock the store buffer since sweeping the weak maps may access it when
// rehashing or resizing the tables.
AutoLockStoreBuffer lock(&storeBuffer());
zone->sweepWeakMaps();
}
}
@@ -5240,7 +5246,7 @@ static void SweepAllWeakCachesOnMainThread(JSRuntime* rt) {
if (cache->needsIncrementalBarrier()) {
cache->setNeedsIncrementalBarrier(false);
}
cache->sweep();
cache->sweep(nullptr);
return true;
});
}
@@ -5254,6 +5260,8 @@ IncrementalProgress GCRuntime::beginSweepingSweepGroup(JSFreeOp* fop,
using namespace gcstats;
MOZ_ASSERT(!storeBuffer().hasTypeSetPointers());
AutoSCC scc(stats(), sweepGroupIndex);
bool sweepingAtoms = false;
@@ -5598,6 +5606,8 @@ IncrementalProgress GCRuntime::sweepTypeInformation(JSFreeOp* fop,
// the sweep group finishes we won't be able to determine which things in
// the zone are live.
MOZ_ASSERT(!storeBuffer().hasTypeSetPointers());
gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::SWEEP_TYPES);
@@ -5685,7 +5695,7 @@ static size_t IncrementalSweepWeakCache(GCRuntime* gc,
JS::detail::WeakCacheBase* cache = item.cache;
MOZ_ASSERT(cache->needsIncrementalBarrier());
size_t steps = cache->sweep();
size_t steps = cache->sweep(&gc->storeBuffer());
cache->setNeedsIncrementalBarrier(false);
return steps;
@@ -6555,16 +6565,16 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,
MOZ_ASSERT_IF(isIncrementalGCInProgress(), isIncremental);
isIncremental = !budget.isUnlimited();
/*
* Non-incremental collection expects that the nursery is empty.
*/
if (!isIncremental) {
if (!isIncremental && !isIncrementalGCInProgress()) {
MOZ_ASSERT(nursery().isEmpty());
storeBuffer().checkEmpty();
}
isIncremental = !budget.isUnlimited();
if (useZeal && hasIncrementalTwoSliceZealMode()) {
/*
* Yields between slices occurs at predetermined points in these modes;
@@ -6663,14 +6673,12 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,
incrementalState = State::Sweep;
lastMarkSlice = false;
beginSweepPhase(reason, session);
[[fallthrough]];
case State::Sweep:
MOZ_ASSERT(nursery().isEmpty());
storeBuffer().checkEmpty();
rt->mainContextFromOwnThread()->traceWrapperGCRooters(&marker);
if (performSweepActions(budget) == NotFinished) {
@@ -7103,25 +7111,49 @@ bool GCRuntime::shouldCollectNurseryForSlice(bool nonincrementalByAPI,
return false;
}
if (nursery().shouldCollect()) {
return true;
}
bool nonIncremental = nonincrementalByAPI || budget.isUnlimited();
bool shouldCollectForSweeping = storeBuffer().hasTypeSetPointers() ||
storeBuffer().mayHavePointersToDeadCells();
switch (incrementalState) {
case State::NotActive:
case State::Sweep:
case State::Compact:
return true;
case State::Mark:
return (mightSweepInThisSlice(nonIncremental) &&
shouldCollectForSweeping) ||
mightCompactInThisSlice(nonIncremental);
case State::Sweep:
return shouldCollectForSweeping ||
mightCompactInThisSlice(nonIncremental);
case State::Finalize:
return mightCompactInThisSlice(nonIncremental);
case State::Compact:
return true;
case State::Decommit:
return (nonincrementalByAPI || budget.isUnlimited() || lastMarkSlice ||
nursery().shouldCollect() || hasIncrementalTwoSliceZealMode());
case State::Finish:
return false;
case State::MarkRoots:
default:
MOZ_CRASH("Unexpected GC state");
}
return false;
}
inline bool GCRuntime::mightSweepInThisSlice(bool nonIncremental) {
MOZ_ASSERT(incrementalState < State::Sweep);
return nonIncremental || lastMarkSlice || hasIncrementalTwoSliceZealMode();
}
inline bool GCRuntime::mightCompactInThisSlice(bool nonIncremental) {
MOZ_ASSERT(incrementalState < State::Compact);
return isCompacting && (nonIncremental || hasIncrementalTwoSliceZealMode());
}
#ifdef JS_GC_ZEAL
static bool IsDeterministicGCReason(JS::GCReason reason) {
switch (reason) {

View File

@@ -705,6 +705,8 @@ class GCRuntime {
JS::GCReason reason, AutoGCSession& session);
MOZ_MUST_USE bool shouldCollectNurseryForSlice(bool nonincrementalByAPI,
SliceBudget& budget);
bool mightSweepInThisSlice(bool nonIncremental);
bool mightCompactInThisSlice(bool nonIncremental);
void collectNursery(JS::GCReason reason, gcstats::PhaseKind phase);
friend class AutoCallGCCallbacks;
@@ -1200,7 +1202,10 @@ class GCRuntime {
private:
MainThreadData<Nursery> nursery_;
MainThreadData<gc::StoreBuffer> storeBuffer_;
// The store buffer used to track tenured-to-nursery edges for generational
// GC. This is accessed off the main thread when sweeping WeakCaches.
MainThreadOrGCTaskData<gc::StoreBuffer> storeBuffer_;
mozilla::TimeStamp lastLastDitchTime;

View File

@@ -2700,8 +2700,6 @@ void GCMarker::repush(JSObject* obj) {
}
bool GCMarker::enterWeakMarkingMode() {
MOZ_ASSERT(runtime()->gc.nursery().isEmpty());
MOZ_ASSERT(weakMapAction() == ExpandWeakMaps);
MOZ_ASSERT(state != MarkingState::WeakMarking);
if (state == MarkingState::IterativeMarking) {
@@ -3745,8 +3743,7 @@ inline bool SweepingTracer::sweepEdge(T** thingp) {
// Bug 1501334 : IsAboutToBeFinalized doesn't work for atoms
// Bug 1071218 : Refactor Debugger::sweepAll and
// JitRuntime::SweepJitcodeGlobalTable to work per sweep group
TenuredCell& tenured = thing->asTenured();
if (!tenured.isMarkedAny()) {
if (!thing->isMarkedAny()) {
*thingp = nullptr;
return false;
}

View File

@@ -170,7 +170,6 @@ class NurseryAwareHashMap {
}
void sweep() {
MOZ_ASSERT(nurseryEntries.empty());
map.sweep();
}

View File

@@ -10,6 +10,8 @@
#include "gc/Statistics.h"
#include "vm/ArgumentsObject.h"
#include "vm/JSContext.h"
#include "vm/MutexIDs.h"
#include "vm/Runtime.h"
#include "gc/GC-inl.h"
@@ -17,6 +19,16 @@
using namespace js;
using namespace js::gc;
JS_PUBLIC_API void js::gc::LockStoreBuffer(StoreBuffer* sb) {
MOZ_ASSERT(sb);
sb->lock();
}
JS_PUBLIC_API void js::gc::UnlockStoreBuffer(StoreBuffer* sb) {
MOZ_ASSERT(sb);
sb->unlock();
}
bool StoreBuffer::WholeCellBuffer::init() {
MOZ_ASSERT(!head_);
if (!storage_) {
@@ -55,18 +67,21 @@ void StoreBuffer::GenericBuffer::trace(JSTracer* trc) {
}
StoreBuffer::StoreBuffer(JSRuntime* rt, const Nursery& nursery)
: bufferVal(this, JS::GCReason::FULL_VALUE_BUFFER),
: lock_(mutexid::StoreBuffer),
bufferVal(this, JS::GCReason::FULL_VALUE_BUFFER),
bufStrCell(this, JS::GCReason::FULL_CELL_PTR_STR_BUFFER),
bufBigIntCell(this, JS::GCReason::FULL_CELL_PTR_BIGINT_BUFFER),
bufObjCell(this, JS::GCReason::FULL_CELL_PTR_OBJ_BUFFER),
bufferSlot(this, JS::GCReason::FULL_SLOT_BUFFER),
bufferWholeCell(this),
bufferGeneric(this),
cancelIonCompilations_(false),
runtime_(rt),
nursery_(nursery),
aboutToOverflow_(false),
enabled_(false)
enabled_(false),
cancelIonCompilations_(false),
hasTypeSetPointers_(false),
mayHavePointersToDeadCells_(false)
#ifdef DEBUG
,
mEntered(false)
@@ -118,6 +133,8 @@ void StoreBuffer::clear() {
aboutToOverflow_ = false;
cancelIonCompilations_ = false;
hasTypeSetPointers_ = false;
mayHavePointersToDeadCells_ = false;
bufferVal.clear();
bufStrCell.clear();

View File

@@ -19,6 +19,7 @@
#include "js/AllocPolicy.h"
#include "js/MemoryMetrics.h"
#include "js/UniquePtr.h"
#include "threading/Mutex.h"
namespace js {
namespace gc {
@@ -26,6 +27,10 @@ namespace gc {
class Arena;
class ArenaCellSet;
#ifdef DEBUG
extern bool CurrentThreadHasLockedGC();
#endif
/*
* BufferableRef represents an abstract reference for use in the generational
* GC's remembered set. Entries in the store buffer that cannot be represented
@@ -378,10 +383,23 @@ class StoreBuffer {
} Hasher;
};
// The GC runs tasks that may access the store buffer in parallel and so must
// take a lock. The mutator may only access the store buffer from the main
// thread.
inline void CheckAccess() const {
#ifdef DEBUG
if (JS::RuntimeHeapIsBusy()) {
MOZ_ASSERT((CurrentThreadCanAccessRuntime(runtime_) && !lock_.isHeld()) ||
lock_.ownedByCurrentThread());
} else {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
}
#endif
}
template <typename Buffer, typename Edge>
void unput(Buffer& buffer, const Edge& edge) {
MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
CheckAccess();
if (!isEnabled()) {
return;
}
@@ -391,8 +409,7 @@
template <typename Buffer, typename Edge>
void put(Buffer& buffer, const Edge& edge) {
MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
CheckAccess();
if (!isEnabled()) {
return;
}
@@ -402,6 +419,8 @@
}
}
Mutex lock_;
MonoTypeBuffer<ValueEdge> bufferVal;
MonoTypeBuffer<StringPtrEdge> bufStrCell;
MonoTypeBuffer<BigIntPtrEdge> bufBigIntCell;
@@ -409,13 +428,15 @@
MonoTypeBuffer<SlotsEdge> bufferSlot;
WholeCellBuffer bufferWholeCell;
GenericBuffer bufferGeneric;
bool cancelIonCompilations_;
JSRuntime* runtime_;
const Nursery& nursery_;
bool aboutToOverflow_;
bool enabled_;
bool cancelIonCompilations_;
bool hasTypeSetPointers_;
bool mayHavePointersToDeadCells_;
#ifdef DEBUG
bool mEntered; /* For ReentrancyGuard. */
#endif
@@ -436,6 +457,21 @@
bool cancelIonCompilations() const { return cancelIonCompilations_; }
/*
* Type inference data structures are moved during sweeping, so if we are
* to sweep them then we must make sure that the store buffer has no pointers
* into them.
*/
bool hasTypeSetPointers() const { return hasTypeSetPointers_; }
/*
* Brain transplants may add whole-cell buffer entries for dead cells. We must
* evict the nursery prior to sweeping arenas if any such entries are present.
*/
bool mayHavePointersToDeadCells() const {
return mayHavePointersToDeadCells_;
}
/* Insert a single edge into the buffer/remembered set. */
void putValue(JS::Value* vp) { put(bufferVal, ValueEdge(vp)); }
void unputValue(JS::Value* vp) { unput(bufferVal, ValueEdge(vp)); }
@@ -467,6 +503,8 @@
}
void setShouldCancelIonCompilations() { cancelIonCompilations_ = true; }
void setHasTypeSetPointers() { hasTypeSetPointers_ = true; }
void setMayHavePointersToDeadCells() { mayHavePointersToDeadCells_ = true; }
/* Methods to trace the source of all edges in the store buffer. */
void traceValues(TenuringTracer& mover) { bufferVal.trace(mover); }
@@ -486,6 +524,10 @@
JS::GCSizes* sizes);
void checkEmpty() const;
// For use by the GC only.
void lock() { lock_.lock(); }
void unlock() { lock_.unlock(); }
};
// A set of cells in an arena used to implement the whole cell store buffer.

View File

@@ -497,7 +497,6 @@ void js::gc::MarkingValidator::nonIncrementalMark(AutoGCSession& session) {
JSRuntime* runtime = gc->rt;
GCMarker* gcmarker = &gc->marker;
MOZ_ASSERT(gc->nursery().isEmpty());
MOZ_ASSERT(!gcmarker->isWeakMarking());
/* Wait for off-thread parsing which can allocate. */
@@ -671,7 +670,6 @@ void js::gc::MarkingValidator::validate() {
return;
}
MOZ_ASSERT(gc->nursery().isEmpty());
MOZ_ASSERT(!gc->marker.isWeakMarking());
gc->waitBackgroundSweepEnd();

View File

@@ -354,8 +354,8 @@ void WeakMap<K, V>::traceMappings(WeakMapTracer* tracer) {
template <class K, class V>
void WeakMap<K, V>::assertEntriesNotAboutToBeFinalized() {
for (Range r = Base::all(); !r.empty(); r.popFront()) {
K k(r.front().key());
MOZ_ASSERT(!gc::IsAboutToBeFinalized(&k));
auto k = gc::detail::ExtractUnbarriered(r.front().key());
MOZ_ASSERT(!gc::IsAboutToBeFinalizedUnbarriered(&k));
JSObject* delegate = gc::detail::GetDelegate(k);
if (delegate) {
MOZ_ASSERT(!gc::IsAboutToBeFinalizedUnbarriered(&delegate),

View File

@@ -59,10 +59,13 @@ void js::Mutex::preUnlockChecks() {
owningThread_.reset();
}
bool js::Mutex::isHeld() const { return owningThread_.isSome(); }
bool js::Mutex::ownedByCurrentThread() const {
// First determine this using the owningThread_ property, then check it via
// the mutex stack.
bool check = ThreadId::ThisThreadId() == owningThread_.value();
bool check = owningThread_.isSome() &&
ThreadId::ThisThreadId() == owningThread_.value();
Mutex* stack = HeldMutexStack.get();
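The new isSome() check matters because ownedByCurrentThread() is now called on mutexes that may not be held at all (see StoreBuffer::CheckAccess); calling value() on an empty Maybe would assert. A minimal sketch of the expected behaviour (hypothetical usage, debug builds only):

// Hypothetical sketch (debug-only API): the query is now safe to call when
// the mutex is not held by anyone.
js::Mutex m(js::mutexid::StoreBuffer);
MOZ_ASSERT(!m.ownedByCurrentThread());  // no longer trips Maybe::value()
m.lock();
MOZ_ASSERT(m.ownedByCurrentThread());
m.unlock();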

View File

@@ -75,6 +75,7 @@ class Mutex {
#ifdef DEBUG
public:
bool isHeld() const;
bool ownedByCurrentThread() const;
private:

View File

@@ -1619,7 +1619,6 @@ bool InnerViewTable::sweepEntry(JSObject** pkey, ViewVector& views) {
}
void InnerViewTable::sweep() {
MOZ_ASSERT(nurseryKeys.empty());
map.sweep();
}

View File

@@ -1606,8 +1606,12 @@ void JSObject::swap(JSContext* cx, HandleObject a, HandleObject b) {
* nursery pointers in either object.
*/
MOZ_ASSERT(!IsInsideNursery(a) && !IsInsideNursery(b));
cx->runtime()->gc.storeBuffer().putWholeCell(a);
cx->runtime()->gc.storeBuffer().putWholeCell(b);
gc::StoreBuffer& storeBuffer = cx->runtime()->gc.storeBuffer();
storeBuffer.putWholeCell(a);
storeBuffer.putWholeCell(b);
if (a->zone()->wasGCStarted() || b->zone()->wasGCStarted()) {
storeBuffer.setMayHavePointersToDeadCells();
}
unsigned r = NotifyGCPreSwap(a, b);

View File

@@ -30,6 +30,7 @@
_(GlobalHelperThreadState, 300) \
\
_(GCLock, 400) \
_(StoreBuffer, 400) \
\
_(SharedImmutableStringsCache, 500) \
_(FutexThread, 500) \

View File

@@ -733,6 +733,7 @@ void ConstraintTypeSet::postWriteBarrier(JSContext* cx, Type type) {
if (gc::StoreBuffer* sb = type.singletonNoBarrier()->storeBuffer()) {
sb->putGeneric(TypeSetRef(cx->zone(), this));
sb->setShouldCancelIonCompilations();
sb->setHasTypeSetPointers();
}
}
}
@@ -4575,6 +4576,8 @@ TypeZone::~TypeZone() {
void TypeZone::beginSweep() {
MOZ_ASSERT(zone()->isGCSweepingOrCompacting());
MOZ_ASSERT(
!zone()->runtimeFromMainThread()->gc.storeBuffer().hasTypeSetPointers());
// Clear the analysis pool, but don't release its data yet. While sweeping
// types any live data will be allocated into the pool.