Bug 1196847 - Part 1: Allow storage of a unique id for a cell independent of address; r=jonco

--HG--
extra : rebase_source : 870e541aaff1ff4e203dc1b90a7ff5283e10f2fe
Terrence Cole 2015-08-20 10:35:22 -07:00
parent efd3b25c78
commit 954ca0c585
14 changed files with 441 additions and 20 deletions


@ -54,7 +54,8 @@ const size_t ChunkMarkBitmapOffset = 1032352;
const size_t ChunkMarkBitmapBits = 129024;
#endif
const size_t ChunkRuntimeOffset = ChunkSize - sizeof(void*);
const size_t ChunkLocationOffset = ChunkSize - 2 * sizeof(void*) - sizeof(uint64_t);
const size_t ChunkTrailerSize = 2 * sizeof(uintptr_t) + sizeof(uint64_t);
const size_t ChunkLocationOffset = ChunkSize - ChunkTrailerSize;
const size_t ArenaZoneOffset = 0;
/*

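For orientation, a minimal sketch of how the new ChunkTrailerSize constant ties the trailer's position to the existing runtime-pointer offset. The concrete values are assumptions for illustration (a 64-bit build with 1 MiB chunks, which the offsets above imply), not the authoritative definitions from js/public/HeapAPI.h:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Assumed values for illustration only.
constexpr size_t ChunkSize = 1024 * 1024;                                      // 1 MiB chunks
constexpr size_t ChunkTrailerSize = 2 * sizeof(uintptr_t) + sizeof(uint64_t);  // 24 bytes on 64-bit
constexpr size_t ChunkLocationOffset = ChunkSize - ChunkTrailerSize;           // 1048552: trailer starts here
constexpr size_t ChunkRuntimeOffset = ChunkSize - sizeof(void*);               // 1048568: last word of the chunk

int main()
{
    // Trailer layout: uint32_t location + uint32_t padding, then StoreBuffer*,
    // then JSRuntime*, so the runtime pointer occupies the chunk's final word.
    printf("trailer starts at %zu, runtime pointer at %zu\n",
           ChunkLocationOffset, ChunkRuntimeOffset);
    return 0;
}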
js/src/ds/LockGuard.h (new file, 38 lines)

@ -0,0 +1,38 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_LockGuard_h
#define js_LockGuard_h
#include "mozilla/GuardObjects.h"
namespace js {
// An implementation of C++11's std::lock_guard, enhanced with a guard object
// to help with correct usage.
template <typename LockType>
class LockGuard
{
LockType& lockRef_;
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
public:
explicit LockGuard(LockType& lock
MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
: lockRef_(lock)
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
lockRef_.lock();
}
~LockGuard() {
lockRef_.unlock();
}
};
} // namespace js
#endif // js_LockGuard_h
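
As a usage illustration, here is a stripped-down sketch of the same RAII idea with the MOZ_GUARD_OBJECT machinery omitted and std::mutex standing in for a JS engine lock type; SimpleLockGuard is a hypothetical name, not part of the patch:

#include <cstdio>
#include <mutex>

// Simplified sketch of the LockGuard pattern above.
template <typename LockType>
class SimpleLockGuard
{
    LockType& lockRef_;

  public:
    explicit SimpleLockGuard(LockType& lock) : lockRef_(lock) { lockRef_.lock(); }
    ~SimpleLockGuard() { lockRef_.unlock(); }

    SimpleLockGuard(const SimpleLockGuard&) = delete;
    SimpleLockGuard& operator=(const SimpleLockGuard&) = delete;
};

int main()
{
    std::mutex m;
    {
        SimpleLockGuard<std::mutex> guard(m);   // lock() runs here
        printf("inside the critical section\n");
    }                                           // unlock() runs when guard is destroyed
    return 0;
}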

js/src/ds/SpinLock.h (new file, 40 lines)

@ -0,0 +1,40 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_SpinLock_h
#define js_SpinLock_h
#include "mozilla/Atomics.h"
#include "ds/LockGuard.h"
namespace js {
// A trivial spin-lock implementation. Extremely fast when rarely-contended.
class SpinLock
{
mozilla::Atomic<bool, mozilla::ReleaseAcquire> locked_;
public:
SpinLock() : locked_(false) {}
void lock() {
do {
while (locked_)
; // Spin until the lock seems free.
} while (!locked_.compareExchange(false, true)); // Atomically take the lock.
}
void unlock() {
locked_ = false;
}
};
using AutoSpinLock = LockGuard<SpinLock>;
} // namespace js
#endif // js_SpinLock_h
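
For clarity, a behavioral sketch of the test-and-test-and-set loop above, using std::atomic<bool> in place of mozilla::Atomic and exchange() in place of compareExchange(); the names are illustrative only:

#include <atomic>
#include <cstdio>
#include <thread>

// Behavioral sketch of the SpinLock above, not the real implementation.
class SketchSpinLock
{
    std::atomic<bool> locked_{false};

  public:
    void lock() {
        do {
            while (locked_.load(std::memory_order_relaxed))
                ;                                                     // Spin until the lock looks free.
        } while (locked_.exchange(true, std::memory_order_acquire));  // Then try to take it atomically.
    }
    void unlock() { locked_.store(false, std::memory_order_release); }
};

int main()
{
    SketchSpinLock lock;
    int counter = 0;
    auto work = [&] {
        for (int i = 0; i < 100000; ++i) {
            lock.lock();
            ++counter;
            lock.unlock();
        }
    };
    std::thread a(work), b(work);
    a.join();
    b.join();
    printf("counter = %d\n", counter);   // 200000 if the lock provides mutual exclusion
    return 0;
}

The inner while loop spins on plain reads so contending threads do not hammer the cache line with atomic writes; only when the lock looks free does a thread attempt the atomic acquire.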


@ -654,6 +654,11 @@ class GCRuntime
size_t maxMallocBytesAllocated() { return maxMallocBytes; }
uint64_t nextCellUniqueId() {
MOZ_ASSERT(nextCellUniqueId_ > 0);
return nextCellUniqueId_++;
}
public:
// Internal public interface
js::gc::State state() const { return incrementalState; }
@ -1008,6 +1013,9 @@ class GCRuntime
size_t maxMallocBytes;
// An incrementing id used to assign unique ids to cells that require one.
uint64_t nextCellUniqueId_;
/*
* Number of the committed arenas in all GC chunks including empty chunks.
*/


@ -293,6 +293,9 @@ class TenuredCell : public Cell
#endif
};
/* Cells are aligned to CellShift, so the largest tagged null pointer is: */
const uintptr_t LargestTaggedNullCellPointer = (1 << CellShift) - 1;
/*
* The mark bitmap has one bit per GC cell. For multi-cell GC things this
* wastes space but lets us avoid expensive divisions by the thing's size when
@ -804,6 +807,17 @@ ArenaHeader::getThingSize() const
*/
struct ChunkTrailer
{
/* Construct a Nursery ChunkTrailer. */
ChunkTrailer(JSRuntime* rt, StoreBuffer* sb)
: location(gc::ChunkLocationBitNursery), storeBuffer(sb), runtime(rt)
{}
/* Construct a Tenured heap ChunkTrailer. */
explicit ChunkTrailer(JSRuntime* rt)
: location(gc::ChunkLocationBitTenuredHeap), storeBuffer(nullptr), runtime(rt)
{}
public:
/* The index of the chunk in the nursery, or LocationTenuredHeap. */
uint32_t location;
uint32_t padding;
@ -811,11 +825,12 @@ struct ChunkTrailer
/* The store buffer for writes to things in this chunk or nullptr. */
StoreBuffer* storeBuffer;
/* This provides quick access to the runtime from absolutely anywhere. */
JSRuntime* runtime;
};
static_assert(sizeof(ChunkTrailer) == 2 * sizeof(uintptr_t) + sizeof(uint64_t),
"ChunkTrailer size is incorrect.");
static_assert(sizeof(ChunkTrailer) == ChunkTrailerSize,
"ChunkTrailer size must match the API defined size.");
/* The chunk header (located at the end of the chunk to preserve arena alignment). */
struct ChunkInfo
@ -1004,13 +1019,16 @@ struct Chunk
return reinterpret_cast<Chunk*>(addr);
}
static bool withinArenasRange(uintptr_t addr) {
static bool withinValidRange(uintptr_t addr) {
uintptr_t offset = addr & ChunkMask;
return offset < ArenasPerChunk * ArenaSize;
return Chunk::fromAddress(addr)->isNurseryChunk()
? offset < ChunkSize - sizeof(ChunkTrailer)
: offset < ArenasPerChunk * ArenaSize;
}
static size_t arenaIndex(uintptr_t addr) {
MOZ_ASSERT(withinArenasRange(addr));
MOZ_ASSERT(!Chunk::fromAddress(addr)->isNurseryChunk());
MOZ_ASSERT(withinValidRange(addr));
return (addr & ChunkMask) >> ArenaShift;
}
@ -1028,6 +1046,10 @@ struct Chunk
return info.numArenasFree != 0;
}
bool isNurseryChunk() const {
return info.trailer.storeBuffer;
}
ArenaHeader* allocateArena(JSRuntime* rt, JS::Zone* zone, AllocKind kind,
const AutoLockGC& lock);
@ -1127,7 +1149,7 @@ ArenaHeader::address() const
uintptr_t addr = reinterpret_cast<uintptr_t>(this);
MOZ_ASSERT(addr);
MOZ_ASSERT(!(addr & ArenaMask));
MOZ_ASSERT(Chunk::withinArenasRange(addr));
MOZ_ASSERT(Chunk::withinValidRange(addr));
return addr;
}
@ -1296,7 +1318,7 @@ Cell::address() const
{
uintptr_t addr = uintptr_t(this);
MOZ_ASSERT(addr % CellSize == 0);
MOZ_ASSERT(Chunk::withinArenasRange(addr));
MOZ_ASSERT(Chunk::withinValidRange(addr));
return addr;
}


@ -2140,7 +2140,13 @@ js::TenuringTracer::moveObjectToTenured(JSObject* dst, JSObject* src, AllocKind
if (src->is<ArrayObject>())
tenuredSize = srcSize = sizeof(NativeObject);
// Copy the Cell contents.
js_memcpy(dst, src, srcSize);
// Move any hash code attached to the object.
src->zone()->transferUniqueId(dst, src);
// Move the slots and elements, if we need to.
if (src->isNative()) {
NativeObject* ndst = &dst->as<NativeObject>();
NativeObject* nsrc = &src->as<NativeObject>();


@ -426,7 +426,7 @@ ToMarkable(Cell* cell)
MOZ_ALWAYS_INLINE bool
IsNullTaggedPointer(void* p)
{
return uintptr_t(p) < 32;
return uintptr_t(p) <= LargestTaggedNullCellPointer;
}
// HashKeyRef represents a reference to a HashMap key. This should normally

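The intent of LargestTaggedNullCellPointer is that small "tagged null" values and cell unique ids can never collide: the uid counter is seeded just past it (see the GCRuntime constructor later in this patch). A small standalone sketch, assuming CellShift is 3 (8-byte cell alignment):

#include <cassert>
#include <cstdint>

// Assumed for illustration: CellShift is 3, so the largest tagged null
// "pointer" is 7 and the first unique id handed out is 8.
constexpr uintptr_t CellShift = 3;
constexpr uintptr_t LargestTaggedNullCellPointer = (1 << CellShift) - 1;

bool IsNullTaggedPointer(void* p)
{
    return uintptr_t(p) <= LargestTaggedNullCellPointer;
}

int main()
{
    uint64_t firstUniqueId = LargestTaggedNullCellPointer + 1;   // matches nextCellUniqueId_'s seed
    assert(IsNullTaggedPointer(reinterpret_cast<void*>(uintptr_t(7))));
    assert(!IsNullTaggedPointer(reinterpret_cast<void*>(uintptr_t(8))));
    assert(firstUniqueId == 8);   // unique ids never look like tagged null pointers
    return 0;
}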

@ -64,6 +64,9 @@ js::Nursery::init(uint32_t maxNurseryBytes)
if (!mallocedBuffers.init())
return false;
if (!cellsWithUid_.init())
return false;
void* heap = MapAlignedPages(nurserySize(), Alignment);
if (!heap)
return false;
@ -648,6 +651,16 @@ js::Nursery::waitBackgroundFreeEnd()
void
js::Nursery::sweep()
{
/* Sweep unique ids in all in-use chunks. */
for (CellsWithUniqueIdSet::Enum e(cellsWithUid_); !e.empty(); e.popFront()) {
JSObject* obj = static_cast<JSObject*>(e.front());
if (!IsForwarded(obj))
obj->zone()->removeUniqueId(obj);
else
MOZ_ASSERT(Forwarded(obj)->zone()->hasUniqueId(Forwarded(obj)));
}
cellsWithUid_.clear();
#ifdef JS_GC_ZEAL
/* Poison the nursery contents so touching a freed object will crash. */
JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, nurserySize());
@ -665,10 +678,8 @@ js::Nursery::sweep()
{
#ifdef JS_CRASH_DIAGNOSTICS
JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, allocationEnd() - start());
for (int i = 0; i < numActiveChunks_; ++i) {
chunk(i).trailer.location = gc::ChunkLocationBitNursery;
chunk(i).trailer.runtime = runtime();
}
for (int i = 0; i < numActiveChunks_; ++i)
initChunk(i);
#endif
setCurrentChunk(0);
}


@ -182,6 +182,14 @@ class Nursery
void waitBackgroundFreeEnd();
bool addedUniqueIdToCell(gc::Cell* cell) {
if (!IsInsideNursery(cell) || !isEnabled())
return true;
MOZ_ASSERT(cellsWithUid_.initialized());
MOZ_ASSERT(!cellsWithUid_.has(cell));
return cellsWithUid_.put(cell);
}
size_t sizeOfHeapCommitted() const {
return numActiveChunks_ * gc::ChunkSize;
}
@ -265,6 +273,21 @@ class Nursery
typedef HashMap<void*, void*, PointerHasher<void*, 1>, SystemAllocPolicy> ForwardedBufferMap;
ForwardedBufferMap forwardedBuffers;
/*
* When we assign a unique id to a cell in the nursery, that almost always
* means the cell will end up in a hash table and thus be held live, so the
* uid is automatically moved from the nursery to the cell's new home in the
* tenured heap. It is possible, if rare, for an object that acquired a uid
* to die before the next collection, in which case we need to know to
* remove it when we sweep.
*
* Note: we store the pointers as Cell* here, resulting in an ugly cast in
* sweep. This is because this structure is used to help implement
* stable object hashing and we have to break the cycle somehow.
*/
using CellsWithUniqueIdSet = HashSet<gc::Cell*, PointerHasher<gc::Cell*, 3>, SystemAllocPolicy>;
CellsWithUniqueIdSet cellsWithUid_;
/* The maximum number of bytes allowed to reside in nursery buffers. */
static const size_t MaxNurseryBufferSize = 1024;
@ -286,10 +309,8 @@ class Nursery
}
MOZ_ALWAYS_INLINE void initChunk(int chunkno) {
NurseryChunkLayout& c = chunk(chunkno);
c.trailer.storeBuffer = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
c.trailer.location = gc::ChunkLocationBitNursery;
c.trailer.runtime = runtime();
gc::StoreBuffer* sb = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
new (&chunk(chunkno).trailer) gc::ChunkTrailer(runtime(), sb);
}
MOZ_ALWAYS_INLINE void setCurrentChunk(int chunkno) {


@ -13,9 +13,12 @@
#include "jscntxt.h"
#include "ds/SpinLock.h"
#include "ds/SplayTree.h"
#include "gc/FindSCCs.h"
#include "gc/GCRuntime.h"
#include "js/TracingAPI.h"
#include "vm/MallocProvider.h"
#include "vm/TypeInference.h"
namespace js {
@ -58,6 +61,68 @@ class ZoneHeapThreshold
const GCSchedulingTunables& tunables);
};
// Maps a Cell* to a unique, 64-bit id. This implementation uses a SplayTree
// instead of a HashMap. While a SplayTree has worse worst-case performance,
// the typical access pattern for stable hashmap keys is heavily clustered:
// the same key is looked up many times in a row. Thus, we typically get
// very close to HashMap-like O(1) performance with much denser storage.
class UniqueIdMap
{
struct Pair {
uint64_t uniqueId;
Cell* key;
public:
Pair(Cell* cell, uint64_t uid) : uniqueId(uid), key(cell) {}
Pair(const Pair& other) : uniqueId(other.uniqueId), key(other.key) {}
static ptrdiff_t compare(const Pair& a, const Pair& b) {
return b.key - a.key;
}
};
// Use a relatively small chunk, as many users will not have many entries.
const size_t AllocChunkSize = mozilla::RoundUpPow2(16 * sizeof(Pair));
LifoAlloc alloc;
SplayTree<Pair, Pair> map;
public:
UniqueIdMap() : alloc(AllocChunkSize), map(&alloc) {}
// Returns true if the map is empty.
bool isEmpty() { return map.empty(); }
// Return true if the cell is present in the map.
bool has(Cell* cell) {
return map.maybeLookup(Pair(cell, 0));
}
// Returns whether the cell is present or not. If true, sets the uid.
bool lookup(Cell* cell, uint64_t* uidp) {
Pair tmp(nullptr, 0);
if (!map.contains(Pair(cell, 0), &tmp))
return false;
MOZ_ASSERT(tmp.key == cell);
MOZ_ASSERT(tmp.uniqueId > 0);
*uidp = tmp.uniqueId;
return true;
}
// Inserts a value; returns false on OOM.
bool put(Cell* cell, uint64_t uid) {
MOZ_ASSERT(uid > 0);
return map.insert(Pair(cell, uid));
}
// Remove the given key from the map.
void remove(Cell* cell) {
map.remove(Pair(cell, 0));
}
};
extern uint64_t NextCellUniqueId(JSRuntime* rt);
} // namespace gc
} // namespace js
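
UniqueIdMap itself depends on SplayTree and LifoAlloc, so it is not runnable in isolation. The following is only a rough behavioral model of the same interface, with std::map standing in for the splay tree, a hypothetical FakeCell in place of Cell, and an always-succeeding put() (the real one can fail on OOM):

#include <cassert>
#include <cstdint>
#include <map>

struct FakeCell {};   // stand-in for js::gc::Cell

// Behavioral model only: same method names as UniqueIdMap, different internals.
class UniqueIdMapModel
{
    std::map<FakeCell*, uint64_t> map_;

  public:
    bool isEmpty() const { return map_.empty(); }
    bool has(FakeCell* cell) const { return map_.count(cell) != 0; }
    bool lookup(FakeCell* cell, uint64_t* uidp) const {
        auto it = map_.find(cell);
        if (it == map_.end())
            return false;
        *uidp = it->second;
        return true;
    }
    bool put(FakeCell* cell, uint64_t uid) { map_[cell] = uid; return true; }
    void remove(FakeCell* cell) { map_.erase(cell); }
};

int main()
{
    FakeCell oldLocation, newLocation;
    UniqueIdMapModel ids;
    assert(ids.put(&oldLocation, 8));

    // Mirrors Zone::transferUniqueId below: look up, remove from the source,
    // then re-insert under the cell's new address.
    uint64_t uid = 0;
    assert(ids.lookup(&oldLocation, &uid) && uid == 8);
    ids.remove(&oldLocation);
    assert(ids.put(&newLocation, uid));
    assert(!ids.has(&oldLocation) && ids.has(&newLocation));
    return 0;
}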
@ -250,6 +315,13 @@ struct Zone : public JS::shadow::Zone,
return isOnList();
}
// Side map for storing unique ids for cells, independent of address.
js::gc::UniqueIdMap uniqueIds_;
// Guards the uniqueIds_ map as it is accessed directly from the background
// sweeping thread. This uses a spinlock, since it is normally uncontended.
js::SpinLock uniqueIdsLock_;
public:
bool hasDebuggers() const { return debuggers && debuggers->length(); }
DebuggerVector* getDebuggers() const { return debuggers; }
@ -318,6 +390,74 @@ struct Zone : public JS::shadow::Zone,
mozilla::DebugOnly<unsigned> gcLastZoneGroupIndex;
// Creates a HashNumber based on getUniqueId. Returns false on OOM.
bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
uint64_t uid;
if (!getUniqueId(cell, &uid))
return false;
*hashp = js::HashNumber(uid >> 32) ^ js::HashNumber(uid & 0xFFFFFFFF);
return true;
}
// Puts an existing UID in |uidp|, or creates a new UID for this Cell and
// puts that into |uidp|. Returns false on OOM.
bool getUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
MOZ_ASSERT(uidp);
js::AutoSpinLock lock(uniqueIdsLock_);
// Get an existing uid, if one has been set.
if (uniqueIds_.lookup(cell, uidp))
return true;
// Set a new uid on the cell.
*uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
if (!uniqueIds_.put(cell, *uidp))
return false;
// If the cell is in the nursery (which should be rare), we also need to
// tell the nursery about it so that it can sweep the uid if the thing
// does not get tenured.
if (!runtimeFromAnyThread()->gc.nursery.addedUniqueIdToCell(cell))
js::CrashAtUnhandlableOOM("failed to allocate tracking data for a nursery uid");
return true;
}
// Return true if this cell has a UID associated with it.
bool hasUniqueId(js::gc::Cell* cell) {
js::AutoSpinLock lock(uniqueIdsLock_);
uint64_t tmp;
return uniqueIds_.lookup(cell, &tmp);
}
// Transfer an id from another cell. This must only be called on behalf of a
// moving GC. This method is infallible.
void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
MOZ_ASSERT(src != tgt);
MOZ_ASSERT(!IsInsideNursery(tgt));
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
js::AutoSpinLock lock(uniqueIdsLock_);
// Return early if we do not have a UID set on the source.
uint64_t uid = 0;
if (!uniqueIds_.lookup(src, &uid))
return;
// Remove from the source first to guarantee that at least one node
// will be available in the free pool. This allows us to avoid OOM
// in all cases when transferring uids.
uniqueIds_.remove(src);
MOZ_ASSERT(uid > 0);
mozilla::DebugOnly<bool> ok = uniqueIds_.put(tgt, uid);
MOZ_ASSERT(ok);
}
// Remove any unique id associated with this Cell.
void removeUniqueId(js::gc::Cell* cell) {
js::AutoSpinLock lock(uniqueIdsLock_);
uniqueIds_.remove(cell);
}
private:
js::jit::JitZone* jitZone_;

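Zone::getHashCode above folds the 64-bit uid into a 32-bit HashNumber by XOR-ing its high and low words, so both halves contribute once uids grow past 2^32. A standalone sketch of that folding; FoldUniqueId and the local HashNumber alias are illustrative names:

#include <cstdint>
#include <cstdio>

using HashNumber = uint32_t;   // mirrors js::HashNumber

// Fold a 64-bit unique id into a 32-bit hash by XOR-ing its halves.
HashNumber FoldUniqueId(uint64_t uid)
{
    return HashNumber(uid >> 32) ^ HashNumber(uid & 0xFFFFFFFF);
}

int main()
{
    printf("%u\n", FoldUniqueId(8));                 // small uid: hash is just the low word
    printf("%u\n", FoldUniqueId(0x100000007ULL));    // both words contribute: 1 ^ 7 == 6
    return 0;
}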

@ -42,6 +42,7 @@ UNIFIED_SOURCES += [
'testGCMarking.cpp',
'testGCOutOfMemory.cpp',
'testGCStoreBufferRemoval.cpp',
'testGCUniqueId.cpp',
'testGetPropertyDescriptor.cpp',
'testHashTable.cpp',
'testIndexToString.cpp',


@ -0,0 +1,120 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "gc/GCInternals.h"
#include "gc/Zone.h"
static void
MinimizeHeap(JSRuntime* rt)
{
// The second collection is to force us to wait for the background
// sweeping that the first GC started to finish.
JS_GC(rt);
JS_GC(rt);
js::gc::AutoFinishGC finish(rt);
}
BEGIN_TEST(testGCUID)
{
#ifdef JS_GC_ZEAL
AutoLeaveZeal nozeal(cx);
#endif /* JS_GC_ZEAL */
uint64_t uid = 0;
uint64_t tmp = 0;
// Ensure the heap is as minimal as it can get.
MinimizeHeap(rt);
JS::RootedObject obj(cx, JS_NewPlainObject(cx));
uintptr_t nurseryAddr = uintptr_t(obj.get());
CHECK(obj);
CHECK(js::gc::IsInsideNursery(obj));
// Do not start with an ID.
CHECK(!obj->zone()->hasUniqueId(obj));
// Ensure we can get a new UID.
CHECK(obj->zone()->getUniqueId(obj, &uid));
CHECK(uid > js::gc::LargestTaggedNullCellPointer);
// We should now have an id.
CHECK(obj->zone()->hasUniqueId(obj));
// Calling again should get us the same thing.
CHECK(obj->zone()->getUniqueId(obj, &tmp));
CHECK(uid == tmp);
// Tenure the thing and check that the UID moved with it.
MinimizeHeap(rt);
uintptr_t tenuredAddr = uintptr_t(obj.get());
CHECK(tenuredAddr != nurseryAddr);
CHECK(!js::gc::IsInsideNursery(obj));
CHECK(obj->zone()->hasUniqueId(obj));
CHECK(obj->zone()->getUniqueId(obj, &tmp));
CHECK(uid == tmp);
// Allocate a new nursery thing in the same location and check that we
// removed the prior uid that was attached to the location.
obj = JS_NewPlainObject(cx);
CHECK(obj);
CHECK(uintptr_t(obj.get()) == nurseryAddr);
CHECK(!obj->zone()->hasUniqueId(obj));
// Try to get another tenured object in the same location and check that
// the uid was removed correctly.
obj = nullptr;
MinimizeHeap(rt);
obj = JS_NewPlainObject(cx);
MinimizeHeap(rt);
CHECK(uintptr_t(obj.get()) == tenuredAddr);
CHECK(!obj->zone()->hasUniqueId(obj));
CHECK(obj->zone()->getUniqueId(obj, &tmp));
CHECK(uid != tmp);
uid = tmp;
// Allocate a few arenas worth of objects to ensure we get some compaction.
const static size_t N = 2049;
using ObjectVector = js::TraceableVector<JSObject*>;
JS::Rooted<ObjectVector> vec(cx, ObjectVector(cx));
for (size_t i = 0; i < N; ++i) {
obj = JS_NewPlainObject(cx);
CHECK(obj);
CHECK(vec.append(obj));
}
// Transfer our vector to tenured if it isn't there already.
MinimizeHeap(rt);
// Tear holes in the heap by unrooting the even objects and collecting.
JS::Rooted<ObjectVector> vec2(cx, ObjectVector(cx));
for (size_t i = 0; i < N; ++i) {
if (i % 2 == 1)
vec2.append(vec[i]);
}
vec.clear();
MinimizeHeap(rt);
// Grab the last object in the vector as our object of interest.
obj = vec2.back();
CHECK(obj);
tenuredAddr = uintptr_t(obj.get());
CHECK(obj->zone()->getUniqueId(obj, &uid));
// Force a compaction to move the object and check that the uid moved to
// the new tenured heap location.
JS::PrepareForFullGC(rt);
JS::GCForReason(rt, GC_SHRINK, JS::gcreason::API);
MinimizeHeap(rt);
CHECK(uintptr_t(obj.get()) != tenuredAddr);
CHECK(obj->zone()->hasUniqueId(obj));
CHECK(obj->zone()->getUniqueId(obj, &tmp));
CHECK(uid == tmp);
return true;
}
END_TEST(testGCUID)


@ -824,9 +824,7 @@ Chunk::init(JSRuntime* rt)
/* Initialize the chunk info. */
info.init();
info.trailer.storeBuffer = nullptr;
info.trailer.location = ChunkLocationBitTenuredHeap;
info.trailer.runtime = rt;
new (&info.trailer) ChunkTrailer(rt);
/* The rest of info fields are initialized in pickChunk. */
}
@ -1100,6 +1098,7 @@ GCRuntime::GCRuntime(JSRuntime* rt) :
marker(rt),
usage(nullptr),
maxMallocBytes(0),
nextCellUniqueId_(LargestTaggedNullCellPointer + 1), // Ensure disjoint from null tagged pointers.
numArenasFreeCommitted(0),
verifyPreData(nullptr),
chunkAllocationSinceLastGC(false),
@ -2022,6 +2021,9 @@ RelocateCell(Zone* zone, TenuredCell* src, AllocKind thingKind, size_t thingSize
// Copy source cell contents to destination.
memcpy(dst, src, thingSize);
// Move any uid attached to the object.
src->zone()->transferUniqueId(dst, src);
if (IsObjectAllocKind(thingKind)) {
JSObject* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
JSObject* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
@ -7315,6 +7317,12 @@ JS::IsGenerationalGCEnabled(JSRuntime* rt)
return rt->gc.isGenerationalGCEnabled();
}
uint64_t
js::gc::NextCellUniqueId(JSRuntime* rt)
{
return rt->gc.nextCellUniqueId();
}
namespace js {
namespace gc {
namespace MemInfo {


@ -75,6 +75,11 @@ JSObject::finalize(js::FreeOp* fop)
MOZ_ASSERT(CurrentThreadCanAccessRuntime(fop->runtime()));
}
#endif
// Remove any UID attached to this object.
if (zoneFromAnyThread()->hasUniqueId(this))
zoneFromAnyThread()->removeUniqueId(this);
const js::Class* clasp = getClass();
if (clasp->finalize)
clasp->finalize(fop, this);