Trigger the garbage collector when allocating InternalClass objects
Since we check the icAllocator's slots in shouldRunGC(), we should also call shouldRunGC() when adding slots. Otherwise we might never run the GC when only allocating InternalClasses. In addition, account for the "unmanaged" size of the PropertyAttributes that are part of the InternalClass objects. Those can be large. In cases where an excessive number of large InternalClass objects is created, the garbage collector is now invoked frequently, which costs a significant number of CPU cycles but prevents the memory usage from growing indefinitely. Task-number: QTBUG-58559 Change-Id: Icf102cb6100f6dba212b8bffe1c178897880eda0 Reviewed-by: Lars Knoll <lars.knoll@qt.io>
This commit is contained in:
parent
156066d475
commit
d8110b53ed
|
@@ -44,6 +44,7 @@
|
|||
#include "qv4object_p.h"
|
||||
#include "qv4identifiertable_p.h"
|
||||
#include "qv4value_p.h"
|
||||
#include "qv4mm_p.h"
|
||||
|
||||
QT_BEGIN_NAMESPACE
|
||||
|
||||
|
@@ -204,7 +205,62 @@ void SharedInternalClassDataPrivate<PropertyKey>::mark(MarkStack *s)
|
|||
data->mark(s);
|
||||
}
|
||||
|
||||
SharedInternalClassDataPrivate<PropertyAttributes>::SharedInternalClassDataPrivate(
|
||||
const SharedInternalClassDataPrivate<PropertyAttributes> &other, uint pos,
|
||||
PropertyAttributes value)
|
||||
: refcount(1),
|
||||
m_alloc(pos + 8),
|
||||
m_size(pos + 1),
|
||||
m_engine(other.m_engine)
|
||||
{
|
||||
m_engine->memoryManager->changeUnmanagedHeapSizeUsage(m_alloc * sizeof(PropertyAttributes));
|
||||
data = new PropertyAttributes[m_alloc];
|
||||
if (other.data)
|
||||
memcpy(data, other.data, (m_size - 1) * sizeof(PropertyAttributes));
|
||||
data[pos] = value;
|
||||
}
|
||||
|
||||
SharedInternalClassDataPrivate<PropertyAttributes>::SharedInternalClassDataPrivate(
|
||||
const SharedInternalClassDataPrivate<PropertyAttributes> &other)
|
||||
: refcount(1),
|
||||
m_alloc(other.m_alloc),
|
||||
m_size(other.m_size),
|
||||
m_engine(other.m_engine)
|
||||
{
|
||||
if (m_alloc) {
|
||||
m_engine->memoryManager->changeUnmanagedHeapSizeUsage(m_alloc * sizeof(PropertyAttributes));
|
||||
data = new PropertyAttributes[m_alloc];
|
||||
memcpy(data, other.data, m_size*sizeof(PropertyAttributes));
|
||||
} else {
|
||||
data = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
SharedInternalClassDataPrivate<PropertyAttributes>::~SharedInternalClassDataPrivate()
|
||||
{
|
||||
m_engine->memoryManager->changeUnmanagedHeapSizeUsage(
|
||||
-qptrdiff(m_alloc * sizeof(PropertyAttributes)));
|
||||
delete [] data;
|
||||
}
|
||||
|
||||
void SharedInternalClassDataPrivate<PropertyAttributes>::grow() {
|
||||
if (!m_alloc) {
|
||||
m_alloc = 4;
|
||||
m_engine->memoryManager->changeUnmanagedHeapSizeUsage(
|
||||
2 * m_alloc * sizeof(PropertyAttributes));
|
||||
} else {
|
||||
m_engine->memoryManager->changeUnmanagedHeapSizeUsage(
|
||||
m_alloc * sizeof(PropertyAttributes));
|
||||
}
|
||||
|
||||
auto *n = new PropertyAttributes[m_alloc * 2];
|
||||
if (data) {
|
||||
memcpy(n, data, m_alloc*sizeof(PropertyAttributes));
|
||||
delete [] data;
|
||||
}
|
||||
data = n;
|
||||
m_alloc *= 2;
|
||||
}
|
||||
|
||||
namespace Heap {
|
||||
|
||||
|
|
|
@@ -149,55 +149,31 @@ inline PropertyHash::Entry *PropertyHash::lookup(PropertyKey identifier) const
|
|||
}
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
struct SharedInternalClassDataPrivate {
|
||||
SharedInternalClassDataPrivate(ExecutionEngine *)
|
||||
template<class T>
|
||||
struct SharedInternalClassDataPrivate {};
|
||||
|
||||
template<>
|
||||
struct SharedInternalClassDataPrivate<PropertyAttributes> {
|
||||
SharedInternalClassDataPrivate(ExecutionEngine *engine)
|
||||
: refcount(1),
|
||||
m_alloc(0),
|
||||
m_size(0),
|
||||
data(nullptr)
|
||||
data(nullptr),
|
||||
m_engine(engine)
|
||||
{ }
|
||||
SharedInternalClassDataPrivate(const SharedInternalClassDataPrivate &other)
|
||||
: refcount(1),
|
||||
m_alloc(other.m_alloc),
|
||||
m_size(other.m_size)
|
||||
{
|
||||
if (m_alloc) {
|
||||
data = new T[m_alloc];
|
||||
memcpy(data, other.data, m_size*sizeof(T));
|
||||
}
|
||||
}
|
||||
SharedInternalClassDataPrivate(const SharedInternalClassDataPrivate &other, uint pos, T value)
|
||||
: refcount(1),
|
||||
m_alloc(pos + 8),
|
||||
m_size(pos + 1)
|
||||
{
|
||||
data = new T[m_alloc];
|
||||
if (other.data)
|
||||
memcpy(data, other.data, (m_size - 1)*sizeof(T));
|
||||
data[pos] = value;
|
||||
}
|
||||
~SharedInternalClassDataPrivate() { delete [] data; }
|
||||
SharedInternalClassDataPrivate(const SharedInternalClassDataPrivate<PropertyAttributes> &other);
|
||||
SharedInternalClassDataPrivate(const SharedInternalClassDataPrivate<PropertyAttributes> &other,
|
||||
uint pos, PropertyAttributes value);
|
||||
~SharedInternalClassDataPrivate();
|
||||
|
||||
|
||||
void grow() {
|
||||
if (!m_alloc)
|
||||
m_alloc = 4;
|
||||
T *n = new T[m_alloc * 2];
|
||||
if (data) {
|
||||
memcpy(n, data, m_alloc*sizeof(T));
|
||||
delete [] data;
|
||||
}
|
||||
data = n;
|
||||
m_alloc *= 2;
|
||||
}
|
||||
void grow();
|
||||
|
||||
uint alloc() const { return m_alloc; }
|
||||
uint size() const { return m_size; }
|
||||
void setSize(uint s) { m_size = s; }
|
||||
|
||||
T at(uint i) { Q_ASSERT(data && i < m_alloc); return data[i]; }
|
||||
void set(uint i, T t) { Q_ASSERT(data && i < m_alloc); data[i] = t; }
|
||||
PropertyAttributes at(uint i) { Q_ASSERT(data && i < m_alloc); return data[i]; }
|
||||
void set(uint i, PropertyAttributes t) { Q_ASSERT(data && i < m_alloc); data[i] = t; }
|
||||
|
||||
void mark(MarkStack *) {}
|
||||
|
||||
|
@@ -205,7 +181,8 @@ struct SharedInternalClassDataPrivate {
|
|||
private:
|
||||
uint m_alloc;
|
||||
uint m_size;
|
||||
T *data;
|
||||
PropertyAttributes *data;
|
||||
ExecutionEngine *m_engine;
|
||||
};
|
||||
|
||||
template<>
|
||||
|
|
|
@@ -93,8 +93,6 @@
|
|||
#include <pthread_np.h>
|
||||
#endif
|
||||
|
||||
#define MIN_UNMANAGED_HEAPSIZE_GC_LIMIT std::size_t(128 * 1024)
|
||||
|
||||
Q_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
|
||||
Q_DECLARE_LOGGING_CATEGORY(lcGcStats)
|
||||
Q_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
|
||||
|
@@ -759,7 +757,7 @@ MemoryManager::MemoryManager(ExecutionEngine *engine)
|
|||
, hugeItemAllocator(chunkAllocator, engine)
|
||||
, m_persistentValues(new PersistentValueStorage(engine))
|
||||
, m_weakValues(new PersistentValueStorage(engine))
|
||||
, unmanagedHeapSizeGCLimit(MIN_UNMANAGED_HEAPSIZE_GC_LIMIT)
|
||||
, unmanagedHeapSizeGCLimit(MinUnmanagedHeapSizeGCLimit)
|
||||
, aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
|
||||
, gcStats(lcGcStats().isDebugEnabled())
|
||||
, gcCollectorStats(lcGcAllocatorStats().isDebugEnabled())
|
||||
|
@@ -779,35 +777,9 @@ Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
|
|||
lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift;
|
||||
++allocationCount;
|
||||
#endif
|
||||
|
||||
bool didGCRun = false;
|
||||
if (aggressiveGC) {
|
||||
runGC();
|
||||
didGCRun = true;
|
||||
}
|
||||
|
||||
unmanagedHeapSize += unmanagedSize;
|
||||
if (unmanagedHeapSize > unmanagedHeapSizeGCLimit) {
|
||||
if (!didGCRun)
|
||||
runGC();
|
||||
|
||||
if (3*unmanagedHeapSizeGCLimit <= 4*unmanagedHeapSize)
|
||||
// more than 75% full, raise limit
|
||||
unmanagedHeapSizeGCLimit = std::max(unmanagedHeapSizeGCLimit, unmanagedHeapSize) * 2;
|
||||
else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit)
|
||||
// less than 25% full, lower limit
|
||||
unmanagedHeapSizeGCLimit = qMax(MIN_UNMANAGED_HEAPSIZE_GC_LIMIT, unmanagedHeapSizeGCLimit/2);
|
||||
didGCRun = true;
|
||||
}
|
||||
|
||||
HeapItem *m = blockAllocator.allocate(stringSize);
|
||||
if (!m) {
|
||||
if (!didGCRun && shouldRunGC())
|
||||
runGC();
|
||||
m = blockAllocator.allocate(stringSize, true);
|
||||
}
|
||||
|
||||
// qDebug() << "allocated string" << m;
|
||||
HeapItem *m = allocate(&blockAllocator, stringSize);
|
||||
memset(m, 0, stringSize);
|
||||
return *m;
|
||||
}
|
||||
|
@@ -819,32 +791,11 @@ Heap::Base *MemoryManager::allocData(std::size_t size)
|
|||
++allocationCount;
|
||||
#endif
|
||||
|
||||
bool didRunGC = false;
|
||||
if (aggressiveGC) {
|
||||
runGC();
|
||||
didRunGC = true;
|
||||
}
|
||||
|
||||
Q_ASSERT(size >= Chunk::SlotSize);
|
||||
Q_ASSERT(size % Chunk::SlotSize == 0);
|
||||
|
||||
// qDebug() << "unmanagedHeapSize:" << unmanagedHeapSize << "limit:" << unmanagedHeapSizeGCLimit << "unmanagedSize:" << unmanagedSize;
|
||||
|
||||
if (size > Chunk::DataSize) {
|
||||
HeapItem *h = hugeItemAllocator.allocate(size);
|
||||
// qDebug() << "allocating huge item" << h;
|
||||
return *h;
|
||||
}
|
||||
|
||||
HeapItem *m = blockAllocator.allocate(size);
|
||||
if (!m) {
|
||||
if (!didRunGC && shouldRunGC())
|
||||
runGC();
|
||||
m = blockAllocator.allocate(size, true);
|
||||
}
|
||||
|
||||
HeapItem *m = allocate(&blockAllocator, size);
|
||||
memset(m, 0, size);
|
||||
// qDebug() << "allocating data" << m;
|
||||
return *m;
|
||||
}
|
||||
|
||||
|
|
|
@@ -264,13 +264,13 @@ public:
|
|||
size_t getLargeItemsMem() const;
|
||||
|
||||
// called when a JS object grows itself. Specifically: Heap::String::append
|
||||
// and InternalClassDataPrivate<PropertyAttributes>.
|
||||
void changeUnmanagedHeapSizeUsage(qptrdiff delta) { unmanagedHeapSize += delta; }
|
||||
|
||||
template<typename ManagedType>
|
||||
typename ManagedType::Data *allocIC()
|
||||
{
|
||||
size_t size = align(sizeof(typename ManagedType::Data));
|
||||
Heap::Base *b = *icAllocator.allocate(size, true);
|
||||
Heap::Base *b = *allocate(&icAllocator, align(sizeof(typename ManagedType::Data)));
|
||||
return static_cast<typename ManagedType::Data *>(b);
|
||||
}
|
||||
|
||||
|
@@ -284,12 +284,52 @@ protected:
|
|||
Heap::Object *allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers);
|
||||
|
||||
private:
|
||||
enum {
|
||||
MinUnmanagedHeapSizeGCLimit = 128 * 1024
|
||||
};
|
||||
|
||||
void collectFromJSStack(MarkStack *markStack) const;
|
||||
void mark();
|
||||
void sweep(bool lastSweep = false, ClassDestroyStatsCallback classCountPtr = nullptr);
|
||||
bool shouldRunGC() const;
|
||||
void collectRoots(MarkStack *markStack);
|
||||
|
||||
HeapItem *allocate(BlockAllocator *allocator, std::size_t size)
|
||||
{
|
||||
bool didGCRun = false;
|
||||
if (aggressiveGC) {
|
||||
runGC();
|
||||
didGCRun = true;
|
||||
}
|
||||
|
||||
if (unmanagedHeapSize > unmanagedHeapSizeGCLimit) {
|
||||
if (!didGCRun)
|
||||
runGC();
|
||||
|
||||
if (3*unmanagedHeapSizeGCLimit <= 4 * unmanagedHeapSize) {
|
||||
// more than 75% full, raise limit
|
||||
unmanagedHeapSizeGCLimit = std::max(unmanagedHeapSizeGCLimit,
|
||||
unmanagedHeapSize) * 2;
|
||||
} else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
|
||||
// less than 25% full, lower limit
|
||||
unmanagedHeapSizeGCLimit = qMax(std::size_t(MinUnmanagedHeapSizeGCLimit),
|
||||
unmanagedHeapSizeGCLimit/2);
|
||||
}
|
||||
didGCRun = true;
|
||||
}
|
||||
|
||||
if (size > Chunk::DataSize)
|
||||
return hugeItemAllocator.allocate(size);
|
||||
|
||||
if (HeapItem *m = allocator->allocate(size))
|
||||
return m;
|
||||
|
||||
if (!didGCRun && shouldRunGC())
|
||||
runGC();
|
||||
|
||||
return allocator->allocate(size, true);
|
||||
}
|
||||
|
||||
public:
|
||||
QV4::ExecutionEngine *engine;
|
||||
ChunkAllocator *chunkAllocator;
|
||||
|
|
|
@@ -112,26 +112,40 @@ void tst_qv4mm::accessParentOnDestruction()
|
|||
|
||||
void tst_qv4mm::clearICParent()
|
||||
{
|
||||
QJSEngine engine;
|
||||
QJSValue value = engine.evaluate(
|
||||
"(function() {\n"
|
||||
" var test = Object.create(null);\n"
|
||||
" for (var i = 0; i < 100; i++)\n"
|
||||
" test[(\"key_\"+i)] = true;\n"
|
||||
" for (var i = 0; i < 100; i++)\n"
|
||||
" delete test[\"key_\" + i];\n"
|
||||
" return test;\n"
|
||||
"})();"
|
||||
);
|
||||
engine.collectGarbage();
|
||||
QV4::Value *v4Value = QJSValuePrivate::getValue(&value);
|
||||
QVERIFY(v4Value);
|
||||
QV4::Heap::Object *v4Object = v4Value->toObject(engine.handle());
|
||||
QVERIFY(v4Object);
|
||||
QV4::ExecutionEngine engine;
|
||||
QV4::Scope scope(engine.rootContext());
|
||||
QV4::ScopedObject object(scope, engine.newObject());
|
||||
|
||||
// It should garbage collect the parents of the internalClass,
|
||||
// as those aren't used anywhere else.
|
||||
QCOMPARE(v4Object->internalClass->parent, nullptr);
|
||||
// Keep identifiers in a separate array so that we don't have to allocate them in the loop that
|
||||
// should test the GC on InternalClass allocations.
|
||||
QV4::ScopedArrayObject identifiers(scope, engine.newArrayObject());
|
||||
for (uint i = 0; i < 16 * 1024; ++i) {
|
||||
QV4::Scope scope(&engine);
|
||||
QV4::ScopedString s(scope);
|
||||
s = engine.newIdentifier(QString::fromLatin1("key%1").arg(i));
|
||||
identifiers->push_back(s);
|
||||
|
||||
QV4::ScopedValue v(scope);
|
||||
v->setDouble(i);
|
||||
object->insertMember(s, v);
|
||||
}
|
||||
|
||||
// When allocating the InternalClass objects required for deleting properties, the GC should
|
||||
// eventually run and remove all but the last two.
|
||||
// If we ever manage to avoid allocating the InternalClasses in the first place we will need
|
||||
// to change this test.
|
||||
for (uint i = 0; i < 16 * 1024; ++i) {
|
||||
QV4::Scope scope(&engine);
|
||||
QV4::ScopedString s(scope, identifiers->getIndexed(i));
|
||||
QV4::Scoped<QV4::InternalClass> ic(scope, object->internalClass());
|
||||
QVERIFY(ic->d()->parent != nullptr);
|
||||
object->deleteProperty(s->toPropertyKey());
|
||||
QVERIFY(object->internalClass() != ic->d());
|
||||
QCOMPARE(object->internalClass()->parent, ic->d());
|
||||
if (ic->d()->parent == nullptr)
|
||||
return;
|
||||
}
|
||||
QFAIL("Garbage collector was not triggered by large amount of InternalClasses");
|
||||
}
|
||||
|
||||
QTEST_MAIN(tst_qv4mm)
|
||||
|
|
Loading…
Reference in New Issue