Skip to content

Commit

Permalink
Merge "Re-enable concurrent system weak sweeping." into klp-dev
Browse files Browse the repository at this point in the history
  • Loading branch information
Mathieu Chartier authored and Android (Google) Code Review committed Sep 20, 2013
2 parents ded4f46 + c11d9b8 commit 7d690ba
Show file tree
Hide file tree
Showing 12 changed files with 167 additions and 103 deletions.
67 changes: 18 additions & 49 deletions runtime/gc/collector/mark_sweep.cc
Original file line number Diff line number Diff line change
Expand Up @@ -207,10 +207,6 @@ bool MarkSweep::HandleDirtyObjectsPhase() {
}

ProcessReferences(self);
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
SweepSystemWeaks();
}

// Only need to do this if we have the card mark verification on, and only during concurrent GC.
if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_||
Expand All @@ -228,6 +224,12 @@ bool MarkSweep::HandleDirtyObjectsPhase() {
// Ensure that nobody inserted items in the live stack after we swapped the stacks.
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

// Disallow new system weaks to prevent a race which occurs when someone adds a new system
// weak before we sweep them. Since this new system weak may not be marked, the GC may
// incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
// reference to a string that is about to be swept.
Runtime::Current()->DisallowNewSystemWeaks();
return true;
}

Expand Down Expand Up @@ -289,14 +291,16 @@ void MarkSweep::ReclaimPhase() {

if (!IsConcurrent()) {
ProcessReferences(self);
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
SweepSystemWeaks();
}
timings_.StartSplit("PreSweepingGcVerification");
heap_->PreSweepingGcVerification(this);
timings_.EndSplit();
} else {
}

{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
SweepSystemWeaks();
}

if (IsConcurrent()) {
Runtime::Current()->AllowNewSystemWeaks();

base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
Expand Down Expand Up @@ -1002,13 +1006,7 @@ void MarkSweep::ReMarkRoots() {
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
WriterMutexLock mu(Thread::Current(), vm->weak_globals_lock);
for (const Object** entry : vm->weak_globals) {
if (!is_marked(*entry, arg)) {
*entry = kClearedJniWeakGlobal;
}
}
Runtime::Current()->GetJavaVM()->SweepWeakGlobals(is_marked, arg);
}

struct ArrayMarkedCheck {
Expand All @@ -1029,31 +1027,8 @@ bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
return false;
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
Runtime* runtime = Runtime::Current();
// The callbacks check
// !is_marked where is_marked is the callback but we want
// !IsMarked && IsLive
// So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
// Or for swapped (IsLive || !IsMarked).

timings_.StartSplit("SweepSystemWeaksArray");
ArrayMarkedCheck visitor;
visitor.live_stack = allocations;
visitor.mark_sweep = this;
runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
Runtime* runtime = Runtime::Current();
// The callbacks check
// !is_marked where is_marked is the callback but we want
// !IsMarked && IsLive
// So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
// Or for swapped (IsLive || !IsMarked).
timings_.StartSplit("SweepSystemWeaks");
runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
Expand Down Expand Up @@ -1087,12 +1062,7 @@ void MarkSweep::VerifySystemWeaks() {
// Verify system weaks, uses a special IsMarked callback which always returns true.
runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

JavaVMExt* vm = runtime->GetJavaVM();
ReaderMutexLock mu(Thread::Current(), vm->weak_globals_lock);
for (const Object** entry : vm->weak_globals) {
VerifyIsLive(*entry);
}
runtime->GetJavaVM()->SweepWeakGlobals(VerifyIsLiveCallback, this);
}

struct SweepCallbackContext {
Expand Down Expand Up @@ -1172,7 +1142,6 @@ void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
space::DlMallocSpace* space = heap_->GetAllocSpace();

timings_.StartSplit("SweepArray");
// Newly allocated objects MUST be in the alloc space and those are the only objects which we are
// going to free.
Expand Down
4 changes: 0 additions & 4 deletions runtime/gc/collector/mark_sweep.h
Original file line number Diff line number Diff line change
Expand Up @@ -204,10 +204,6 @@ class MarkSweep : public GarbageCollector {
void SweepSystemWeaks()
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

// Only sweep the weaks which are inside of an allocation stack.
void SweepSystemWeaksArray(accounting::ObjectStack* allocations)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

Expand Down
24 changes: 22 additions & 2 deletions runtime/intern_table.cc
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,9 @@
namespace art {

InternTable::InternTable()
: intern_table_lock_("InternTable lock"), is_dirty_(false) {}
: intern_table_lock_("InternTable lock"), is_dirty_(false), allow_new_interns_(true),
new_intern_condition_("New intern condition", intern_table_lock_) {
}

size_t InternTable::Size() const {
MutexLock mu(Thread::Current(), intern_table_lock_);
Expand Down Expand Up @@ -111,12 +113,30 @@ static mirror::String* LookupStringFromImage(mirror::String* s)
return NULL;
}

void InternTable::AllowNewInterns() {
Thread* self = Thread::Current();
MutexLock mu(self, intern_table_lock_);
allow_new_interns_ = true;
new_intern_condition_.Broadcast(self);
}

void InternTable::DisallowNewInterns() {
Thread* self = Thread::Current();
MutexLock mu(self, intern_table_lock_);
allow_new_interns_ = false;
}

mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
MutexLock mu(Thread::Current(), intern_table_lock_);
Thread* self = Thread::Current();
MutexLock mu(self, intern_table_lock_);

DCHECK(s != NULL);
uint32_t hash_code = s->GetHashCode();

while (UNLIKELY(!allow_new_interns_)) {
new_intern_condition_.WaitHoldingLocks(self);
}

if (is_strong) {
// Check the strong table for a match.
mirror::String* strong = Lookup(strong_interns_, s, hash_code);
Expand Down
5 changes: 5 additions & 0 deletions runtime/intern_table.h
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,9 @@ class InternTable {

void DumpForSigQuit(std::ostream& os) const;

void DisallowNewInterns() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

private:
typedef std::multimap<int32_t, mirror::String*> Table;

Expand All @@ -79,6 +82,8 @@ class InternTable {

mutable Mutex intern_table_lock_;
bool is_dirty_ GUARDED_BY(intern_table_lock_);
bool allow_new_interns_ GUARDED_BY(intern_table_lock_);
ConditionVariable new_intern_condition_ GUARDED_BY(intern_table_lock_);
Table strong_interns_ GUARDED_BY(intern_table_lock_);
Table weak_interns_ GUARDED_BY(intern_table_lock_);
};
Expand Down
90 changes: 63 additions & 27 deletions runtime/jni_internal.cc
Original file line number Diff line number Diff line change
Expand Up @@ -86,14 +86,7 @@ static const size_t kWeakGlobalsMax = 51200; // Arbitrary sanity check. (Must f

static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (obj == NULL) {
return NULL;
}
JavaVMExt* vm = soa.Vm();
IndirectReferenceTable& weak_globals = vm->weak_globals;
WriterMutexLock mu(soa.Self(), vm->weak_globals_lock);
IndirectRef ref = weak_globals.Add(IRT_FIRST_SEGMENT, obj);
return reinterpret_cast<jweak>(ref);
return soa.Vm()->AddWeakGlobalReference(soa.Self(), obj);
}

static bool IsBadJniVersion(int version) {
Expand Down Expand Up @@ -854,17 +847,9 @@ class JNI {
}

static void DeleteWeakGlobalRef(JNIEnv* env, jweak obj) {
if (obj == NULL) {
return;
}
ScopedObjectAccess soa(env);
JavaVMExt* vm = soa.Vm();
IndirectReferenceTable& weak_globals = vm->weak_globals;
WriterMutexLock mu(soa.Self(), vm->weak_globals_lock);

if (!weak_globals.Remove(IRT_FIRST_SEGMENT, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
<< "failed to find entry";
if (obj != nullptr) {
ScopedObjectAccess soa(env);
soa.Vm()->DeleteWeakGlobalRef(soa.Self(), obj);
}
}

Expand Down Expand Up @@ -2996,10 +2981,12 @@ JavaVMExt::JavaVMExt(Runtime* runtime, Runtime::ParsedOptions* options)
pin_table("pin table", kPinTableInitial, kPinTableMax),
globals_lock("JNI global reference table lock"),
globals(gGlobalsInitial, gGlobalsMax, kGlobal),
weak_globals_lock("JNI weak global reference table lock"),
weak_globals(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
libraries_lock("JNI shared libraries map lock", kLoadLibraryLock),
libraries(new Libraries) {
libraries(new Libraries),
weak_globals_lock_("JNI weak global reference table lock"),
weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
allow_new_weak_globals_(true),
weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
functions = unchecked_functions = &gJniInvokeInterface;
if (options->check_jni_) {
SetCheckJniEnabled(true);
Expand All @@ -3010,6 +2997,26 @@ JavaVMExt::~JavaVMExt() {
delete libraries;
}

// Registers |obj| in the JNI weak-global reference table and returns an
// opaque jweak handle for it. Returns nullptr when |obj| is nullptr.
jweak JavaVMExt::AddWeakGlobalReference(Thread* self, mirror::Object* obj) {
  if (obj == nullptr) {
    return nullptr;
  }
  MutexLock mu(self, weak_globals_lock_);
  // Block while the GC has disallowed new weak globals: a weak global added
  // mid-sweep may not be marked, so the GC could incorrectly sweep it
  // (see DisallowNewWeakGlobals()/AllowNewWeakGlobals()).
  while (UNLIKELY(!allow_new_weak_globals_)) {
    weak_globals_add_condition_.WaitHoldingLocks(self);
  }
  IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
  return reinterpret_cast<jweak>(ref);
}

// Removes the weak-global entry for |obj| from the table; logs a JNI
// warning when no matching entry exists (e.g. a stale or invalid handle).
void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
  MutexLock lock(self, weak_globals_lock_);
  const bool removed = weak_globals_.Remove(IRT_FIRST_SEGMENT, obj);
  if (!removed) {
    LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
                 << "failed to find entry";
  }
}

void JavaVMExt::SetCheckJniEnabled(bool enabled) {
check_jni = enabled;
functions = enabled ? GetCheckJniInvokeInterface() : &gJniInvokeInterface;
Expand All @@ -3031,9 +3038,9 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) {
os << "; globals=" << globals.Capacity();
}
{
ReaderMutexLock mu(self, weak_globals_lock);
if (weak_globals.Capacity() > 0) {
os << " (plus " << weak_globals.Capacity() << " weak)";
MutexLock mu(self, weak_globals_lock_);
if (weak_globals_.Capacity() > 0) {
os << " (plus " << weak_globals_.Capacity() << " weak)";
}
}
os << '\n';
Expand All @@ -3044,15 +3051,44 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) {
}
}

void JavaVMExt::DisallowNewWeakGlobals() {
MutexLock mu(Thread::Current(), weak_globals_lock_);
allow_new_weak_globals_ = false;
}

void JavaVMExt::AllowNewWeakGlobals() {
Thread* self = Thread::Current();
MutexLock mu(self, weak_globals_lock_);
allow_new_weak_globals_ = true;
weak_globals_add_condition_.Broadcast(self);
}

// GC hook: walks the weak-global table and clears every entry whose
// referent |is_marked| reports as dead, replacing it with the
// kClearedJniWeakGlobal sentinel. |arg| is passed through to the callback.
void JavaVMExt::SweepWeakGlobals(IsMarkedTester is_marked, void* arg) {
  MutexLock mu(Thread::Current(), weak_globals_lock_);
  // Iterates entry slots so the table entry itself can be overwritten
  // in place when the referent is no longer marked.
  for (const Object** entry : weak_globals_) {
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

// Resolves a weak-global IndirectRef back to its object.
mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
  MutexLock mu(self, weak_globals_lock_);
  // Wait while the GC has disallowed weak-global access (same gate as
  // AddWeakGlobalReference). NOTE(review): presumably this prevents reading
  // an entry that is about to be swept/cleared — confirm intended semantics.
  while (UNLIKELY(!allow_new_weak_globals_)) {
    weak_globals_add_condition_.WaitHoldingLocks(self);
  }
  return const_cast<mirror::Object*>(weak_globals_.Get(ref));
}

void JavaVMExt::DumpReferenceTables(std::ostream& os) {
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, globals_lock);
globals.Dump(os);
}
{
ReaderMutexLock mu(self, weak_globals_lock);
weak_globals.Dump(os);
MutexLock mu(self, weak_globals_lock_);
weak_globals_.Dump(os);
}
{
MutexLock mu(self, pins_lock);
Expand Down
24 changes: 19 additions & 5 deletions runtime/jni_internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,8 @@ void InvokeWithArgArray(const ScopedObjectAccess& soa, mirror::ArtMethod* method

int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause);

struct JavaVMExt : public JavaVM {
class JavaVMExt : public JavaVM {
public:
JavaVMExt(Runtime* runtime, Runtime::ParsedOptions* options);
~JavaVMExt();

Expand Down Expand Up @@ -91,6 +92,15 @@ struct JavaVMExt : public JavaVM {

void VisitRoots(RootVisitor*, void*);

void DisallowNewWeakGlobals() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
jweak AddWeakGlobalReference(Thread* self, mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DeleteWeakGlobalRef(Thread* self, jweak obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SweepWeakGlobals(IsMarkedTester is_marked, void* arg);
mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref);

Runtime* runtime;

// Used for testing. By default, we'll LOG(FATAL) the reason.
Expand All @@ -115,15 +125,19 @@ struct JavaVMExt : public JavaVM {
ReaderWriterMutex globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
IndirectReferenceTable globals GUARDED_BY(globals_lock);

// JNI weak global references.
ReaderWriterMutex weak_globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
IndirectReferenceTable weak_globals GUARDED_BY(weak_globals_lock);

Mutex libraries_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
Libraries* libraries GUARDED_BY(libraries_lock);

// Used by -Xcheck:jni.
const JNIInvokeInterface* unchecked_functions;

private:
// TODO: Make the other members of this class also private.
// JNI weak global references.
Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
};

struct JNIEnvExt : public JNIEnv {
Expand Down
Loading

0 comments on commit 7d690ba

Please sign in to comment.