Bug 1752870 - Multiple-reader/single-writer ExclusiveData variant. r=jonco

The indirect stubs code needs to take a lock on the lazy stub tier
when mapping back from code pointer to function index during a
Table::get on a table of funcref.  This code is amazingly hot in one
large wasm application, which is almost unusable because of the lock
contention caused by this lookup.

However, the tier tables are mostly stable at this point and parallel
lookups are fine, so we need to allow for multiple readers.  The
easiest way to do this is by using a multi-reader/single-writer
ExclusiveData variant, introduced here.

Differential Revision: https://phabricator.services.mozilla.com/D137516
Lars T Hansen committed Feb 2, 2022
1 parent acc20a5 commit 7e909fb
Showing 5 changed files with 164 additions and 12 deletions.
152 changes: 152 additions & 0 deletions js/src/threading/ExclusiveData.h
@@ -268,6 +268,158 @@ class ExclusiveWaitableData : public ExclusiveData<T> {
   Guard lock() const { return Guard(*this); }
 };

/**
 * Multiple-readers / single-writer variant of ExclusiveData.
 *
 * Readers call readLock() to obtain a stack-only RAII reader lock, which
 * allows other readers to read concurrently but blocks writers; the value it
 * yields is const. Writers call writeLock() to obtain the corresponding RAII
 * writer lock, which yields exclusive access to non-const data.
 *
 * See ExclusiveData and its implementation for more documentation.
 */
template <typename T>
class RWExclusiveData {
  mutable Mutex lock_;
  mutable ConditionVariable cond_;
  mutable T value_;
  mutable int readers_;

  // We maintain a count of active readers. Writers may enter the critical
  // section only when the reader count is zero, so the reader that decrements
  // the count to zero must wake up any waiting writers.
  //
  // There can be multiple writers waiting, so a writer leaving the critical
  // section must also wake up any other waiting writers.

  void acquireReaderLock() const {
    lock_.lock();
    readers_++;
    lock_.unlock();
  }

  void releaseReaderLock() const {
    lock_.lock();
    MOZ_ASSERT(readers_ > 0);
    if (--readers_ == 0) {
      cond_.notify_all();
    }
    lock_.unlock();
  }

  // The writer holds the mutex for its entire critical section:
  // acquireWriterLock() returns with lock_ held, and releaseWriterLock()
  // unlocks it.  Holding the mutex excludes other writers as well as any
  // reader trying to increment the count.
  void acquireWriterLock() const {
    lock_.lock();
    while (readers_ > 0) {
      cond_.wait(lock_);
    }
  }

  void releaseWriterLock() const {
    cond_.notify_all();
    lock_.unlock();
  }

 public:
  RWExclusiveData(const RWExclusiveData&) = delete;
  RWExclusiveData& operator=(const RWExclusiveData&) = delete;

  /**
   * Create a new `RWExclusiveData`, constructing the protected value in
   * place.
   */
  template <typename... Args>
  explicit RWExclusiveData(const MutexId& id, Args&&... args)
      : lock_(id), value_(std::forward<Args>(args)...), readers_(0) {}

  class MOZ_STACK_CLASS ReadGuard {
    const RWExclusiveData* parent_;
    explicit ReadGuard(std::nullptr_t) : parent_(nullptr) {}

   public:
    ReadGuard(const ReadGuard&) = delete;
    ReadGuard& operator=(const ReadGuard&) = delete;

    explicit ReadGuard(const RWExclusiveData& parent) : parent_(&parent) {
      parent_->acquireReaderLock();
    }

    ReadGuard(ReadGuard&& rhs) : parent_(rhs.parent_) {
      MOZ_ASSERT(&rhs != this, "self-move disallowed!");
      rhs.parent_ = nullptr;
    }

    ReadGuard& operator=(ReadGuard&& rhs) {
      this->~ReadGuard();
      new (this) ReadGuard(std::move(rhs));
      return *this;
    }

    const T& get() const {
      MOZ_ASSERT(parent_);
      return parent_->value_;
    }

    operator const T&() const { return get(); }
    const T* operator->() const { return &get(); }

    const RWExclusiveData<T>* parent() const {
      MOZ_ASSERT(parent_);
      return parent_;
    }

    ~ReadGuard() {
      if (parent_) {
        parent_->releaseReaderLock();
      }
    }
  };

  class MOZ_STACK_CLASS WriteGuard {
    const RWExclusiveData* parent_;
    explicit WriteGuard(std::nullptr_t) : parent_(nullptr) {}

   public:
    WriteGuard(const WriteGuard&) = delete;
    WriteGuard& operator=(const WriteGuard&) = delete;

    explicit WriteGuard(const RWExclusiveData& parent) : parent_(&parent) {
      parent_->acquireWriterLock();
    }

    WriteGuard(WriteGuard&& rhs) : parent_(rhs.parent_) {
      MOZ_ASSERT(&rhs != this, "self-move disallowed!");
      rhs.parent_ = nullptr;
    }

    WriteGuard& operator=(WriteGuard&& rhs) {
      this->~WriteGuard();
      new (this) WriteGuard(std::move(rhs));
      return *this;
    }

    T& get() const {
      MOZ_ASSERT(parent_);
      return parent_->value_;
    }

    operator T&() const { return get(); }
    T* operator->() const { return &get(); }

    const RWExclusiveData<T>* parent() const {
      MOZ_ASSERT(parent_);
      return parent_;
    }

    ~WriteGuard() {
      if (parent_) {
        parent_->releaseWriterLock();
      }
    }
  };

  ReadGuard readLock() const { return ReadGuard(*this); }
  WriteGuard writeLock() const { return WriteGuard(*this); }
};

} // namespace js

#endif // threading_ExclusiveData_h
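
A minimal usage sketch of the new class, for orientation (not part of
this commit). The StubMap type, the Lookup/AddStub helpers, and the
mutexid::StubMap id are hypothetical; real MutexIds are declared
centrally in the tree (see the note after the WasmCode.h hunks below).

#include "threading/ExclusiveData.h"

#include <unordered_map>

// Hypothetical protected value: a map from code pointer to function
// index, mirroring the lookup this patch makes concurrent.
using StubMap = std::unordered_map<void*, uint32_t>;

// mutexid::StubMap is a placeholder for a properly registered MutexId.
static js::RWExclusiveData<StubMap> stubs(js::mutexid::StubMap);

// Hot path: any number of threads may hold ReadGuards at once; they
// block only while a writer is inside its critical section.
static bool Lookup(void* pc, uint32_t* funcIndex) {
  auto guard = stubs.readLock();  // yields const StubMap&
  auto it = guard->find(pc);
  if (it == guard->end()) {
    return false;
  }
  *funcIndex = it->second;
  return true;
}

// Cold path: stub creation remains exclusive, as with ExclusiveData.
static void AddStub(void* pc, uint32_t funcIndex) {
  auto guard = stubs.writeLock();  // yields non-const StubMap&
  guard->emplace(pc, funcIndex);
}

Note the asymmetry that makes this safe: ReadGuard only ever hands out
const T&, so the type system keeps concurrent readers from mutating the
value they share.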
6 changes: 3 additions & 3 deletions js/src/wasm/WasmCode.cpp
@@ -1322,7 +1322,7 @@ bool CodeTier::initialize(IsTier2 isTier2, const Code& code,
   MOZ_ASSERT(!initialized());
   code_ = &code;

-  MOZ_ASSERT(lazyStubs_.lock()->entryStubsEmpty());
+  MOZ_ASSERT(lazyStubs_.readLock()->entryStubsEmpty());

   // See comments in CodeSegment::initialize() for why this must be last.
   if (!segment_->initialize(isTier2, *this, linkData, metadata, *metadata_)) {
@@ -1372,7 +1372,7 @@ uint8_t* CodeTier::serialize(uint8_t* cursor, const LinkData& linkData) const {
 void CodeTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                              size_t* data) const {
   segment_->addSizeOfMisc(mallocSizeOf, code, data);
-  lazyStubs_.lock()->addSizeOfMisc(mallocSizeOf, code, data);
+  lazyStubs_.readLock()->addSizeOfMisc(mallocSizeOf, code, data);
   *data += metadata_->sizeOfExcludingThis(mallocSizeOf);
 }

@@ -1594,7 +1594,7 @@ const CodeRange* Code::lookupIndirectStubRange(void* pc) const {
   // so if we pregenerate indirect stubs for all exported functions
   // we can eliminate the lock below.
   for (Tier tier : tiers()) {
-    auto stubs = codeTier(tier).lazyStubs().lock();
+    auto stubs = codeTier(tier).lazyStubs().readLock();
     const CodeRange* result = stubs->lookupRange(pc);
     if (result && result->isIndirectStub()) {
       return result;
4 changes: 2 additions & 2 deletions js/src/wasm/WasmCode.h
@@ -685,7 +685,7 @@ class CodeTier {
   const UniqueModuleSegment segment_;

   // Lazy stubs, not serialized.
-  ExclusiveData<LazyStubTier> lazyStubs_;
+  RWExclusiveData<LazyStubTier> lazyStubs_;

   static const MutexId& mutexForTier(Tier tier) {
     if (tier == Tier::Baseline) {
@@ -707,7 +707,7 @@ class CodeTier {
                 const Metadata& metadata);

   Tier tier() const { return segment_->tier(); }
-  const ExclusiveData<LazyStubTier>& lazyStubs() const { return lazyStubs_; }
+  const RWExclusiveData<LazyStubTier>& lazyStubs() const { return lazyStubs_; }
   const MetadataTier& metadata() const { return *metadata_.get(); }
   const ModuleSegment& segment() const { return *segment_.get(); }
   const Code& code() const {
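
An aside on mutexForTier above, for readers outside SpiderMonkey: every
js::Mutex is built from a MutexId that pairs a name with an ordering
rank, and DEBUG builds assert that mutexes are only acquired in
increasing rank order. Giving each tier its own id is what lets
Module::finishTier2 (below) hold both tiers' stub locks at the same
time. A sketch of the central declaration list; the names follow the
pattern used in js/src/vm/MutexIDs.h, but the ranks are illustrative:

// Each entry expands to a js::MutexId, i.e. a {name, rank} pair used
// by the DEBUG-build lock-order checker.
#define FOR_EACH_MUTEX(_)      \
  _(wasmLazyStubsTier1, 475)   \
  _(wasmLazyStubsTier2, 476)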
10 changes: 5 additions & 5 deletions js/src/wasm/WasmInstance.cpp
@@ -859,7 +859,7 @@ static int32_t MemoryInit(JSContext* cx, Instance* instance, I dstOffset,

 void* Instance::ensureAndGetIndirectStub(Tier tier, uint32_t funcIndex) {
   const CodeTier& codeTier = code(tier);
-  auto stubs = codeTier.lazyStubs().lock();
+  auto stubs = codeTier.lazyStubs().writeLock();

   void* stub_entry = stubs->lookupIndirectStub(funcIndex, tlsData());
   if (stub_entry) {
@@ -885,15 +885,15 @@ bool Instance::createManyIndirectStubs(
   }

   const CodeTier& codeTier = code(tier);
-  auto stubs = codeTier.lazyStubs().lock();
+  auto stubs = codeTier.lazyStubs().writeLock();
   return stubs->createManyIndirectStubs(targets, codeTier);
 }

 void* Instance::getIndirectStub(uint32_t funcIndex, TlsData* targetTlsData,
                                 const Tier tier) const {
   MOZ_ASSERT(funcIndex != NullFuncIndex);

-  auto stubs = code(tier).lazyStubs().lock();
+  auto stubs = code(tier).lazyStubs().readLock();
   return stubs->lookupIndirectStub(funcIndex, targetTlsData);
 }

@@ -2148,7 +2148,7 @@ static bool EnsureEntryStubs(const Instance& instance, uint32_t funcIndex,
   //
   // Also see doc block for stubs in WasmJS.cpp.

-  auto stubs = instance.code(tier).lazyStubs().lock();
+  auto stubs = instance.code(tier).lazyStubs().writeLock();
   *interpEntry = stubs->lookupInterpEntry(fe.funcIndex());
   if (*interpEntry) {
     return true;
@@ -2169,7 +2169,7 @@
   }

   MOZ_RELEASE_ASSERT(prevTier == Tier::Baseline && tier == Tier::Optimized);
-  auto stubs2 = instance.code(tier).lazyStubs().lock();
+  auto stubs2 = instance.code(tier).lazyStubs().writeLock();

   // If it didn't have a stub in the first tier, background compilation
   // shouldn't have made one in the second tier.
4 changes: 2 additions & 2 deletions js/src/wasm/WasmModule.cpp
@@ -186,8 +186,8 @@ bool Module::finishTier2(const LinkData& linkData2,

   const MetadataTier& metadataTier1 = metadata(Tier::Baseline);

-  auto stubs1 = code().codeTier(Tier::Baseline).lazyStubs().lock();
-  auto stubs2 = borrowedTier2->lazyStubs().lock();
+  auto stubs1 = code().codeTier(Tier::Baseline).lazyStubs().readLock();
+  auto stubs2 = borrowedTier2->lazyStubs().writeLock();

   MOZ_ASSERT(stubs2->entryStubsEmpty());
