WebKit Bugzilla
Attachment 368268 Details for
Bug 197294
: [bmalloc] Follow-up and fixing bug after r244481
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Patch
bug-197294-20190425141221.patch (text/plain), 14.57 KB, created by
Yusuke Suzuki
on 2019-04-25 14:12:21 PDT
(
hide
)
Description:
Patch
Filename:
MIME Type:
Creator:
Yusuke Suzuki
Created:
2019-04-25 14:12:21 PDT
Size:
14.57 KB
patch
obsolete
>Subversion Revision: 244662 >diff --git a/Source/bmalloc/ChangeLog b/Source/bmalloc/ChangeLog >index b93b1f17399a5d6b7a717c825318a1c17749f638..5bd5b17003817e11d24599fcfcf157ad26b7f546 100644 >--- a/Source/bmalloc/ChangeLog >+++ b/Source/bmalloc/ChangeLog >@@ -1,3 +1,32 @@ >+2019-04-25 Yusuke Suzuki <ysuzuki@apple.com> >+ >+ [bmalloc] Follow-up and fixing bug after r244481 >+ https://bugs.webkit.org/show_bug.cgi?id=197294 >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ This patch includes follow-up after r244481 and bug fixes which were introduced in the refactoring. >+ >+ * bmalloc/IsoAllocator.h: Remove unused function. >+ * bmalloc/IsoAllocatorInlines.h: >+ (bmalloc::IsoAllocator<Config>::allocateSlow): >+ * bmalloc/IsoDeallocatorInlines.h: >+ (bmalloc::IsoDeallocator<Config>::deallocate): >+ * bmalloc/IsoHeapImpl.h: Rename m_usableBits to m_availableShared and add static_assert. >+ * bmalloc/IsoHeapImplInlines.h: Do not clear m_numberOfAllocationsFromSharedInOneCycle etc. in scavenge since IsoHeapImpl::scavenge >+ is not related to thread-local IsoAllocator's status. >+ (bmalloc::IsoHeapImpl<Config>::scavenge): >+ (bmalloc::IsoHeapImpl<Config>::forEachLiveObject): >+ (bmalloc::IsoHeapImpl<Config>::updateAllocationMode): Update m_allocationMode correctly. >+ (bmalloc::IsoHeapImpl<Config>::allocateFromShared): >+ * bmalloc/IsoSharedHeapInlines.h: >+ (bmalloc::computeObjectSizeForSharedCell): >+ (bmalloc::IsoSharedHeap::allocateNew): >+ (bmalloc::IsoSharedHeap::allocateSlow): Add computeObjectSizeForSharedCell. >+ * bmalloc/IsoSharedPage.h: >+ * bmalloc/IsoSharedPageInlines.h: >+ (bmalloc::IsoSharedPage::free): Pass `const std::lock_guard<Mutex>&` in its parameter. 
>+ > 2019-04-25 Alex Christensen <achristensen@webkit.org> > > Start using C++17 >diff --git a/Source/bmalloc/bmalloc/IsoAllocator.h b/Source/bmalloc/bmalloc/IsoAllocator.h >index 30472c10d3c28b05f8f548a8613002758dbae439..a0f7080cbb1c2171757c3d6df74752ef51bd8d17 100644 >--- a/Source/bmalloc/bmalloc/IsoAllocator.h >+++ b/Source/bmalloc/bmalloc/IsoAllocator.h >@@ -40,8 +40,6 @@ class IsoAllocator { > IsoAllocator(IsoHeapImpl<Config>&); > ~IsoAllocator(); > >- AllocationMode considerUsingSharedAllocation(); >- > void* allocate(bool abortOnFailure); > void scavenge(); > >diff --git a/Source/bmalloc/bmalloc/IsoAllocatorInlines.h b/Source/bmalloc/bmalloc/IsoAllocatorInlines.h >index 606b4b8b3374804706c6ae1d7ef982a037bb8155..5455ffdcf2d0320f33f36c7311f962ef7c78247a 100644 >--- a/Source/bmalloc/bmalloc/IsoAllocatorInlines.h >+++ b/Source/bmalloc/bmalloc/IsoAllocatorInlines.h >@@ -69,7 +69,7 @@ BNO_INLINE void* IsoAllocator<Config>::allocateSlow(bool abortOnFailure) > m_currentPage = nullptr; > m_freeList.clear(); > } >- return m_heap->allocateFromShared(abortOnFailure); >+ return m_heap->allocateFromShared(locker, abortOnFailure); > } > > BASSERT(allocationMode == AllocationMode::Fast); >diff --git a/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h b/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h >index 7103ecb5901b46f4ec34448beb73c3b7db553145..23f32a3d89b25d2c57e1a382bde026885a4e3d9d 100644 >--- a/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h >+++ b/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h >@@ -60,7 +60,7 @@ void IsoDeallocator<Config>::deallocate(api::IsoHeap<Type>& handle, void* ptr) > IsoPageBase* page = IsoPageBase::pageFor(ptr); > if (page->isShared()) { > std::lock_guard<Mutex> locker(*m_lock); >- static_cast<IsoSharedPage*>(page)->free<Config>(handle, ptr); >+ static_cast<IsoSharedPage*>(page)->free<Config>(locker, handle, ptr); > return; > } > >diff --git a/Source/bmalloc/bmalloc/IsoHeapImpl.h b/Source/bmalloc/bmalloc/IsoHeapImpl.h >index 
a90197a53d23dad671c51471bdee9f9145c7acc5..9ed3cc541b1f4716a3e6990bf1fac9ca2a9b0241 100644 >--- a/Source/bmalloc/bmalloc/IsoHeapImpl.h >+++ b/Source/bmalloc/bmalloc/IsoHeapImpl.h >@@ -60,11 +60,13 @@ class BEXPORT IsoHeapImplBase { > friend class AllIsoHeaps; > > IsoHeapImplBase* m_next { nullptr }; >- std::chrono::steady_clock::time_point m_slowPathTimePoint; >+ std::chrono::steady_clock::time_point m_lastSlowPathTime; > std::array<void*, maxAllocationFromShared> m_sharedCells { }; > unsigned m_numberOfAllocationsFromSharedInOneCycle { 0 }; >- unsigned m_usableBits { maxAllocationFromSharedMask }; >+ unsigned m_availableShared { maxAllocationFromSharedMask }; > AllocationMode m_allocationMode { AllocationMode::Init }; >+ >+ static_assert(sizeof(m_availableShared) * 8 >= maxAllocationFromShared, ""); > }; > > template<typename Config> >@@ -111,7 +113,7 @@ class IsoHeapImpl final : public IsoHeapImplBase { > void isNoLongerFreeable(void* ptr, size_t bytes); > > AllocationMode updateAllocationMode(); >- void* allocateFromShared(bool abortOnFailure); >+ void* allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure); > > // It's almost always the caller's responsibility to grab the lock. This lock comes from the > // PerProcess<IsoTLSDeallocatorEntry<Config>>::get()->lock. 
That's pretty weird, and we don't >diff --git a/Source/bmalloc/bmalloc/IsoHeapImplInlines.h b/Source/bmalloc/bmalloc/IsoHeapImplInlines.h >index f650280d3be6f2cb85a54b54f8fdaba305f76149..ba410935277c90d7e77215dbbcd9e7f860bef827 100644 >--- a/Source/bmalloc/bmalloc/IsoHeapImplInlines.h >+++ b/Source/bmalloc/bmalloc/IsoHeapImplInlines.h >@@ -109,8 +109,6 @@ void IsoHeapImpl<Config>::scavenge(Vector<DeferredDecommit>& decommits) > directory.scavenge(decommits); > }); > m_directoryHighWatermark = 0; >- m_numberOfAllocationsFromSharedInOneCycle = 0; >- m_allocationMode = AllocationMode::Init; > } > > template<typename Config> >@@ -182,7 +180,7 @@ void IsoHeapImpl<Config>::forEachLiveObject(const Func& func) > }); > for (unsigned index = 0; index < maxAllocationFromShared; ++index) { > void* pointer = m_sharedCells[index]; >- if (pointer && !(m_usableBits & (1U << index))) >+ if (pointer && !(m_availableShared & (1U << index))) > func(pointer); > } > } >@@ -233,55 +231,60 @@ void IsoHeapImpl<Config>::isNoLongerFreeable(void* ptr, size_t bytes) > template<typename Config> > AllocationMode IsoHeapImpl<Config>::updateAllocationMode() > { >- // Exhaust shared free cells, which means we should start activating the fast allocation mode for this type. >- if (!m_usableBits) { >- m_slowPathTimePoint = std::chrono::steady_clock::now(); >- return AllocationMode::Fast; >- } >+ auto getNewAllocationMode = [&] { >+ // Exhaust shared free cells, which means we should start activating the fast allocation mode for this type. >+ if (!m_availableShared) { >+ m_lastSlowPathTime = std::chrono::steady_clock::now(); >+ return AllocationMode::Fast; >+ } > >- switch (m_allocationMode) { >- case AllocationMode::Shared: >- // Currently in the shared allocation mode. Until we exhaust shared free cells, continue using the shared allocation mode. >- // But if we allocate so many shared cells within very short period, we should use the fast allocation mode instead. 
>- // This avoids the following pathological case. >- // >- // for (int i = 0; i < 1e6; ++i) { >- // auto* ptr = allocate(); >- // ... >- // free(ptr); >- // } >- if (m_numberOfAllocationsFromSharedInOneCycle <= IsoPage<Config>::numObjects) >- return AllocationMode::Shared; >- BFALLTHROUGH; >+ switch (m_allocationMode) { >+ case AllocationMode::Shared: >+ // Currently in the shared allocation mode. Until we exhaust shared free cells, continue using the shared allocation mode. >+ // But if we allocate so many shared cells within very short period, we should use the fast allocation mode instead. >+ // This avoids the following pathological case. >+ // >+ // for (int i = 0; i < 1e6; ++i) { >+ // auto* ptr = allocate(); >+ // ... >+ // free(ptr); >+ // } >+ if (m_numberOfAllocationsFromSharedInOneCycle <= IsoPage<Config>::numObjects) >+ return AllocationMode::Shared; >+ BFALLTHROUGH; > >- case AllocationMode::Fast: { >- // The allocation pattern may change. We should check the allocation rate and decide which mode is more appropriate. >- // If we don't go to the allocation slow path during 1~ seconds, we think the allocation becomes quiescent state. >- auto now = std::chrono::steady_clock::now(); >- if ((now - m_slowPathTimePoint) < std::chrono::seconds(1)) { >- m_slowPathTimePoint = now; >- return AllocationMode::Fast; >+ case AllocationMode::Fast: { >+ // The allocation pattern may change. We should check the allocation rate and decide which mode is more appropriate. >+ // If we don't go to the allocation slow path during ~1 seconds, we think the allocation becomes quiescent state. 
>+ auto now = std::chrono::steady_clock::now(); >+ if ((now - m_lastSlowPathTime) < std::chrono::seconds(1)) { >+ m_lastSlowPathTime = now; >+ return AllocationMode::Fast; >+ } >+ >+ m_numberOfAllocationsFromSharedInOneCycle = 0; >+ m_lastSlowPathTime = now; >+ return AllocationMode::Shared; > } > >- m_numberOfAllocationsFromSharedInOneCycle = 0; >- m_slowPathTimePoint = now; >- return AllocationMode::Shared; >- } >+ case AllocationMode::Init: >+ m_lastSlowPathTime = std::chrono::steady_clock::now(); >+ return AllocationMode::Shared; >+ } > >- case AllocationMode::Init: >- m_slowPathTimePoint = std::chrono::steady_clock::now(); > return AllocationMode::Shared; >- } >- >- return AllocationMode::Shared; >+ }; >+ AllocationMode allocationMode = getNewAllocationMode(); >+ m_allocationMode = allocationMode; >+ return allocationMode; > } > > template<typename Config> >-void* IsoHeapImpl<Config>::allocateFromShared(bool abortOnFailure) >+void* IsoHeapImpl<Config>::allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure) > { > static constexpr bool verbose = false; > >- unsigned indexPlusOne = __builtin_ffs(m_usableBits); >+ unsigned indexPlusOne = __builtin_ffs(m_availableShared); > BASSERT(indexPlusOne); > unsigned index = indexPlusOne - 1; > void* result = m_sharedCells[index]; >@@ -300,7 +303,7 @@ void* IsoHeapImpl<Config>::allocateFromShared(bool abortOnFailure) > m_sharedCells[index] = result; > } > BASSERT(result); >- m_usableBits &= (~(1U << index)); >+ m_availableShared &= ~(1U << index); > ++m_numberOfAllocationsFromSharedInOneCycle; > return result; > } >diff --git a/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h b/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h >index 3e6356afb7480b30e9a7821097052d3bdb3f015e..d9f7f1166c5ad06f04d8121c4988227bdfd01d04 100644 >--- a/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h >+++ b/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h >@@ -43,11 +43,16 @@ void* VariadicBumpAllocator::allocate(const Func& slowPath) > return 
slowPath(); > } > >+inline constexpr unsigned computeObjectSizeForSharedCell(unsigned objectSize) >+{ >+ return roundUpToMultipleOf<alignmentForIsoSharedAllocation>(static_cast<uintptr_t>(objectSize)); >+} >+ > template<unsigned passedObjectSize> > void* IsoSharedHeap::allocateNew(bool abortOnFailure) > { > std::lock_guard<Mutex> locker(mutex()); >- constexpr unsigned objectSize = roundUpToMultipleOf<alignmentForIsoSharedAllocation>(static_cast<uintptr_t>(passedObjectSize)); >+ constexpr unsigned objectSize = computeObjectSizeForSharedCell(passedObjectSize); > return m_allocator.template allocate<objectSize>( > [&] () -> void* { > return allocateSlow<passedObjectSize>(abortOnFailure); >@@ -73,7 +78,7 @@ BNO_INLINE void* IsoSharedHeap::allocateSlow(bool abortOnFailure) > m_currentPage = page; > m_allocator = m_currentPage->startAllocating(); > >- constexpr unsigned objectSize = roundUpToMultipleOf<alignmentForIsoSharedAllocation>(static_cast<uintptr_t>(passedObjectSize)); >+ constexpr unsigned objectSize = computeObjectSizeForSharedCell(passedObjectSize); > return m_allocator.allocate<objectSize>([] () { BCRASH(); return nullptr; }); > } > >diff --git a/Source/bmalloc/bmalloc/IsoSharedPage.h b/Source/bmalloc/bmalloc/IsoSharedPage.h >index 224ef20816724583c8ac3f2e52195427a61cda5b..a9e137e414188564d05804490b5437a500e570bb 100644 >--- a/Source/bmalloc/bmalloc/IsoSharedPage.h >+++ b/Source/bmalloc/bmalloc/IsoSharedPage.h >@@ -38,7 +38,7 @@ class IsoSharedPage : public IsoPageBase { > BEXPORT static IsoSharedPage* tryCreate(); > > template<typename Config, typename Type> >- void free(api::IsoHeap<Type>&, void*); >+ void free(const std::lock_guard<Mutex>&, api::IsoHeap<Type>&, void*); > VariadicBumpAllocator startAllocating(); > void stopAllocating(); > >diff --git a/Source/bmalloc/bmalloc/IsoSharedPageInlines.h b/Source/bmalloc/bmalloc/IsoSharedPageInlines.h >index f5730df84381317865e323c8ea5b6f367d368ad5..c4216b51dac9d1e95c3035d6616fa3e7ce5907a9 100644 >--- 
a/Source/bmalloc/bmalloc/IsoSharedPageInlines.h >+++ b/Source/bmalloc/bmalloc/IsoSharedPageInlines.h >@@ -35,7 +35,7 @@ namespace bmalloc { > // This is because empty IsoSharedPage is still split into various different objects that should keep some part of virtual memory region dedicated. > // We cannot set up bump allocation for such a page. Not freeing IsoSharedPages are OK since IsoSharedPage is only used for the lower tier of IsoHeap. > template<typename Config, typename Type> >-void IsoSharedPage::free(api::IsoHeap<Type>& handle, void* ptr) >+void IsoSharedPage::free(const std::lock_guard<Mutex>&, api::IsoHeap<Type>& handle, void* ptr) > { > auto& heapImpl = handle.impl(); > uint8_t index = *indexSlotFor<Config>(ptr) & IsoHeapImplBase::maxAllocationFromSharedMask; >@@ -43,7 +43,7 @@ void IsoSharedPage::free(api::IsoHeap<Type>& handle, void* ptr) > // If vptr is replaced to the other vptr, we may accidentally chain this pointer to the incorrect HeapImplBase, which totally breaks the IsoHeap's goal. > // To harden that, we validate that this pointer is actually allocated for a specific HeapImplBase here by checking whether this pointer is listed in HeapImplBase's shared cells. > RELEASE_BASSERT(heapImpl.m_sharedCells[index] == ptr); >- heapImpl.m_usableBits |= (1U << index); >+ heapImpl.m_availableShared |= (1U << index); > } > > inline VariadicBumpAllocator IsoSharedPage::startAllocating()
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Flags:
saam
:
review+
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 197294
: 368268