[Libreoffice-commits] core.git: comphelper/source drawinglayer/source include/comphelper package/inc package/source sc/source sw/source vcl/source
Noel Grandin
noel at peralex.com
Mon Jul 18 06:49:36 UTC 2016
comphelper/source/misc/threadpool.cxx | 149 +++++++++++++++++--
drawinglayer/source/primitive2d/sceneprimitive2d.cxx | 9 -
include/comphelper/threadpool.hxx | 19 +-
package/inc/ZipOutputStream.hxx | 4
package/source/zipapi/ZipOutputStream.cxx | 6
package/source/zippackage/ZipPackageStream.cxx | 7
sc/source/filter/excel/xetable.cxx | 9 -
sc/source/filter/oox/workbookfragment.cxx | 76 +++------
sw/source/core/ole/ndole.cxx | 59 +------
vcl/source/bitmap/bitmapscalesuper.cxx | 8 -
10 files changed, 224 insertions(+), 122 deletions(-)
New commits:
commit 76ad32bec8e2c00c21247041b16d9e09e73d2504
Author: Noel Grandin <noel at peralex.com>
Date: Fri Jul 8 14:29:53 2016 +0200
add tagging to ThreadTasks so we don't need more than one pool
If more than one place in the code submits tasks to the shared
pool, then waitTillDone() becomes unreliable.
Add a tagging mechanism, so different callsites can wait
on different sets of tasks.
Also try to protect our worker threads against exceptions from
the thread tasks code.
Change-Id: Idde664ab50008d31a2dd73910bb22f50e62ae22f
Reviewed-on: https://gerrit.libreoffice.org/27042
Tested-by: Jenkins <ci at libreoffice.org>
Reviewed-by: Noel Grandin <noelgrandin at gmail.com>
diff --git a/comphelper/source/misc/threadpool.cxx b/comphelper/source/misc/threadpool.cxx
index 8680e00..32170a1 100644
--- a/comphelper/source/misc/threadpool.cxx
+++ b/comphelper/source/misc/threadpool.cxx
@@ -9,6 +9,7 @@
#include <comphelper/threadpool.hxx>
+#include <com/sun/star/uno/Exception.hpp>
#include <rtl/instance.hxx>
#include <rtl/string.hxx>
#include <algorithm>
@@ -17,6 +18,26 @@
namespace comphelper {
+/** prevent waiting for a task from inside a task */
+#if defined DBG_UTIL && defined LINUX
+static thread_local bool gbIsWorkerThread;
+#endif
+
+// used to group thread-tasks for waiting in waitTillDone()
+class COMPHELPER_DLLPUBLIC ThreadTaskTag
+{
+ oslInterlockedCount mnTasksWorking;
+ osl::Condition maTasksComplete;
+
+public:
+ ThreadTaskTag();
+ bool isDone();
+ void waitUntilDone();
+ void onTaskWorkerDone();
+ void onTaskPushed();
+};
+
+
class ThreadPool::ThreadWorker : public salhelper::Thread
{
ThreadPool *mpPool;
@@ -33,11 +54,36 @@ public:
virtual void execute() override
{
+#if defined DBG_UTIL && defined LINUX
+ gbIsWorkerThread = true;
+#endif
ThreadTask *pTask;
while ( ( pTask = waitForWork() ) )
{
- pTask->doWork();
- delete pTask;
+ std::shared_ptr<ThreadTaskTag> pTag(pTask->getTag());
+ try {
+ pTask->doWork();
+ }
+ catch (const std::exception &e)
+ {
+ SAL_WARN("comphelper", "exception in thread worker while calling doWork(): " << e.what());
+ }
+ catch (const css::uno::Exception &e)
+ {
+ SAL_WARN("comphelper", "exception in thread worker while calling doWork(): " << e.Message);
+ }
+ try {
+ delete pTask;
+ }
+ catch (const std::exception &e)
+ {
+ SAL_WARN("comphelper", "exception in thread worker while deleting task: " << e.what());
+ }
+ catch (const css::uno::Exception &e)
+ {
+ SAL_WARN("comphelper", "exception in thread worker while deleting task: " << e.Message);
+ }
+ pTag->onTaskWorkerDone();
}
}
@@ -149,9 +195,27 @@ sal_Int32 ThreadPool::getPreferredConcurrency()
void ThreadPool::waitAndCleanupWorkers()
{
- waitUntilEmpty();
-
osl::ResettableMutexGuard aGuard( maGuard );
+
+ if( maWorkers.empty() )
+ { // no threads at all -> execute the work in-line
+ ThreadTask *pTask;
+ while ( ( pTask = popWork() ) )
+ {
+ std::shared_ptr<ThreadTaskTag> pTag(pTask->getTag());
+ pTask->doWork();
+ delete pTask;
+ pTag->onTaskWorkerDone();
+ }
+ }
+ else
+ {
+ aGuard.clear();
+ maTasksComplete.wait();
+ aGuard.reset();
+ }
+ assert( maTasks.empty() );
+
mbTerminate = true;
while( !maWorkers.empty() )
@@ -173,6 +237,7 @@ void ThreadPool::waitAndCleanupWorkers()
void ThreadPool::pushTask( ThreadTask *pTask )
{
osl::MutexGuard aGuard( maGuard );
+ pTask->mpTag->onTaskPushed();
maTasks.insert( maTasks.begin(), pTask );
// horrible beyond belief:
@@ -205,8 +270,11 @@ void ThreadPool::stopWork()
maTasksComplete.set();
}
-void ThreadPool::waitUntilEmpty()
+void ThreadPool::waitUntilDone(const std::shared_ptr<ThreadTaskTag>& rTag)
{
+#if defined DBG_UTIL && defined LINUX
+ assert(!gbIsWorkerThread && "cannot wait for tasks from inside a task");
+#endif
osl::ResettableMutexGuard aGuard( maGuard );
if( maWorkers.empty() )
@@ -214,17 +282,78 @@ void ThreadPool::waitUntilEmpty()
ThreadTask *pTask;
while ( ( pTask = popWork() ) )
{
+ std::shared_ptr<ThreadTaskTag> pTag(pTask->getTag());
pTask->doWork();
delete pTask;
+ pTag->onTaskWorkerDone();
}
}
- else
+ aGuard.clear();
+ rTag->waitUntilDone();
+}
+
+std::shared_ptr<ThreadTaskTag> ThreadPool::createThreadTaskTag()
+{
+ return std::make_shared<ThreadTaskTag>();
+}
+
+bool ThreadPool::isTaskTagDone(const std::shared_ptr<ThreadTaskTag>& pTag)
+{
+ return pTag->isDone();
+}
+
+
+ThreadTask::ThreadTask(const std::shared_ptr<ThreadTaskTag>& pTag)
+ : mpTag(pTag)
+{
+}
+
+ThreadTaskTag::ThreadTaskTag() : mnTasksWorking(0)
+{
+ maTasksComplete.set();
+}
+
+void ThreadTaskTag::onTaskPushed()
+{
+ oslInterlockedCount n = osl_atomic_increment(&mnTasksWorking);
+ assert( n < 65536 ); // sanity checking
+ (void)n; // avoid -Wunused-variable in release build
+ maTasksComplete.reset();
+}
+
+void ThreadTaskTag::onTaskWorkerDone()
+{
+ sal_Int32 nCount = osl_atomic_decrement(&mnTasksWorking);
+ assert(nCount >= 0);
+ if (nCount == 0)
+ maTasksComplete.set();
+}
+
+void ThreadTaskTag::waitUntilDone()
+{
+#if defined DBG_UTIL && defined LINUX
+ assert(!gbIsWorkerThread && "cannot wait for tasks from inside a task");
+#endif
+
+#ifdef DBG_UTIL
+ // 2 minute timeout in debug mode so our tests fail sooner rather than later
+ osl::Condition::Result rv = maTasksComplete.wait(TimeValue { 2*60, 0 });
+ assert(rv != osl_cond_result_timeout);
+#else
+ // 10 minute timeout in production so the app eventually throws some kind of error
+ if (maTasksComplete.wait(TimeValue { 10*60, 0 }) == osl_cond_result_timeout)
{
- aGuard.clear();
- maTasksComplete.wait();
- aGuard.reset();
+ SAL_DEBUG_TRACE("comphelper::ThreadTaskTag::waitUntilDone() "
+ << "tasksWorking " << mnTasksWorking
+ << "noThreads " << ThreadPool::getPreferredConcurrency());
+ throw std::runtime_error("timeout waiting for threadpool tasks");
}
- assert( maTasks.empty() );
+#endif
+}
+
+bool ThreadTaskTag::isDone()
+{
+ return mnTasksWorking == 0;
}
} // namespace comphelper
diff --git a/drawinglayer/source/primitive2d/sceneprimitive2d.cxx b/drawinglayer/source/primitive2d/sceneprimitive2d.cxx
index 68acb57..b87df27 100644
--- a/drawinglayer/source/primitive2d/sceneprimitive2d.cxx
+++ b/drawinglayer/source/primitive2d/sceneprimitive2d.cxx
@@ -400,9 +400,11 @@ namespace drawinglayer
public:
explicit Executor(
+ std::shared_ptr<comphelper::ThreadTaskTag>& rTag,
processor3d::ZBufferProcessor3D* pZBufferProcessor3D,
const primitive3d::Primitive3DContainer& rChildren3D)
- : mpZBufferProcessor3D(pZBufferProcessor3D),
+ : comphelper::ThreadTask(rTag),
+ mpZBufferProcessor3D(pZBufferProcessor3D),
mrChildren3D(rChildren3D)
{
}
@@ -417,6 +419,7 @@ namespace drawinglayer
std::vector< processor3d::ZBufferProcessor3D* > aProcessors;
const sal_uInt32 nLinesPerThread(aBZPixelRaster.getHeight() / nThreadCount);
+ std::shared_ptr<comphelper::ThreadTaskTag> aTag = comphelper::ThreadPool::createThreadTaskTag();
for(sal_Int32 a(0); a < nThreadCount; a++)
{
@@ -432,11 +435,11 @@ namespace drawinglayer
nLinesPerThread * a,
a + 1 == nThreadCount ? aBZPixelRaster.getHeight() : nLinesPerThread * (a + 1));
aProcessors.push_back(pNewZBufferProcessor3D);
- Executor* pExecutor = new Executor(pNewZBufferProcessor3D, getChildren3D());
+ Executor* pExecutor = new Executor(aTag, pNewZBufferProcessor3D, getChildren3D());
rThreadPool.pushTask(pExecutor);
}
- rThreadPool.waitUntilEmpty();
+ rThreadPool.waitUntilDone(aTag);
}
else
{
diff --git a/include/comphelper/threadpool.hxx b/include/comphelper/threadpool.hxx
index 2f726f0..93f6b59 100644
--- a/include/comphelper/threadpool.hxx
+++ b/include/comphelper/threadpool.hxx
@@ -15,17 +15,24 @@
#include <osl/mutex.hxx>
#include <osl/conditn.hxx>
#include <rtl/ref.hxx>
-#include <vector>
#include <comphelper/comphelperdllapi.h>
+#include <vector>
+#include <memory>
namespace comphelper
{
+class ThreadTaskTag;
+class ThreadPool;
class COMPHELPER_DLLPUBLIC ThreadTask
{
+friend class ThreadPool;
+ std::shared_ptr<ThreadTaskTag> mpTag;
public:
+ ThreadTask(const std::shared_ptr<ThreadTaskTag>& pTag);
virtual ~ThreadTask() {}
virtual void doWork() = 0;
+ const std::shared_ptr<ThreadTaskTag>& getTag() { return mpTag; }
};
/// A very basic thread pool implementation
@@ -36,20 +43,24 @@ public:
/// count for the CPU
static ThreadPool& getSharedOptimalPool();
+ static std::shared_ptr<ThreadTaskTag> createThreadTaskTag();
+
+ static bool isTaskTagDone(const std::shared_ptr<ThreadTaskTag>&);
+
/// returns a configurable max-concurrency
/// limit to avoid spawning an unnecessarily
/// large number of threads on high-core boxes.
/// MAX_CONCURRENCY envar controls the cap.
static sal_Int32 getPreferredConcurrency();
- ThreadPool( sal_Int32 nWorkers );
+ ThreadPool( sal_Int32 nWorkers );
virtual ~ThreadPool();
/// push a new task onto the work queue
void pushTask( ThreadTask *pTask /* takes ownership */ );
- /// wait until all queued tasks are completed
- void waitUntilEmpty();
+ /// wait until all queued tasks associated with the tag are completed
+ void waitUntilDone(const std::shared_ptr<ThreadTaskTag>&);
/// return the number of live worker threads
sal_Int32 getWorkerCount() const { return maWorkers.size(); }
diff --git a/package/inc/ZipOutputStream.hxx b/package/inc/ZipOutputStream.hxx
index 136bc72..1fc4166 100644
--- a/package/inc/ZipOutputStream.hxx
+++ b/package/inc/ZipOutputStream.hxx
@@ -35,10 +35,10 @@ class ZipOutputStream
{
css::uno::Reference< css::io::XOutputStream > m_xStream;
::std::vector < ZipEntry * > m_aZipList;
+ std::shared_ptr<comphelper::ThreadTaskTag> mpThreadTaskTag;
ByteChucker m_aChucker;
ZipEntry *m_pCurrentEntry;
- comphelper::ThreadPool &m_rSharedThreadPool;
std::vector< ZipOutputEntry* > m_aEntries;
::css::uno::Any m_aDeflateException;
@@ -80,6 +80,8 @@ public:
void reduceScheduledThreadsToGivenNumberOrLess(
sal_Int32 nThreads,
sal_Int32 nWaitTimeInTenthSeconds);
+
+ const std::shared_ptr<comphelper::ThreadTaskTag>& getThreadTaskTag() { return mpThreadTaskTag; }
};
#endif
diff --git a/package/source/zipapi/ZipOutputStream.cxx b/package/source/zipapi/ZipOutputStream.cxx
index 41f78b4..7e9ffe5 100644
--- a/package/source/zipapi/ZipOutputStream.cxx
+++ b/package/source/zipapi/ZipOutputStream.cxx
@@ -42,9 +42,9 @@ using namespace com::sun::star::packages::zip::ZipConstants;
*/
ZipOutputStream::ZipOutputStream( const uno::Reference < io::XOutputStream > &xOStream )
: m_xStream(xOStream)
+, mpThreadTaskTag( comphelper::ThreadPool::createThreadTaskTag() )
, m_aChucker(xOStream)
, m_pCurrentEntry(nullptr)
-, m_rSharedThreadPool(comphelper::ThreadPool::getSharedOptimalPool())
{
}
@@ -70,7 +70,7 @@ void ZipOutputStream::setEntry( ZipEntry *pEntry )
void ZipOutputStream::addDeflatingThread( ZipOutputEntry *pEntry, comphelper::ThreadTask *pThread )
{
- m_rSharedThreadPool.pushTask(pThread);
+ comphelper::ThreadPool::getSharedOptimalPool().pushTask(pThread);
m_aEntries.push_back(pEntry);
}
@@ -178,7 +178,7 @@ void ZipOutputStream::finish()
assert(!m_aZipList.empty() && "Zip file must have at least one entry!");
// Wait for all threads to finish & write
- m_rSharedThreadPool.waitUntilEmpty();
+ comphelper::ThreadPool::getSharedOptimalPool().waitUntilDone(mpThreadTaskTag);
// consume all processed entries
consumeAllScheduledThreadEntries();
diff --git a/package/source/zippackage/ZipPackageStream.cxx b/package/source/zippackage/ZipPackageStream.cxx
index 5efb145..e72c399 100644
--- a/package/source/zippackage/ZipPackageStream.cxx
+++ b/package/source/zippackage/ZipPackageStream.cxx
@@ -466,9 +466,10 @@ class DeflateThread: public comphelper::ThreadTask
uno::Reference< io::XInputStream > mxInStream;
public:
- DeflateThread( ZipOutputEntry *pEntry,
+ DeflateThread( const std::shared_ptr<comphelper::ThreadTaskTag>& pTag, ZipOutputEntry *pEntry,
const uno::Reference< io::XInputStream >& xInStream )
- : mpEntry(pEntry)
+ : comphelper::ThreadTask(pTag)
+ , mpEntry(pEntry)
, mxInStream(xInStream)
{}
@@ -849,7 +850,7 @@ bool ZipPackageStream::saveChild(
// Start a new thread deflating this zip entry
ZipOutputEntry *pZipEntry = new ZipOutputEntry(
m_xContext, *pTempEntry, this, bToBeEncrypted);
- rZipOut.addDeflatingThread( pZipEntry, new DeflateThread(pZipEntry, xStream) );
+ rZipOut.addDeflatingThread( pZipEntry, new DeflateThread(rZipOut.getThreadTaskTag(), pZipEntry, xStream) );
}
else
{
diff --git a/sc/source/filter/excel/xetable.cxx b/sc/source/filter/excel/xetable.cxx
index 29f636e..f7cba24 100644
--- a/sc/source/filter/excel/xetable.cxx
+++ b/sc/source/filter/excel/xetable.cxx
@@ -2116,8 +2116,10 @@ class RowFinalizeTask : public comphelper::ThreadTask
const ScfUInt16Vec& mrColXFIndexes;
std::vector< XclExpRow * > maRows;
public:
- RowFinalizeTask( const ScfUInt16Vec& rColXFIndexes,
+ RowFinalizeTask( const std::shared_ptr<comphelper::ThreadTaskTag> pTag,
+ const ScfUInt16Vec& rColXFIndexes,
bool bProgress ) :
+ comphelper::ThreadTask( pTag ),
mbProgress( bProgress ),
mrColXFIndexes( rColXFIndexes ) {}
virtual ~RowFinalizeTask() {}
@@ -2152,9 +2154,10 @@ void XclExpRowBuffer::Finalize( XclExpDefaultRowData& rDefRowData, const ScfUInt
else
{
comphelper::ThreadPool &rPool = comphelper::ThreadPool::getSharedOptimalPool();
+ std::shared_ptr<comphelper::ThreadTaskTag> pTag = comphelper::ThreadPool::createThreadTaskTag();
std::vector<RowFinalizeTask*> aTasks(nThreads, nullptr);
for ( size_t i = 0; i < nThreads; i++ )
- aTasks[ i ] = new RowFinalizeTask( rColXFIndexes, i == 0 );
+ aTasks[ i ] = new RowFinalizeTask( pTag, rColXFIndexes, i == 0 );
RowMap::iterator itr, itrBeg = maRowMap.begin(), itrEnd = maRowMap.end();
size_t nIdx = 0;
@@ -2167,7 +2170,7 @@ void XclExpRowBuffer::Finalize( XclExpDefaultRowData& rDefRowData, const ScfUInt
// Progress bar updates must be synchronous to avoid deadlock
aTasks[0]->doWork();
- rPool.waitUntilEmpty();
+ rPool.waitUntilDone(pTag);
}
// *** Default row format *** ---------------------------------------------
diff --git a/sc/source/filter/oox/workbookfragment.cxx b/sc/source/filter/oox/workbookfragment.cxx
index 6e6e101..88f6948 100644
--- a/sc/source/filter/oox/workbookfragment.cxx
+++ b/sc/source/filter/oox/workbookfragment.cxx
@@ -225,9 +225,11 @@ class WorkerThread : public comphelper::ThreadTask
rtl::Reference<FragmentHandler> mxHandler;
public:
- WorkerThread( WorkbookFragment& rWorkbookHandler,
+ WorkerThread( const std::shared_ptr<comphelper::ThreadTaskTag> pTag,
+ WorkbookFragment& rWorkbookHandler,
const rtl::Reference<FragmentHandler>& xHandler,
sal_Int32 &rSheetsLeft ) :
+ comphelper::ThreadTask( pTag ),
mrSheetsLeft( rSheetsLeft ),
mrWorkbookHandler( rWorkbookHandler ),
mxHandler( xHandler )
@@ -309,54 +311,38 @@ public:
void importSheetFragments( WorkbookFragment& rWorkbookHandler, SheetFragmentVector& rSheets )
{
- sal_Int32 nThreads = std::min( rSheets.size(), (size_t) comphelper::ThreadPool::getPreferredConcurrency() );
-
Reference< XComponentContext > xContext = comphelper::getProcessComponentContext();
- const char *pEnv;
- if( ( pEnv = getenv( "SC_IMPORT_THREADS" ) ) )
- nThreads = rtl_str_toInt32( pEnv, 10 );
-
- if( nThreads != 0 )
- {
- // test sequential read in this mode
- if( nThreads < 0)
- nThreads = 0;
- comphelper::ThreadPool aPool( nThreads );
-
- sal_Int32 nSheetsLeft = 0;
- ProgressBarTimer aProgressUpdater;
- SheetFragmentVector::iterator it = rSheets.begin(), itEnd = rSheets.end();
- for( ; it != itEnd; ++it )
- {
- // getting at the WorksheetGlobals is rather unpleasant
- IWorksheetProgress *pProgress = WorksheetHelper::getWorksheetInterface( it->first );
- pProgress->setCustomRowProgress(
- aProgressUpdater.wrapProgress(
- pProgress->getRowProgress() ) );
- aPool.pushTask( new WorkerThread( rWorkbookHandler, it->second,
- /* ref */ nSheetsLeft ) );
- nSheetsLeft++;
- }
-
- // coverity[loop_top] - this isn't an infinite loop where nSheetsLeft gets decremented by the above threads
- while( nSheetsLeft > 0)
- {
- // This is a much more controlled re-enterancy hazard than
- // allowing a yield deeper inside the filter code for progress
- // bar updating.
- Application::Yield();
- }
- aPool.waitUntilEmpty();
+ // test sequential read in this mode
+ comphelper::ThreadPool &rSharedPool = comphelper::ThreadPool::getSharedOptimalPool();
+ std::shared_ptr<comphelper::ThreadTaskTag> pTag = comphelper::ThreadPool::createThreadTaskTag();
- // threads joined in ThreadPool destructor
- }
- else // single threaded iteration
+ sal_Int32 nSheetsLeft = 0;
+ ProgressBarTimer aProgressUpdater;
+ SheetFragmentVector::iterator it = rSheets.begin(), itEnd = rSheets.end();
+ for( ; it != itEnd; ++it )
{
- SheetFragmentVector::iterator it = rSheets.begin(), itEnd = rSheets.end();
- for( ; it != itEnd; ++it )
- rWorkbookHandler.importOoxFragment( it->second );
- }
+ // getting at the WorksheetGlobals is rather unpleasant
+ IWorksheetProgress *pProgress = WorksheetHelper::getWorksheetInterface( it->first );
+ pProgress->setCustomRowProgress(
+ aProgressUpdater.wrapProgress(
+ pProgress->getRowProgress() ) );
+ rSharedPool.pushTask( new WorkerThread( pTag, rWorkbookHandler, it->second,
+ /* ref */ nSheetsLeft ) );
+ nSheetsLeft++;
+ }
+
+ // coverity[loop_top] - this isn't an infinite loop where nSheetsLeft gets decremented by the above threads
+ while( nSheetsLeft > 0)
+ {
+ // This is a much more controlled re-enterancy hazard than
+ // allowing a yield deeper inside the filter code for progress
+ // bar updating.
+ Application::Yield();
+ }
+ rSharedPool.waitUntilDone(pTag);
+
+ // threads joined in ThreadPool destructor
}
}
diff --git a/sw/source/core/ole/ndole.cxx b/sw/source/core/ole/ndole.cxx
index 9863cc9..4160892 100644
--- a/sw/source/core/ole/ndole.cxx
+++ b/sw/source/core/ole/ndole.cxx
@@ -643,38 +643,9 @@ bool SwOLENode::IsChart() const
return bIsChart;
}
-// due to some problems in test cases with the SharedOptimalPool, use
-// an own instance of comphelper::ThreadPool. Problem is that other
-// usages of getSharedOptimalPool() may interfere if more than one
-// pool user calls waitUntilEmpty().
-//
-// It gets created on-demand and will be available during LO's
-// lifetime for loading chart models used in writer in parallel. It
-// would be possible to add a usage count, then trigger a timer and
-// clean it up (due to lifetime issues), but that's probably overkill.
-// It gets created on demand, is ready for global reuse and makes no
-// harm (not much ressources needed)
-static comphelper::ThreadPool* pLocalPool = nullptr;
-
-comphelper::ThreadPool* getLocalThreadPool()
-{
- if (pLocalPool)
- {
- return pLocalPool;
- }
-
- if (0 == comphelper::ThreadPool::getSharedOptimalPool().getWorkerCount())
- {
- return nullptr;
- }
-
- pLocalPool = new comphelper::ThreadPool(comphelper::ThreadPool::getSharedOptimalPool().getWorkerCount());
- return pLocalPool;
-}
-
namespace { class DeflateThread; }
-/// Holder for local data for a parallely-executed task to load a chart model
+/// Holder for local data for a parallel-executed task to load a chart model
class DeflateData
{
private:
@@ -685,21 +656,20 @@ private:
drawinglayer::primitive2d::Primitive2DContainer maPrimitive2DSequence;
basegfx::B2DRange maRange;
- // set from the WorkerThread when done
- std::atomic< bool> mbFinished;
-
// evtl.set from the SwOLEObj destructor when a WorkerThread is still active
// since it is not possible to kill it - let it terminate and delete the
// data working on itself
std::atomic< bool> mbKilled;
+ std::shared_ptr<comphelper::ThreadTaskTag> mpTag;
+
public:
DeflateData(const uno::Reference< frame::XModel >& rXModel)
: maXModel(rXModel),
maPrimitive2DSequence(),
maRange(),
- mbFinished(false),
- mbKilled(false)
+ mbKilled(false),
+ mpTag( comphelper::ThreadPool::createThreadTaskTag() )
{
}
@@ -715,12 +685,12 @@ public:
bool isFinished() const
{
- return mbFinished;
+ return comphelper::ThreadPool::isTaskTagDone(mpTag);
}
void waitFinished()
{
- while(!mbFinished && !mbKilled)
+ while(!isFinished() && !mbKilled)
{
// need to wait until the load in progress is finished.
// to do so, Application::Yield() is needed since the execution
@@ -729,6 +699,7 @@ public:
// the running import
Application::Yield();
}
+ comphelper::ThreadPool::getSharedOptimalPool().waitUntilDone(mpTag);
}
};
@@ -742,7 +713,7 @@ class DeflateThread : public comphelper::ThreadTask
public:
DeflateThread(DeflateData& rDeflateData)
- : mrDeflateData(rDeflateData)
+ : comphelper::ThreadTask(rDeflateData.mpTag), mrDeflateData(rDeflateData)
{
}
@@ -758,7 +729,6 @@ private:
// model no longer needed and done
mrDeflateData.maXModel.clear();
- mrDeflateData.mbFinished = true;
}
catch (const uno::Exception&)
{
@@ -1081,16 +1051,11 @@ drawinglayer::primitive2d::Primitive2DContainer SwOLEObj::tryToGetChartContentAs
if(aXModel.is())
{
- // loaded using own instance of comphelper::ThreadPool,
- // see getLocalThreadPool(). Disable via bool below if
- // trouble surfaces somewhere
-
// disabled fro now, need to check deeper
- static bool bAnynchronousLoadingAllowed = false;
+ static bool bAsynchronousLoadingAllowed = false;
if(bSynchron ||
- !bAnynchronousLoadingAllowed ||
- nullptr == getLocalThreadPool())
+ !bAsynchronousLoadingAllowed)
{
// load chart synchron in this Thread
m_aPrimitive2DSequence = ChartHelper::tryToGetChartContentAsPrimitive2DSequence(
@@ -1106,7 +1071,7 @@ drawinglayer::primitive2d::Primitive2DContainer SwOLEObj::tryToGetChartContentAs
{
m_pDeflateData = new DeflateData(aXModel);
DeflateThread* pNew = new DeflateThread(*m_pDeflateData);
- getLocalThreadPool()->pushTask(pNew);
+ comphelper::ThreadPool::getSharedOptimalPool().pushTask(pNew);
}
}
}
diff --git a/vcl/source/bitmap/bitmapscalesuper.cxx b/vcl/source/bitmap/bitmapscalesuper.cxx
index 9fb1f44..ff01aae 100644
--- a/vcl/source/bitmap/bitmapscalesuper.cxx
+++ b/vcl/source/bitmap/bitmapscalesuper.cxx
@@ -89,7 +89,8 @@ class ScaleTask : public comphelper::ThreadTask
ScaleRangeFn mpFn;
std::vector< ScaleRangeContext > maStrips;
public:
- explicit ScaleTask( ScaleRangeFn pFn ) : mpFn( pFn ) {}
+ explicit ScaleTask( const std::shared_ptr<comphelper::ThreadTaskTag>& pTag, ScaleRangeFn pFn )
+ : comphelper::ThreadTask(pTag), mpFn( pFn ) {}
void push( ScaleRangeContext &aRC ) { maStrips.push_back( aRC ); }
virtual void doWork() override
{
@@ -1000,6 +1001,7 @@ bool BitmapScaleSuper::filter(Bitmap& rBitmap)
{
// partition and queue work
comphelper::ThreadPool &rShared = comphelper::ThreadPool::getSharedOptimalPool();
+ std::shared_ptr<comphelper::ThreadTaskTag> pTag = comphelper::ThreadPool::createThreadTaskTag();
sal_uInt32 nThreads = rShared.getWorkerCount();
assert( nThreads > 0 );
sal_uInt32 nStrips = ((nEndY - nStartY) + SCALE_THREAD_STRIP - 1) / SCALE_THREAD_STRIP;
@@ -1008,7 +1010,7 @@ bool BitmapScaleSuper::filter(Bitmap& rBitmap)
long nStripY = nStartY;
for ( sal_uInt32 t = 0; t < nThreads - 1; t++ )
{
- ScaleTask *pTask = new ScaleTask( pScaleRangeFn );
+ ScaleTask *pTask = new ScaleTask( pTag, pScaleRangeFn );
for ( sal_uInt32 j = 0; j < nStripsPerThread; j++ )
{
ScaleRangeContext aRC( &aContext, nStripY );
@@ -1020,7 +1022,7 @@ bool BitmapScaleSuper::filter(Bitmap& rBitmap)
// finish any remaining bits here
pScaleRangeFn( aContext, nStripY, nEndY );
- rShared.waitUntilEmpty();
+ rShared.waitUntilDone(pTag);
SAL_INFO("vcl.gdi", "All threaded scaling tasks complete");
}
More information about the Libreoffice-commits
mailing list