
Replace hsMutex with std::mutex

Michael Hansen, 10 years ago
commit 2947acb2c8
Changed files:

 1. Sources/Plasma/CoreLib/hsThread.cpp (33 changes)
 2. Sources/Plasma/CoreLib/hsThread.h (47 changes)
 3. Sources/Plasma/CoreLib/hsThread_Mac.cpp (26 changes)
 4. Sources/Plasma/CoreLib/hsThread_Unix.cpp (81 changes)
 5. Sources/Plasma/CoreLib/hsThread_Win.cpp (36 changes)
 6. Sources/Plasma/FeatureLib/pfPatcher/pfPatcher.cpp (31 changes)
 7. Sources/Plasma/NucleusLib/pnDispatch/plDispatch.cpp (109 changes)
 8. Sources/Plasma/NucleusLib/pnDispatch/plDispatch.h (10 changes)
 9. Sources/Plasma/NucleusLib/pnNetCli/pnNcChannel.cpp (80 changes)
10. Sources/Plasma/PubUtilLib/plAudioCore/plSoundBuffer.cpp (22 changes)
11. Sources/Plasma/PubUtilLib/plAudioCore/plSoundBuffer.h (10 changes)
12. Sources/Plasma/PubUtilLib/plFile/plStreamSource.cpp (8 changes)
13. Sources/Plasma/PubUtilLib/plFile/plStreamSource.h (4 changes)
14. Sources/Plasma/PubUtilLib/plMath/plAvg.cpp (2 changes)
15. Sources/Plasma/PubUtilLib/plMath/plAvg.h (6 changes)
16. Sources/Plasma/PubUtilLib/plStatusLog/plStatusLog.h (3 changes)
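
The change is mechanical throughout: the hand-rolled hsMutex (explicit Lock()/TryLock()/Unlock()) and the hsTempMutexLock RAII helper are replaced by std::mutex with std::lock_guard, and the one lock that is re-acquired on a thread already holding it (plDispatch::fMsgCurrentMutex) becomes std::recursive_mutex. A minimal before/after sketch of the mapping, using made-up names rather than anything from the diff:

    #include <mutex>

    class Example
    {
        std::mutex fLock;       // was: hsMutex fLock;
        int        fCount = 0;

    public:
        void Increment()
        {
            // was: hsTempMutexLock lock(fLock);  or  fLock.Lock(); ... fLock.Unlock();
            std::lock_guard<std::mutex> lock(fLock);
            ++fCount;
        }   // the guard unlocks when it goes out of scope

        bool TryIncrement()
        {
            // was: if (fLock.TryLock()) { ... fLock.Unlock(); }
            if (!fLock.try_lock())
                return false;
            ++fCount;
            fLock.unlock();
            return true;
        }
    };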

Sources/Plasma/CoreLib/hsThread.cpp (33 changes)

@@ -57,14 +57,14 @@ hsReaderWriterLock::hsReaderWriterLock(Callback * cb)
 void hsReaderWriterLock::LockForReading()
 {
     if ( fCallback )
-        fCallback->OnLockingForRead( this );
-    fReaderCountLock.Lock();
-    fReaderLock.Lock();
-    fReaderCount++;
-    if ( fReaderCount==1 )
-        fWriterSema.Wait();
-    fReaderLock.Unlock();
-    fReaderCountLock.Unlock();
+        fCallback->OnLockingForRead(this);
+    {
+        std::lock_guard<std::mutex> lock_count(fReaderCountLock);
+        std::lock_guard<std::mutex> lock(fReaderLock);
+        fReaderCount++;
+        if (fReaderCount == 1)
+            fWriterSema.Wait();
+    }
     if ( fCallback )
         fCallback->OnLockedForRead( this );
 }
@@ -72,12 +72,13 @@ void hsReaderWriterLock::LockForReading()
 void hsReaderWriterLock::UnlockForReading()
 {
     if ( fCallback )
-        fCallback->OnUnlockingForRead( this );
-    fReaderLock.Lock();
-    fReaderCount--;
-    if ( fReaderCount==0 )
-        fWriterSema.Signal();
-    fReaderLock.Unlock();
+        fCallback->OnUnlockingForRead(this);
+    {
+        std::lock_guard<std::mutex> lock(fReaderLock);
+        fReaderCount--;
+        if (fReaderCount == 0)
+            fWriterSema.Signal();
+    }
     if ( fCallback )
         fCallback->OnUnlockedForRead( this );
 }
@@ -86,7 +87,7 @@ void hsReaderWriterLock::LockForWriting()
 {
     if ( fCallback )
         fCallback->OnLockingForWrite( this );
-    fReaderCountLock.Lock();
+    fReaderCountLock.lock();
     fWriterSema.Wait();
     hsAssert( fReaderCount==0, "Locked for writing, but fReaderCount>0" );
     if ( fCallback )
@@ -98,7 +99,7 @@ void hsReaderWriterLock::UnlockForWriting()
     if ( fCallback )
         fCallback->OnUnlockingForWrite( this );
     fWriterSema.Signal();
-    fReaderCountLock.Unlock();
+    fReaderCountLock.unlock();
     if ( fCallback )
         fCallback->OnUnlockedForWrite( this );
 }
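
std::lock_guard releases its mutex only when it leaves scope, so call sites that previously unlocked partway through a function (LockForReading() above, and several spots later in this commit) now wrap the guarded statements in an extra block. A small illustration of that idiom with illustrative names, not code from the diff:

    #include <mutex>
    #include <vector>

    std::mutex       gQueueLock;   // hypothetical lock and queue
    std::vector<int> gQueue;

    void Enqueue(int value)
    {
        {   // the guard lives only inside this block, like Lock()/Unlock() did
            std::lock_guard<std::mutex> lock(gQueueLock);
            gQueue.push_back(value);
        }
        // work that must not hold the lock continues here
    }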

Sources/Plasma/CoreLib/hsThread.h (47 changes)

@@ -43,6 +43,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 #define hsThread_Defined

 #include "HeadSpin.h"
+#include <mutex>

 typedef uint32_t hsMilliseconds;
@@ -106,44 +107,6 @@ public:
 //////////////////////////////////////////////////////////////////////////////

-class hsMutex {
-#if HS_BUILD_FOR_WIN32
-    HANDLE fMutexH;
-#elif HS_BUILD_FOR_UNIX
-    pthread_mutex_t fPMutex;
-#endif
-public:
-    hsMutex();
-    virtual ~hsMutex();
-
-#ifdef HS_BUILD_FOR_WIN32
-    HANDLE GetHandle() const { return fMutexH; }
-#endif
-
-    void Lock();
-    bool TryLock();
-    void Unlock();
-};
-
-class hsTempMutexLock {
-    hsMutex* fMutex;
-public:
-    hsTempMutexLock(hsMutex* mutex) : fMutex(mutex)
-    {
-        fMutex->Lock();
-    }
-    hsTempMutexLock(hsMutex& mutex) : fMutex(&mutex)
-    {
-        fMutex->Lock();
-    }
-    ~hsTempMutexLock()
-    {
-        fMutex->Unlock();
-    }
-};
-
-//////////////////////////////////////////////////////////////////////////////
-
 class hsSemaphore {
 #if HS_BUILD_FOR_WIN32
     HANDLE fSemaH;
@@ -181,8 +144,8 @@ class hsEvent
 #else
     enum { kRead, kWrite };
     int     fFds[2];
-    hsMutex fWaitLock;
-    hsMutex fSignalLock;
+    std::mutex fWaitLock;
+    std::mutex fSignalLock;
 #endif // PSEUDO_EVENT
 #elif HS_BUILD_FOR_WIN32
     HANDLE fEvent;
@@ -231,8 +194,8 @@ public:
 private:
     int     fReaderCount;
-    hsMutex fReaderCountLock;
-    hsMutex fReaderLock;
+    std::mutex fReaderCountLock;
+    std::mutex fReaderLock;
     hsSemaphore fWriterSema;
     Callback * fCallback;
 };

Sources/Plasma/CoreLib/hsThread_Mac.cpp (26 changes)

@@ -115,32 +115,6 @@ void hsThread::ThreadYield()
 //////////////////////////////////////////////////////////////////////////////

-hsMutex::hsMutex()
-{
-    OSStatus status = ::MPCreateCriticalRegion(&fCriticalRegion);
-    hsThrowIfOSErr(status);
-}
-
-hsMutex::~hsMutex()
-{
-    OSStatus status = ::MPDeleteCriticalRegion(fCriticalRegion);
-    hsThrowIfOSErr(status);
-}
-
-void hsMutex::Lock()
-{
-    OSStatus status = ::MPEnterCriticalRegion(fCriticalRegion, kDurationForever);
-    hsThrowIfOSErr(status);
-}
-
-void hsMutex::Unlock()
-{
-    OSStatus status = ::MPExitCriticalRegion(fCriticalRegion);
-    hsThrowIfOSErr(status);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
 hsSemaphore::hsSemaphore(int initialValue)
 {
     OSStatus status = MPCreateSemaphore(kPosInfinity32, initialValue, &fSemaId);

Sources/Plasma/CoreLib/hsThread_Unix.cpp (81 changes)

@@ -190,83 +190,6 @@ static void InitEventLoggingFile()
 #endif

-hsMutex::hsMutex()
-{
-#ifdef MUTEX_TIMING
-    InitMutexTimerFile();
-#endif
-
-    // create mutex attributes
-    pthread_mutexattr_t attr;
-    int status = ::pthread_mutexattr_init(&attr);
-    hsThrowIfOSErr(status);
-
-    // make the mutex attributes recursive
-    status = ::pthread_mutexattr_settype(&attr,PTHREAD_MUTEX_RECURSIVE);
-    hsThrowIfOSErr(status);
-
-    //init the mutex
-    status = ::pthread_mutex_init(&fPMutex, &attr);
-    hsThrowIfOSErr(status);
-
-    // destroy the attributes
-    status = ::pthread_mutexattr_destroy(&attr);
-    hsThrowIfOSErr(status);
-}
-
-hsMutex::~hsMutex()
-{
-    int status = ::pthread_mutex_destroy(&fPMutex);
-    hsThrowIfOSErr(status);
-}
-
-void hsMutex::Lock()
-{
-#ifdef MUTEX_TIMING
-#   ifndef HS_DEBUGGING
-    timeval tv;
-    hsWide start;
-    gettimeofday( &tv, nil );
-    start.Mul( tv.tv_sec, 1000000 )->Add( tv.tv_usec );
-#   endif
-#endif
-
-    int status = ::pthread_mutex_lock(&fPMutex);
-    hsThrowIfOSErr(status);
-
-#ifdef MUTEX_TIMING
-#   ifndef HS_DEBUGGING
-    hsWide diff;
-    gettimeofday( &tv, nil );
-    diff.Mul( tv.tv_sec, 1000000 )->Add( tv.tv_usec )->Sub( &start )->Div( 1000000 );
-    double duration = diff.AsDouble();
-    if ( gMutexTimerFile && duration>0.005 )
-    {
-        time_t t;
-        time( &t );
-        struct tm *now = localtime( &t );
-        char tmp[30];
-        strftime( tmp, 30, "%c", now );
-        fprintf( gMutexTimerFile, "[%s] [%lu:%lu] %f\n", tmp, getpid(), hsThread::GetMyThreadId(), duration );
-    }
-#   endif
-#endif
-}
-
-bool hsMutex::TryLock()
-{
-    int status = ::pthread_mutex_trylock(&fPMutex);
-    hsThrowIfOSErr(status);
-    return status==EBUSY?false:true;
-}
-
-void hsMutex::Unlock()
-{
-    int status = ::pthread_mutex_unlock(&fPMutex);
-    hsThrowIfOSErr(status);
-}
-
 /////////////////////////////////////////////////////////////////////////////

 hsSemaphore::hsSemaphore(int initialValue, const char* name)
@@ -538,7 +461,7 @@ hsEvent::~hsEvent()
 bool hsEvent::Wait( hsMilliseconds timeToWait )
 {
-    hsTempMutexLock lock( fWaitLock );
+    std::lock_guard<std::mutex> lock(fWaitLock);

     fd_set  fdset;
     FD_ZERO( &fdset );
@@ -572,7 +495,7 @@ bool hsEvent::Wait( hsMilliseconds timeToWait )
 void hsEvent::Signal()
 {
-    hsTempMutexLock lock( fSignalLock );
+    std::lock_guard<std::mutex> lock(fSignalLock);
     write( fFds[kWrite], "*", 1 );
 }

Sources/Plasma/CoreLib/hsThread_Win.cpp (36 changes)

@@ -135,42 +135,6 @@ void hsThread::ThreadYield()
 //////////////////////////////////////////////////////////////////////////////

-hsMutex::hsMutex()
-{
-    fMutexH = ::CreateMutex(nil, false, nil);
-    if (fMutexH == nil)
-        throw hsOSException(-1);
-}
-
-hsMutex::~hsMutex()
-{
-    ::CloseHandle(fMutexH);
-}
-
-void hsMutex::Lock()
-{
-    DWORD state = ::WaitForSingleObject(fMutexH, INFINITE);
-    hsAssert(state != WAIT_FAILED,"hsMutex::Lock -> Wait Failed");
-    hsAssert(state != WAIT_ABANDONED,"hsMutex::Lock -> Abandoned Mutex");
-    hsAssert(state != WAIT_TIMEOUT,"hsMutex::Lock -> Infinite Timeout expired?");
-}
-
-bool hsMutex::TryLock()
-{
-    DWORD state = ::WaitForSingleObject(fMutexH, 0);
-    hsAssert(state != WAIT_ABANDONED,"hsMutex::TryLock -> Abandoned Mutex");
-    return state == WAIT_OBJECT_0?true:false;
-}
-
-void hsMutex::Unlock()
-{
-    BOOL result = ::ReleaseMutex(fMutexH);
-    hsAssert(result != 0, "hsMutex::Unlock Failed!");
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
 hsSemaphore::hsSemaphore(int initialValue, const char *name)
 {
     fSemaH = ::CreateSemaphore(nil, initialValue, kPosInfinity32, name);

Sources/Plasma/FeatureLib/pfPatcher/pfPatcher.cpp (31 changes)

@@ -42,6 +42,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 #include <algorithm>
 #include <deque>
+#include <mutex>

 #include "pfPatcher.h"
@@ -101,8 +102,8 @@ struct pfPatcherWorker : public hsThread
     std::deque<Request> fRequests;
     std::deque<NetCliFileManifestEntry> fQueuedFiles;

-    hsMutex fRequestMut;
-    hsMutex fFileMut;
+    std::mutex fRequestMut;
+    std::mutex fFileMut;
     hsSemaphore fFileSignal;

     pfPatcher::CompletionFunc fOnComplete;
@@ -243,7 +244,7 @@ static void IGotAuthFileList(ENetError result, void* param, const NetCliAuthFile
     // so everything goes directly into the Requests deque because AuthSrv lists
     // don't have any hashes attached. WHY did eap think this was a good idea?!?!
     {
-        hsTempMutexLock lock(patcher->fRequestMut);
+        std::lock_guard<std::mutex> lock(patcher->fRequestMut);
         for (unsigned i = 0; i < infoCount; ++i) {
             PatcherLogYellow("\tEnqueuing Legacy File '%S'", infoArr[i].filename);
@@ -268,7 +269,7 @@ static void IHandleManifestDownload(pfPatcherWorker* patcher, const wchar_t grou
     {
         PatcherLogGreen("\tDownloaded Manifest '%S'", group);
         {
-            hsTempMutexLock lock(patcher->fFileMut);
+            std::lock_guard<std::mutex> lock(patcher->fFileMut);
             for (unsigned i = 0; i < entryCount; ++i)
                 patcher->fQueuedFiles.push_back(manifest[i]);
             patcher->fFileSignal.Signal();
@@ -287,7 +288,7 @@ static void IPreloaderManifestDownloadCB(ENetError result, void* param, const wc
         // so, we need to ask the AuthSrv about our game code
         {
-            hsTempMutexLock lock(patcher->fRequestMut);
+            std::lock_guard<std::mutex> lock(patcher->fRequestMut);
             patcher->fRequests.push_back(pfPatcherWorker::Request(plString::Null, pfPatcherWorker::Request::kPythonList));
             patcher->fRequests.push_back(pfPatcherWorker::Request(plString::Null, pfPatcherWorker::Request::kSdlList));
         }
@@ -341,7 +342,7 @@ pfPatcherWorker::pfPatcherWorker() :
 pfPatcherWorker::~pfPatcherWorker()
 {
     {
-        hsTempMutexLock lock(fRequestMut);
+        std::lock_guard<std::mutex> lock(fRequestMut);
         std::for_each(fRequests.begin(), fRequests.end(),
             [] (const Request& req) {
                 if (req.fStream) req.fStream->Close();
@@ -352,7 +353,7 @@ pfPatcherWorker::~pfPatcherWorker()
     }

     {
-        hsTempMutexLock lock(fFileMut);
+        std::lock_guard<std::mutex> lock(fFileMut);
         fQueuedFiles.clear();
     }
 }
@@ -386,7 +387,7 @@ void pfPatcherWorker::EndPatch(ENetError result, const plString& msg)
 bool pfPatcherWorker::IssueRequest()
 {
-    hsTempMutexLock lock(fRequestMut);
+    std::lock_guard<std::mutex> lock(fRequestMut);
     if (fRequests.empty()) {
         fRequestActive = false;
         fFileSignal.Signal(); // make sure the patch thread doesn't deadlock!
@@ -451,7 +452,7 @@ hsError pfPatcherWorker::Run()
     do {
         fFileSignal.Wait();

-        hsTempMutexLock fileLock(fFileMut);
+        std::lock_guard<std::mutex> fileLock(fFileMut);
         if (!fQueuedFiles.empty()) {
             ProcessFile();
             continue;
@@ -515,8 +516,10 @@ void pfPatcherWorker::ProcessFile()
         pfPatcherStream* s = new pfPatcherStream(this, dlName, entry);
         s->Open(clName, "wb");

-        hsTempMutexLock lock(fRequestMut);
-        fRequests.push_back(Request(dlName, Request::kFile, s));
+        {
+            std::lock_guard<std::mutex> lock(fRequestMut);
+            fRequests.push_back(Request(dlName, Request::kFile, s));
+        }
         fQueuedFiles.pop_front();

         if (!fRequestActive)
@@ -615,19 +618,19 @@ void pfPatcher::OnSelfPatch(FileDownloadFunc cb)
 void pfPatcher::RequestGameCode()
 {
-    hsTempMutexLock lock(fWorker->fRequestMut);
+    std::lock_guard<std::mutex> lock(fWorker->fRequestMut);
     fWorker->fRequests.push_back(pfPatcherWorker::Request("SecurePreloader", pfPatcherWorker::Request::kSecurePreloader));
 }

 void pfPatcher::RequestManifest(const plString& mfs)
 {
-    hsTempMutexLock lock(fWorker->fRequestMut);
+    std::lock_guard<std::mutex> lock(fWorker->fRequestMut);
     fWorker->fRequests.push_back(pfPatcherWorker::Request(mfs, pfPatcherWorker::Request::kManifest));
 }

 void pfPatcher::RequestManifest(const std::vector<plString>& mfs)
 {
-    hsTempMutexLock lock(fWorker->fRequestMut);
+    std::lock_guard<std::mutex> lock(fWorker->fRequestMut);
     std::for_each(mfs.begin(), mfs.end(),
         [&] (const plString& name) {
             fWorker->fRequests.push_back(pfPatcherWorker::Request(name, pfPatcherWorker::Request::kManifest));

Sources/Plasma/NucleusLib/pnDispatch/plDispatch.cpp (109 changes)

@@ -84,7 +84,7 @@ public:
     uint32_t GetNumReceivers() const { return fReceivers.GetCount(); }
 };

 int32_t plDispatch::fNumBufferReq = 0;
 bool plDispatch::fMsgActive = false;
 plMsgWrap* plDispatch::fMsgCurrent = nil;
 plMsgWrap* plDispatch::fMsgHead = nil;
@@ -92,8 +92,8 @@ plMsgWrap* plDispatch::fMsgTail = nil;
 hsTArray<plMessage*> plDispatch::fMsgWatch;
 MsgRecieveCallback plDispatch::fMsgRecieveCallback = nil;

-hsMutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
-hsMutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch
+std::recursive_mutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
+std::mutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch

 plDispatch::plDispatch()
@@ -227,18 +227,19 @@ bool plDispatch::IListeningForExactType(uint16_t hClass)
 void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
 {
-    fMsgCurrentMutex.Lock();
+    {
+        std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);

 #ifdef HS_DEBUGGING
-    if( msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch) )
-        fMsgWatch.Append(msgWrap->fMsg);
+        if (msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch))
+            fMsgWatch.Append(msgWrap->fMsg);
 #endif // HS_DEBUGGING

-    if( fMsgTail )
-        fMsgTail = IInsertToQueue(&fMsgTail->fNext, msgWrap);
-    else
-        fMsgTail = IInsertToQueue(&fMsgHead, msgWrap);
-    fMsgCurrentMutex.Unlock();
+        if (fMsgTail)
+            fMsgTail = IInsertToQueue(&fMsgTail->fNext, msgWrap);
+        else
+            fMsgTail = IInsertToQueue(&fMsgHead, msgWrap);
+    }

     if( !async )
         // Test for fMsgActive in IMsgDispatch(), properly wrapped inside a mutex -mcn
@@ -248,25 +249,22 @@ void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
 // On starts deferring msg delivery until buffering is set to off again.
 bool plDispatch::SetMsgBuffering(bool on)
 {
-    fMsgCurrentMutex.Lock();
-    if( on )
     {
-        hsAssert(fNumBufferReq || !fMsgActive, "Can't start deferring message delivery while delivering messages. See mf");
-        if( !fNumBufferReq && fMsgActive )
+        std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
+        if (on)
         {
-            fMsgCurrentMutex.Unlock();
-            return false;
+            hsAssert(fNumBufferReq || !fMsgActive, "Can't start deferring message delivery while delivering messages. See mf");
+            if (!fNumBufferReq && fMsgActive)
+                return false;
+            fNumBufferReq++;
+            fMsgActive = true;
         }
-        fNumBufferReq++;
-        fMsgActive = true;
-        fMsgCurrentMutex.Unlock();
-    }
-    else if( !--fNumBufferReq )
-    {
-        fMsgActive = false;
-        fMsgCurrentMutex.Unlock();
-        IMsgDispatch();
+        else if (!--fNumBufferReq)
+        {
+            fMsgActive = false;
+            IMsgDispatch();
+        }
     }

     hsAssert(fNumBufferReq >= 0, "Mismatched number of on/off dispatch buffering requests");
@@ -275,25 +273,25 @@ bool plDispatch::SetMsgBuffering(bool on)
 void plDispatch::IMsgDispatch()
 {
-    if( !fMsgDispatchLock.TryLock() )
+    if (!fMsgDispatchLock.try_lock())
         return;

     if( fMsgActive )
     {
-        fMsgDispatchLock.Unlock();
+        fMsgDispatchLock.unlock();
         return;
     }

     fMsgActive = true;
     int responseLevel=0;

-    fMsgCurrentMutex.Lock();
+    fMsgCurrentMutex.lock();

     plMsgWrap* origTail = fMsgTail;
     while((fMsgCurrent = fMsgHead))
     {
         IDequeue(&fMsgHead, &fMsgTail);
-        fMsgCurrentMutex.Unlock();
+        fMsgCurrentMutex.unlock();

         plMessage* msg = fMsgCurrent->fMsg;
         bool nonLocalMsg = msg && msg->HasBCastFlag(plMessage::kNetNonLocal);
@@ -402,16 +400,16 @@ void plDispatch::IMsgDispatch()
 //              }
 //          }

-        fMsgCurrentMutex.Lock();
+        fMsgCurrentMutex.lock();

         delete fMsgCurrent;
         // TEMP
         fMsgCurrent = (class plMsgWrap *)0xdeadc0de;
     }
-    fMsgCurrentMutex.Unlock();
+    fMsgCurrentMutex.unlock();

     fMsgActive = false;
-    fMsgDispatchLock.Unlock();
+    fMsgDispatchLock.unlock();
 }
@@ -419,12 +417,12 @@ void plDispatch::IMsgDispatch()
 //
 bool plDispatch::IMsgNetPropagate(plMessage* msg)
 {
-    fMsgCurrentMutex.Lock();
+    {
+        std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);

-    // Make sure cascaded messages all have the same net flags
-    plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);
-
-    fMsgCurrentMutex.Unlock();
+        // Make sure cascaded messages all have the same net flags
+        plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);
+    }

     // Decide if msg should go out over the network.
     // If kNetForce is used, this message should always go out over the network, even if it's already
@@ -511,10 +509,9 @@ void plDispatch::MsgQueue(plMessage* msg)
 {
     if (fQueuedMsgOn)
     {
-        fQueuedMsgListMutex.Lock();
+        std::lock_guard<std::mutex> lock(fQueuedMsgListMutex);
         hsAssert(msg,"Message missing");
         fQueuedMsgList.push_back(msg);
-        fQueuedMsgListMutex.Unlock();
     }
     else
         MsgSend(msg, false);
@@ -522,23 +519,23 @@ void plDispatch::MsgQueue(plMessage* msg)
 void plDispatch::MsgQueueProcess()
 {
     // Process all messages on Queue, unlock while sending them
     // this would allow other threads to put new messages on the list while we send()
-    while (1)
+    bool empty = false;
+    while (!empty)
     {
-        plMessage * pMsg = nil;
-        fQueuedMsgListMutex.Lock();
-        int size = fQueuedMsgList.size();
-        if (size)
-        {   pMsg = fQueuedMsgList.front();
-            fQueuedMsgList.pop_front();
+        plMessage * pMsg = nullptr;
+        {
+            std::lock_guard<std::mutex> lock(fQueuedMsgListMutex);
+            empty = fQueuedMsgList.empty();
+            if (!empty)
+            {
+                pMsg = fQueuedMsgList.front();
+                fQueuedMsgList.pop_front();
+            }
         }
-        fQueuedMsgListMutex.Unlock();
         if (pMsg)
-        {   MsgSend(pMsg, false);
-        }
-        if (!size)
-            break;
+            MsgSend(pMsg, false);
     }
 }
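
fMsgCurrentMutex becomes std::recursive_mutex rather than std::mutex, which keeps the old hsMutex semantics: the pthread implementation removed above was created with PTHREAD_MUTEX_RECURSIVE, and Win32 mutexes are likewise recursive, so dispatch code may re-acquire this lock on a thread that already holds it. A toy sketch of the difference, with names that are illustrative only:

    #include <mutex>

    std::recursive_mutex gStateLock;   // hypothetical lock and state
    int gState = 0;

    void Touch()
    {
        std::lock_guard<std::recursive_mutex> lock(gStateLock);
        ++gState;
    }

    void Update()
    {
        std::lock_guard<std::recursive_mutex> lock(gStateLock);
        Touch();   // same thread locks again: fine for recursive_mutex,
                   // undefined behaviour for a plain std::mutex
    }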

Sources/Plasma/NucleusLib/pnDispatch/plDispatch.h (10 changes)

@@ -42,7 +42,9 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 #ifndef plDispatch_inc
 #define plDispatch_inc

 #include <list>
+#include <mutex>
 #include "hsTemplates.h"
 #include "plgDispatch.h"
 #include "hsThread.h"
@@ -74,10 +76,10 @@ protected:
     hsKeyedObject* fOwner;
     plMsgWrap* fFutureMsgQueue;
     static int32_t fNumBufferReq;
     static plMsgWrap* fMsgCurrent;
-    static hsMutex fMsgCurrentMutex; // mutex for above
-    static hsMutex fMsgDispatchLock; // mutex for IMsgDispatch
+    static std::recursive_mutex fMsgCurrentMutex; // mutex for above
+    static std::mutex fMsgDispatchLock; // mutex for IMsgDispatch
     static plMsgWrap* fMsgHead;
     static plMsgWrap* fMsgTail;
     static bool fMsgActive;
@@ -86,7 +88,7 @@ protected:
     hsTArray<plTypeFilter*> fRegisteredExactTypes;
     std::list<plMessage*> fQueuedMsgList;
-    hsMutex fQueuedMsgListMutex; // mutex for above
+    std::mutex fQueuedMsgListMutex; // mutex for above
     bool fQueuedMsgOn; // Turns on or off Queued Messages, Plugins need them off

     hsKeyedObject* IGetOwner() { return fOwner; }

Sources/Plasma/NucleusLib/pnNetCli/pnNcChannel.cpp (80 changes)

@@ -46,8 +46,8 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 ***/

 #include "Pch.h"
-#include "hsThread.h"
 #include <list>
+#include <mutex>
 #include "hsRefCnt.h"
 #pragma hdrstop
@@ -61,16 +61,24 @@ namespace pnNetCli {
 ***/

 struct ChannelCrit {
-    ~ChannelCrit ();
-    ChannelCrit ();
-    inline void Enter () { m_critsect.Lock(); }
-    inline void Leave () { m_critsect.Unlock(); }
-    inline void EnterSafe () { if (m_init) m_critsect.Lock(); }
-    inline void LeaveSafe () { if (m_init) m_critsect.Unlock(); }
+    ~ChannelCrit();
+    ChannelCrit() : m_init(true) { }
+
+    inline void lock()
+    {
+        hsAssert(m_init, "Bad things have happened.");
+        m_critsect.lock();
+    }
+
+    inline void unlock()
+    {
+        hsAssert(m_init, "Bad things have happened.");
+        m_critsect.unlock();
+    }

 private:
     bool        m_init;
-    hsMutex     m_critsect;
+    std::mutex  m_critsect;
 };

 struct NetMsgChannel : hsRefCnt {
@@ -100,14 +108,10 @@ static std::list<NetMsgChannel*>* s_channels;
 *
 ***/

-//===========================================================================
-ChannelCrit::ChannelCrit () {
-    m_init = true;
-}
-
 //===========================================================================
 ChannelCrit::~ChannelCrit () {
-    EnterSafe();
+    std::lock_guard<ChannelCrit> lock(*this);
     if (s_channels) {
         while (s_channels->size()) {
             NetMsgChannel* const channel = s_channels->front();
@@ -118,7 +122,6 @@ ChannelCrit::~ChannelCrit () {
         delete s_channels;
         s_channels = nil;
     }
-    LeaveSafe();
 }
@@ -298,15 +301,14 @@ NetMsgChannel * NetMsgChannelLock (
     uint32_t *      largestRecv
 ) {
     NetMsgChannel * channel;
-    s_channelCrit.Enter();
-    if (nil != (channel = FindChannel_CS(protocol, server))) {
+    std::lock_guard<ChannelCrit> lock(s_channelCrit);
+    if (nullptr != (channel = FindChannel_CS(protocol, server))) {
         *largestRecv = channel->m_largestRecv;
         channel->Ref("ChannelLock");
     }
     else {
         *largestRecv = 0;
     }
-    s_channelCrit.Leave();
     return channel;
 }
@@ -314,11 +316,9 @@ NetMsgChannel * NetMsgChannelLock (
 void NetMsgChannelUnlock (
     NetMsgChannel * channel
 ) {
-    s_channelCrit.Enter();
-    {
-        channel->UnRef("ChannelLock");
-    }
-    s_channelCrit.Leave();
+    std::lock_guard<ChannelCrit> lock(s_channelCrit);
+    channel->UnRef("ChannelLock");
 }
@@ -388,33 +388,31 @@ void NetMsgProtocolRegister (
     const plBigNum&       dh_xa,    // client: dh_x server: dh_a
     const plBigNum&       dh_n
 ) {
-    s_channelCrit.EnterSafe();
-    {
-        NetMsgChannel * channel = FindOrCreateChannel_CS(protocol, server);
+    std::lock_guard<ChannelCrit> lock(s_channelCrit);

-        // make sure no connections have been established on this protocol, otherwise
-        // we'll be modifying a live data structure; NetCli's don't lock their protocol
-        // to operate on it once they have linked to it!
-        ASSERT(channel->RefCnt() == 1);
+    NetMsgChannel * channel = FindOrCreateChannel_CS(protocol, server);

-        channel->m_dh_g    = dh_g;
-        channel->m_dh_xa   = dh_xa;
-        channel->m_dh_n    = dh_n;
+    // make sure no connections have been established on this protocol, otherwise
+    // we'll be modifying a live data structure; NetCli's don't lock their protocol
+    // to operate on it once they have linked to it!
+    ASSERT(channel->RefCnt() == 1);

-        if (sendMsgCount)
-            AddSendMsgs_CS(channel, sendMsgs, sendMsgCount);
-        if (recvMsgCount)
-            AddRecvMsgs_CS(channel, recvMsgs, recvMsgCount);
-    }
-    s_channelCrit.LeaveSafe();
+    channel->m_dh_g    = dh_g;
+    channel->m_dh_xa   = dh_xa;
+    channel->m_dh_n    = dh_n;
+
+    if (sendMsgCount)
+        AddSendMsgs_CS(channel, sendMsgs, sendMsgCount);
+    if (recvMsgCount)
+        AddRecvMsgs_CS(channel, recvMsgs, recvMsgCount);
 }

 //===========================================================================
 void NetMsgProtocolDestroy (uint32_t protocol, bool server) {
-    s_channelCrit.EnterSafe();
+    std::lock_guard<ChannelCrit> lock(s_channelCrit);
     if (NetMsgChannel* channel = FindChannel_CS(protocol, server)) {
         s_channels->remove(channel);
         channel->UnRef("ChannelLink");
     }
-    s_channelCrit.LeaveSafe();
 }
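
ChannelCrit keeps its asserting wrapper but renames Enter()/Leave() to lock()/unlock(), which makes it satisfy the standard BasicLockable requirements, so std::lock_guard<ChannelCrit> can guard it directly, as the destructor and the functions above now do. A stripped-down sketch of the same idea, assuming a plain assert() in place of hsAssert:

    #include <cassert>
    #include <mutex>

    // Any type with lock()/unlock() can be used with std::lock_guard.
    struct CheckedLock {
        void lock()   { assert(m_init); m_mutex.lock(); }
        void unlock() { assert(m_init); m_mutex.unlock(); }
    private:
        bool       m_init = true;
        std::mutex m_mutex;
    };

    CheckedLock s_lock;    // illustrative global, mirroring s_channelCrit

    void DoGuardedWork()
    {
        std::lock_guard<CheckedLock> lock(s_lock);   // calls s_lock.lock()/unlock()
        // protected work here
    }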

Sources/Plasma/PubUtilLib/plAudioCore/plSoundBuffer.cpp (22 changes)

@@ -90,12 +90,13 @@ hsError plSoundPreloader::Run()
     while (fRunning)
     {
-        fCritSect.Lock();
-        while (fBuffers.GetCount())
         {
-            templist.Append(fBuffers.Pop());
+            std::lock_guard<std::mutex> lock(fCritSect);
+            while (fBuffers.GetCount())
+            {
+                templist.Append(fBuffers.Pop());
+            }
         }
-        fCritSect.Unlock();

         if (templist.GetCount() == 0)
         {
@@ -130,14 +131,15 @@ hsError plSoundPreloader::Run()
     }

     // we need to be sure that all buffers are removed from our load list when shutting this thread down or we will hang,
     // since the sound buffer will wait to be destroyed until it is marked as loaded
-    fCritSect.Lock();
-    while (fBuffers.GetCount())
     {
-        plSoundBuffer* buf = fBuffers.Pop();
-        buf->SetLoaded(true);
+        std::lock_guard<std::mutex> lock(fCritSect);
+        while (fBuffers.GetCount())
+        {
+            plSoundBuffer* buf = fBuffers.Pop();
+            buf->SetLoaded(true);
+        }
     }
-    fCritSect.Unlock();

     return hsOK;
 }

Sources/Plasma/PubUtilLib/plAudioCore/plSoundBuffer.h (10 changes)

@@ -57,6 +57,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 #include "plAudioFileReader.h"
 #include "hsThread.h"
 #include "plFileSystem.h"
+#include <mutex>

 //// Class Definition ////////////////////////////////////////////////////////
@@ -165,7 +166,7 @@ protected:
     hsTArray<plSoundBuffer*> fBuffers;
     hsEvent fEvent;
     bool fRunning;
-    hsMutex fCritSect;
+    std::mutex fCritSect;

 public:
     virtual hsError Run();
@@ -184,9 +185,10 @@ public:
     bool IsRunning() const { return fRunning; }

     void AddBuffer(plSoundBuffer* buffer) {
-        fCritSect.Lock();
-        fBuffers.Push(buffer);
-        fCritSect.Unlock();
+        {
+            std::lock_guard<std::mutex> lock(fCritSect);
+            fBuffers.Push(buffer);
+        }

         fEvent.Signal();
     }

Sources/Plasma/PubUtilLib/plFile/plStreamSource.cpp (8 changes)

@@ -56,7 +56,7 @@ plStreamSource::plStreamSource()
 void plStreamSource::ICleanup()
 {
-    hsTempMutexLock lock(fMutex);
+    std::lock_guard<std::mutex> lock(fMutex);

     // loop through all the file data records, and delete the streams
     decltype(fFileData.begin()) curData;
@@ -72,7 +72,7 @@ void plStreamSource::ICleanup()
 hsStream* plStreamSource::GetFile(const plFileName& filename)
 {
-    hsTempMutexLock lock(fMutex);
+    std::lock_guard<std::mutex> lock(fMutex);

     plFileName sFilename = filename.Normalize('/');
     if (fFileData.find(sFilename) == fFileData.end())
@@ -112,7 +112,7 @@ std::vector<plFileName> plStreamSource::GetListOfNames(const plFileName& dir, co
 {
     plFileName sDir = dir.Normalize('/');
     hsAssert(ext.CharAt(0) != '.', "Don't add a dot");
-    hsTempMutexLock lock(fMutex);
+    std::lock_guard<std::mutex> lock(fMutex);

     // loop through all the file data records, and create the list
     std::vector<plFileName> retVal;
@@ -142,7 +142,7 @@ bool plStreamSource::InsertFile(const plFileName& filename, hsStream* stream)
 {
     plFileName sFilename = filename.Normalize('/');

-    hsTempMutexLock lock(fMutex);
+    std::lock_guard<std::mutex> lock(fMutex);
     if (fFileData.find(sFilename) != fFileData.end())
         return false; // duplicate entry, return failure

Sources/Plasma/PubUtilLib/plFile/plStreamSource.h (4 changes)

@@ -43,8 +43,8 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 #define plStreamSource_h_inc

 #include <map>
+#include <mutex>
 #include "hsStream.h"
-#include "hsThread.h"

 // A class for holding and accessing file streams. The preloader will insert
 // files in here once they are loaded. In internal builds, if a requested file
@@ -60,7 +60,7 @@ private:
         hsStream*   fStream; // we own this pointer, so clean it up
     };
     std::map<plFileName, fileData, plFileName::less_i> fFileData; // key is filename
-    hsMutex fMutex;
+    std::mutex fMutex;
     uint32_t fServerKey[4];

     void ICleanup(); // closes all file pointers and cleans up after itself

Sources/Plasma/PubUtilLib/plMath/plAvg.cpp (2 changes)

@@ -55,7 +55,7 @@ const float TimeBasedAvgRing<T>::kPercision = 0.001;
 template <class T>
 void TimeBasedAvgRing<T>::AddItem(T value, double time)
 {
-    hsTempMutexLock lock( fLock );
+    std::lock_guard<std::mutex> lock(fLock);

     if ( fList.empty() )
     {

Sources/Plasma/PubUtilLib/plMath/plAvg.h (6 changes)

@@ -44,9 +44,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 #include "HeadSpin.h"
 #include <list>
-
-#include "hsThread.h"
+#include <mutex>

 // A Time based Value Averaging class
 // implemented in a ring buffer
@@ -91,7 +89,7 @@ private:
     float fMaxAvg;
     double fTotal;
     TimeListIterator fRingStart, fRingEnd;
-    hsMutex fLock;
+    std::mutex fLock;

 public:
     TimeBasedAvgRing():fLen(0.f),fAvg(0.f),fMaxAvg(0.f),fTotal(0.0) {}

Sources/Plasma/PubUtilLib/plStatusLog/plStatusLog.h (3 changes)

@@ -69,7 +69,6 @@ class plPipeline;
 // really be visible at any given time.

 class plStatusLogMgr;
-class hsMutex;
 class plStatusLogDrawerStub;

 class plStatusLog
 {
@@ -206,8 +205,6 @@ class plStatusLogMgr
         static plFileName IGetBasePath();

-        hsMutex fMutex; // To make multithreaded-safe
-
     public:

         enum
