
Merge pull request #443 from zrax/cxx11_synch

C++11 synchronization primitives
Adam Johnson committed 10 years ago (commit b1e93f92f4)
  1. Sources/Plasma/CoreLib/CMakeLists.txt (1 changed line)
  2. Sources/Plasma/CoreLib/hsThread.cpp (108 changed lines)
  3. Sources/Plasma/CoreLib/hsThread.h (205 changed lines)
  4. Sources/Plasma/CoreLib/hsThread_Mac.cpp (34 changed lines)
  5. Sources/Plasma/CoreLib/hsThread_Unix.cpp (274 changed lines)
  6. Sources/Plasma/CoreLib/hsThread_Win.cpp (88 changed lines)
  7. Sources/Plasma/FeatureLib/pfCrashHandler/plCrashBase.cpp (4 changed lines)
  8. Sources/Plasma/FeatureLib/pfCrashHandler/plCrashBase.h (4 changed lines)
  9. Sources/Plasma/FeatureLib/pfPatcher/pfPatcher.cpp (31 changed lines)
  10. Sources/Plasma/NucleusLib/pnAsyncCoreExe/pnAceIo.cpp (24 changed lines)
  11. Sources/Plasma/NucleusLib/pnDispatch/plDispatch.cpp (96 changed lines)
  12. Sources/Plasma/NucleusLib/pnDispatch/plDispatch.h (10 changed lines)
  13. Sources/Plasma/NucleusLib/pnNetCli/pnNcChannel.cpp (80 changed lines)
  14. Sources/Plasma/PubUtilLib/plAudioCore/plSoundBuffer.cpp (22 changed lines)
  15. Sources/Plasma/PubUtilLib/plAudioCore/plSoundBuffer.h (10 changed lines)
  16. Sources/Plasma/PubUtilLib/plFile/plStreamSource.cpp (8 changed lines)
  17. Sources/Plasma/PubUtilLib/plFile/plStreamSource.h (4 changed lines)
  18. Sources/Plasma/PubUtilLib/plMath/plAvg.cpp (2 changed lines)
  19. Sources/Plasma/PubUtilLib/plMath/plAvg.h (6 changed lines)
  20. Sources/Plasma/PubUtilLib/plNetGameLib/Private/plNglFile.cpp (28 changed lines)
  21. Sources/Plasma/PubUtilLib/plStatusLog/plStatusLog.cpp (4 changed lines)
  22. Sources/Plasma/PubUtilLib/plStatusLog/plStatusLog.h (5 changed lines)
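
Across the files below, the engine's hand-rolled hsMutex, hsTempMutexLock, and most of the platform-specific hsEvent/hsSemaphore code are removed in favor of the C++11 standard primitives (std::mutex, std::condition_variable, std::lock_guard, std::unique_lock, std::atomic). The recurring substitution is an explicit Lock()/Unlock() pair becoming a scoped RAII guard; a minimal sketch of that before/after shape (the queue and names here are illustrative, not from the patch):

    #include <deque>
    #include <mutex>

    std::mutex gQueueMutex;                 // replaces an hsMutex member
    std::deque<int> gQueue;

    void Enqueue(int value)
    {
        // Before: gQueueMutex.Lock(); ... gQueueMutex.Unlock();
        // After: the guard releases the mutex at end of scope, even if an exception is thrown.
        std::lock_guard<std::mutex> lock(gQueueMutex);
        gQueue.push_back(value);
    }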

1
Sources/Plasma/CoreLib/CMakeLists.txt

@ -26,7 +26,6 @@ set(CoreLib_SOURCES
hsStream.cpp
hsStringTokenizer.cpp
hsTemplates.cpp
hsThread.cpp
hsWide.cpp
pcSmallRect.cpp
plFileSystem.cpp

108
Sources/Plasma/CoreLib/hsThread.cpp

@ -1,108 +0,0 @@
/*==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==*/
#ifndef CoreLib_Thread
#define CoreLib_Thread
#include "hsThread.h"
//////////////////////////////////////////////////////////////////////////////
hsReaderWriterLock::hsReaderWriterLock(Callback * cb)
: fReaderCount( 0 )
, fWriterSema( 1 )
, fCallback( cb )
{
}
void hsReaderWriterLock::LockForReading()
{
if ( fCallback )
fCallback->OnLockingForRead( this );
fReaderCountLock.Lock();
fReaderLock.Lock();
fReaderCount++;
if ( fReaderCount==1 )
fWriterSema.Wait();
fReaderLock.Unlock();
fReaderCountLock.Unlock();
if ( fCallback )
fCallback->OnLockedForRead( this );
}
void hsReaderWriterLock::UnlockForReading()
{
if ( fCallback )
fCallback->OnUnlockingForRead( this );
fReaderLock.Lock();
fReaderCount--;
if ( fReaderCount==0 )
fWriterSema.Signal();
fReaderLock.Unlock();
if ( fCallback )
fCallback->OnUnlockedForRead( this );
}
void hsReaderWriterLock::LockForWriting()
{
if ( fCallback )
fCallback->OnLockingForWrite( this );
fReaderCountLock.Lock();
fWriterSema.Wait();
hsAssert( fReaderCount==0, "Locked for writing, but fReaderCount>0" );
if ( fCallback )
fCallback->OnLockedForWrite( this );
}
void hsReaderWriterLock::UnlockForWriting()
{
if ( fCallback )
fCallback->OnUnlockingForWrite( this );
fWriterSema.Signal();
fReaderCountLock.Unlock();
if ( fCallback )
fCallback->OnUnlockedForWrite( this );
}
#endif // CoreLib_Thread

205
Sources/Plasma/CoreLib/hsThread.h

@ -43,6 +43,9 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
#define hsThread_Defined
#include "HeadSpin.h"
#include <atomic>
#include <mutex>
#include <condition_variable>
typedef uint32_t hsMilliseconds;
@ -105,46 +108,44 @@ public:
};
//////////////////////////////////////////////////////////////////////////////
class hsSemaphore
{
std::mutex fMutex;
std::condition_variable fCondition;
unsigned fValue;
class hsMutex {
#if HS_BUILD_FOR_WIN32
HANDLE fMutexH;
#elif HS_BUILD_FOR_UNIX
pthread_mutex_t fPMutex;
#endif
public:
hsMutex();
virtual ~hsMutex();
#ifdef HS_BUILD_FOR_WIN32
HANDLE GetHandle() const { return fMutexH; }
#endif
void Lock();
bool TryLock();
void Unlock();
};
hsSemaphore(unsigned initial = 0) : fValue(initial) { }
class hsTempMutexLock {
hsMutex* fMutex;
public:
hsTempMutexLock(hsMutex* mutex) : fMutex(mutex)
inline void Wait()
{
fMutex->Lock();
std::unique_lock<std::mutex> lock(fMutex);
fCondition.wait(lock, [this]() { return fValue > 0; });
--fValue;
}
hsTempMutexLock(hsMutex& mutex) : fMutex(&mutex)
template <class _Rep, class _Period>
inline bool Wait(const std::chrono::duration<_Rep, _Period> &duration)
{
fMutex->Lock();
std::unique_lock<std::mutex> lock(fMutex);
bool result = fCondition.wait_for(lock, duration, [this]() { return fValue > 0; });
if (result)
--fValue;
return result;
}
~hsTempMutexLock()
inline void Signal()
{
fMutex->Unlock();
std::unique_lock<std::mutex> lock(fMutex);
++fValue;
fCondition.notify_one();
}
};
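
The new hsSemaphore above is an in-process counting semaphore built from std::mutex and std::condition_variable (the standard library only gains std::counting_semaphore in C++20). A minimal usage sketch, assuming the class as declared in hsThread.h; the producer/consumer names are illustrative:

    #include <chrono>
    #include "hsThread.h"                   // for hsSemaphore as shown above

    hsSemaphore gWorkReady;                 // initial count 0

    void Producer()
    {
        // ... make one work item available ...
        gWorkReady.Signal();                // bump the count and wake one waiter
    }

    void Consumer()
    {
        gWorkReady.Wait();                  // block until the count is positive, then decrement
        // Timed form: returns false if nothing was signalled within 100 ms.
        // bool got = gWorkReady.Wait(std::chrono::milliseconds(100));
    }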
//////////////////////////////////////////////////////////////////////////////
class hsSemaphore {
class hsGlobalSemaphore {
#if HS_BUILD_FOR_WIN32
HANDLE fSemaH;
#elif HS_BUILD_FOR_UNIX
@ -154,49 +155,41 @@ class hsSemaphore {
#else
pthread_mutex_t fPMutex;
pthread_cond_t fPCond;
int32_t fCounter;
int32_t fCounter;
#endif
#endif
public:
hsSemaphore(int initialValue=0, const char* name=nil);
~hsSemaphore();
hsGlobalSemaphore(int initialValue = 0, const char* name = nullptr);
~hsGlobalSemaphore();
#ifdef HS_BUILD_FOR_WIN32
HANDLE GetHandle() const { return fSemaH; }
#endif
bool TryWait();
bool Wait(hsMilliseconds timeToWait = kPosInfinity32);
void Signal();
bool Wait(hsMilliseconds timeToWait = kPosInfinity32);
void Signal();
};
//////////////////////////////////////////////////////////////////////////////
class hsEvent
{
#if HS_BUILD_FOR_UNIX
#ifndef PSEUDO_EVENT
pthread_mutex_t fMutex;
pthread_cond_t fCond;
bool fTriggered;
#else
enum { kRead, kWrite };
int fFds[2];
hsMutex fWaitLock;
hsMutex fSignalLock;
#endif // PSEUDO_EVENT
#elif HS_BUILD_FOR_WIN32
HANDLE fEvent;
#endif
std::mutex fMutex;
std::condition_variable fCondition;
public:
hsEvent();
~hsEvent();
hsEvent() { }
#ifdef HS_BUILD_FOR_WIN32
HANDLE GetHandle() const { return fEvent; }
#endif
inline void Wait()
{
std::unique_lock<std::mutex> lock(fMutex);
fCondition.wait(lock);
}
bool Wait(hsMilliseconds timeToWait = kPosInfinity32);
void Signal();
inline void Signal()
{
std::unique_lock<std::mutex> lock(fMutex);
fCondition.notify_one();
}
};
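
hsEvent is now a thin wrapper over std::condition_variable: Wait() blocks on the condition and Signal() calls notify_one() under the mutex. For comparison, the usual condition-variable idiom pairs the wait with a flag and a predicate, so that a signal delivered before the wait (or a spurious wakeup) is handled; a minimal sketch of that idiom, separate from the class above:

    #include <condition_variable>
    #include <mutex>

    std::mutex m;
    std::condition_variable cv;
    bool triggered = false;     // remembers a signal that arrives before the wait

    void WaitForEvent()
    {
        std::unique_lock<std::mutex> lock(m);
        cv.wait(lock, [] { return triggered; });   // also guards against spurious wakeups
        triggered = false;
    }

    void SignalEvent()
    {
        std::lock_guard<std::mutex> lock(m);
        triggered = true;
        cv.notify_one();
    }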
//////////////////////////////////////////////////////////////////////////////
@ -212,64 +205,80 @@ public:
class hsReaderWriterLock
{
public:
struct Callback
{
virtual void OnLockingForRead( hsReaderWriterLock * lock ) {}
virtual void OnLockedForRead( hsReaderWriterLock * lock ) {}
virtual void OnUnlockingForRead( hsReaderWriterLock * lock ) {}
virtual void OnUnlockedForRead( hsReaderWriterLock * lock ) {}
virtual void OnLockingForWrite( hsReaderWriterLock * lock ) {}
virtual void OnLockedForWrite( hsReaderWriterLock * lock ) {}
virtual void OnUnlockingForWrite( hsReaderWriterLock * lock ) {}
virtual void OnUnlockedForWrite( hsReaderWriterLock * lock ) {}
};
hsReaderWriterLock(Callback * cb=nullptr);
void LockForReading();
void UnlockForReading();
void LockForWriting();
void UnlockForWriting();
hsReaderWriterLock() : fReaderCount(0), fWriterSem(1) { }
private:
int fReaderCount;
hsMutex fReaderCountLock;
hsMutex fReaderLock;
hsSemaphore fWriterSema;
Callback * fCallback;
};
class hsLockForReading
{
hsReaderWriterLock * fLock;
public:
hsLockForReading( hsReaderWriterLock & lock ): fLock( &lock )
void LockForReading()
{
fLock->LockForReading();
// Don't allow us to start reading if there's still an active writer
std::lock_guard<std::mutex> lock(fReaderLock);
fReaderCount++;
if (fReaderCount == 1) {
// Block writers from starting (wait is a misnomer here)
fWriterSem.Wait();
}
}
hsLockForReading( hsReaderWriterLock * lock ): fLock( lock )
void UnlockForReading()
{
fLock->LockForReading();
fReaderCount--;
if (fReaderCount == 0)
fWriterSem.Signal();
}
~hsLockForReading()
void LockForWriting()
{
fLock->UnlockForReading();
// Blocks new readers from starting
fReaderLock.lock();
// Wait until all readers are done
fWriterSem.Wait();
}
};
class hsLockForWriting
{
hsReaderWriterLock * fLock;
public:
hsLockForWriting( hsReaderWriterLock & lock ): fLock( &lock )
void UnlockForWriting()
{
fLock->LockForWriting();
fWriterSem.Signal();
fReaderLock.unlock();
}
hsLockForWriting( hsReaderWriterLock * lock ): fLock( lock )
std::atomic<int> fReaderCount;
std::mutex fReaderLock;
hsSemaphore fWriterSem;
friend class hsLockForReading;
friend class hsLockForWriting;
};
class hsLockForReading
{
hsReaderWriterLock& fLock;
public:
hsLockForReading(hsReaderWriterLock& lock) : fLock(lock)
{
fLock.LockForReading();
}
~hsLockForReading()
{
fLock->LockForWriting();
fLock.UnlockForReading();
}
};
class hsLockForWriting
{
hsReaderWriterLock& fLock;
public:
hsLockForWriting(hsReaderWriterLock& lock) : fLock(lock)
{
fLock.LockForWriting();
}
~hsLockForWriting()
{
fLock->UnlockForWriting();
fLock.UnlockForWriting();
}
};
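
With the callback machinery gone, hsReaderWriterLock is meant to be driven through the RAII helpers above. A minimal usage sketch under that assumption; the guarded map and function names are illustrative:

    #include <map>
    #include "hsThread.h"                   // for hsReaderWriterLock and the guards

    hsReaderWriterLock gTableLock;
    std::map<int, int> gTable;              // illustrative shared data

    int LookUp(int key)
    {
        hsLockForReading lock(gTableLock);  // many readers may hold the lock at once
        auto it = gTable.find(key);
        return it != gTable.end() ? it->second : -1;
    }

    void Insert(int key, int value)
    {
        hsLockForWriting lock(gTableLock);  // exclusive: blocks new readers, waits for old ones
        gTable[key] = value;
    }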

34
Sources/Plasma/CoreLib/hsThread_Mac.cpp

@ -115,45 +115,19 @@ void hsThread::ThreadYield()
//////////////////////////////////////////////////////////////////////////////
hsMutex::hsMutex()
{
OSStatus status = ::MPCreateCriticalRegion(&fCriticalRegion);
hsThrowIfOSErr(status);
}
hsMutex::~hsMutex()
{
OSStatus status = ::MPDeleteCriticalRegion(fCriticalRegion);
hsThrowIfOSErr(status);
}
void hsMutex::Lock()
{
OSStatus status = ::MPEnterCriticalRegion(fCriticalRegion, kDurationForever);
hsThrowIfOSErr(status);
}
void hsMutex::Unlock()
{
OSStatus status = ::MPExitCriticalRegion(fCriticalRegion);
hsThrowIfOSErr(status);
}
//////////////////////////////////////////////////////////////////////////////
hsSemaphore::hsSemaphore(int initialValue)
hsGlobalSemaphore::hsGlobalSemaphore(int initialValue)
{
OSStatus status = MPCreateSemaphore(kPosInfinity32, initialValue, &fSemaId);
hsThrowIfOSErr(status);
}
hsSemaphore::~hsSemaphore()
hsGlobalSemaphore::~hsGlobalSemaphore()
{
OSStatus status = MPDeleteSemaphore(fSemaId);
hsThrowIfOSErr(status);
}
bool hsSemaphore::Wait(hsMilliseconds timeToWait)
bool hsGlobalSemaphore::Wait(hsMilliseconds timeToWait)
{
Duration duration;
@ -171,7 +145,7 @@ bool hsSemaphore::Wait(hsMilliseconds timeToWait)
return true;
}
void hsSemaphore::Signal()
void hsGlobalSemaphore::Signal()
{
OSStatus status = MPSignalSemaphore(fSemaId);
hsThrowIfOSErr(status);

274
Sources/Plasma/CoreLib/hsThread_Unix.cpp

@ -190,86 +190,9 @@ static void InitEventLoggingFile()
#endif
hsMutex::hsMutex()
{
#ifdef MUTEX_TIMING
InitMutexTimerFile();
#endif
// create mutex attributes
pthread_mutexattr_t attr;
int status = ::pthread_mutexattr_init(&attr);
hsThrowIfOSErr(status);
// make the mutex attributes recursive
status = ::pthread_mutexattr_settype(&attr,PTHREAD_MUTEX_RECURSIVE);
hsThrowIfOSErr(status);
//init the mutex
status = ::pthread_mutex_init(&fPMutex, &attr);
hsThrowIfOSErr(status);
// destroy the attributes
status = ::pthread_mutexattr_destroy(&attr);
hsThrowIfOSErr(status);
}
hsMutex::~hsMutex()
{
int status = ::pthread_mutex_destroy(&fPMutex);
hsThrowIfOSErr(status);
}
void hsMutex::Lock()
{
#ifdef MUTEX_TIMING
# ifndef HS_DEBUGGING
timeval tv;
hsWide start;
gettimeofday( &tv, nil );
start.Mul( tv.tv_sec, 1000000 )->Add( tv.tv_usec );
# endif
#endif
int status = ::pthread_mutex_lock(&fPMutex);
hsThrowIfOSErr(status);
#ifdef MUTEX_TIMING
# ifndef HS_DEBUGGING
hsWide diff;
gettimeofday( &tv, nil );
diff.Mul( tv.tv_sec, 1000000 )->Add( tv.tv_usec )->Sub( &start )->Div( 1000000 );
double duration = diff.AsDouble();
if ( gMutexTimerFile && duration>0.005 )
{
time_t t;
time( &t );
struct tm *now = localtime( &t );
char tmp[30];
strftime( tmp, 30, "%c", now );
fprintf( gMutexTimerFile, "[%s] [%lu:%lu] %f\n", tmp, getpid(), hsThread::GetMyThreadId(), duration );
}
# endif
#endif
}
bool hsMutex::TryLock()
{
int status = ::pthread_mutex_trylock(&fPMutex);
hsThrowIfOSErr(status);
return status==EBUSY?false:true;
}
void hsMutex::Unlock()
{
int status = ::pthread_mutex_unlock(&fPMutex);
hsThrowIfOSErr(status);
}
/////////////////////////////////////////////////////////////////////////////
hsSemaphore::hsSemaphore(int initialValue, const char* name)
hsGlobalSemaphore::hsGlobalSemaphore(int initialValue, const char* name)
{
#ifdef USE_SEMA
fPSema = nil;
@ -299,7 +222,7 @@ hsSemaphore::hsSemaphore(int initialValue, const char* name)
#endif
}
hsSemaphore::~hsSemaphore()
hsGlobalSemaphore::~hsGlobalSemaphore()
{
#ifdef USE_SEMA
int status = 0;
@ -319,22 +242,7 @@ hsSemaphore::~hsSemaphore()
#endif
}
bool hsSemaphore::TryWait()
{
#ifdef USE_SEMA
int status = ::sem_trywait(fPSema);
if (status != 0) {
return errno != EAGAIN;
}
return true;
#else
int status = ::pthread_mutex_trylock(&fPMutex);
hsThrowIfOSErr(status);
return status==EBUSY ? false : true;
#endif
}
bool hsSemaphore::Wait(hsMilliseconds timeToWait)
bool hsGlobalSemaphore::Wait(hsMilliseconds timeToWait)
{
#ifdef USE_SEMA // SHOULDN'T THIS USE timeToWait??!?!? -rje
// shouldn't this use sem_timedwait? -dpogue (2012-03-04)
@ -386,7 +294,7 @@ EXIT:
#endif
}
void hsSemaphore::Signal()
void hsGlobalSemaphore::Signal()
{
#ifdef USE_SEMA
int status = sem_post(fPSema);
@ -405,180 +313,6 @@ void hsSemaphore::Signal()
#endif
}
///////////////////////////////////////////////////////////////
#ifndef PSEUDO_EVENT
hsEvent::hsEvent() : fTriggered(false)
{
#ifdef EVENT_LOGGING
InitEventLoggingFile();
#endif
int status = ::pthread_mutex_init(&fMutex, nil);
hsAssert(status == 0, "hsEvent Mutex Init");
hsThrowIfOSErr(status);
// fCond = PTHREAD_COND_INITIALIZER;
status = ::pthread_cond_init(&fCond, nil);
hsAssert(status == 0, "hsEvent Cond Init");
hsThrowIfOSErr(status);
}
hsEvent::~hsEvent()
{
int status = ::pthread_cond_destroy(&fCond);
hsAssert(status == 0, "hsEvent Cond De-Init");
hsThrowIfOSErr(status);
status = ::pthread_mutex_destroy(&fMutex);
hsAssert(status == 0, "hsEvent Mutex De-Init");
hsThrowIfOSErr(status);
}
bool hsEvent::Wait(hsMilliseconds timeToWait)
{
bool retVal = true;
int status = ::pthread_mutex_lock(&fMutex);
hsAssert(status == 0, "hsEvent Mutex Lock");
hsThrowIfOSErr(status);
#ifdef EVENT_LOGGING
fprintf(gEventLoggingFile,"Event: %p - In Wait (pre trig check), Triggered: %d, t=%f\n",this,fTriggered,hsTimer::GetSeconds());
#endif
if ( !fTriggered )
{
if (timeToWait == kPosInfinity32)
{
status = ::pthread_cond_wait(&fCond, &fMutex);
hsAssert(status == 0, "hsEvent Cond Wait");
hsThrowIfOSErr(status);
}
else
{ timespec spec;
int result;
result = ::clock_gettime(CLOCK_REALTIME, &spec);
hsThrowIfFalse(result == 0);
spec.tv_sec += timeToWait / 1000;
spec.tv_nsec += (timeToWait % 1000) * 1000 * 1000;
while (spec.tv_nsec >= 1000 * 1000 * 1000)
{ spec.tv_sec += 1;
spec.tv_nsec -= 1000 * 1000 * 1000;
}
status = ::pthread_cond_timedwait(&fCond, &fMutex, &spec);
if (status == ETIMEDOUT)
{
// It's a conditional paired with a variable!
// Pthread docs all use a variable in conjunction with the conditional
retVal = fTriggered;
status = 0;
#ifdef EVENT_LOGGING
fprintf(gEventLoggingFile,"Event: %p - In Wait (wait timed out), Triggered: %d, t=%f\n",this,fTriggered,hsTimer::GetSeconds());
#endif
}
else
{
#ifdef EVENT_LOGGING
fprintf(gEventLoggingFile,"Event: %p - In Wait (wait recvd signal), Triggered: %d, t=%f\n",this,fTriggered,hsTimer::GetSeconds());
#endif
}
hsAssert(status == 0, "hsEvent Cond Wait");
hsThrowIfOSErr(status);
}
}
else
{
#ifdef EVENT_LOGGING
fprintf(gEventLoggingFile,"Event: %p - In Wait (post triggerd), Triggered: %d, t=%f\n",this,fTriggered,hsTimer::GetSeconds());
#endif
}
fTriggered = false;
status = ::pthread_mutex_unlock(&fMutex);
hsAssert(status == 0, "hsEvent Mutex Unlock");
hsThrowIfOSErr(status);
return retVal;
}
void hsEvent::Signal()
{
int status = ::pthread_mutex_lock(&fMutex);
hsAssert(status == 0, "hsEvent Mutex Lock");
hsThrowIfOSErr(status);
#ifdef EVENT_LOGGING
fprintf(gEventLoggingFile,"Event: %p - In Signal, Triggered: %d, t=%f\n",this,fTriggered,hsTimer::GetSeconds());
#endif
fTriggered = true;
status = ::pthread_cond_broadcast(&fCond);
hsAssert(status == 0, "hsEvent Cond Broadcast");
hsThrowIfOSErr(status);
status = ::pthread_mutex_unlock(&fMutex);
hsAssert(status == 0, "hsEvent Mutex Unlock");
hsThrowIfOSErr(status);
}
#else
hsEvent::hsEvent()
{
pipe( fFds );
}
hsEvent::~hsEvent()
{
close( fFds[kRead] );
close( fFds[kWrite] );
}
bool hsEvent::Wait( hsMilliseconds timeToWait )
{
hsTempMutexLock lock( fWaitLock );
fd_set fdset;
FD_ZERO( &fdset );
FD_SET( fFds[kRead], &fdset );
int ans;
if( timeToWait==kPosInfinity32 )
{
ans = select( fFds[kRead]+1, &fdset, nil, nil, nil );
}
else
{
struct timeval tv;
tv.tv_sec = timeToWait / 1000;
tv.tv_usec = ( timeToWait % 1000 ) * 1000;
ans = select( fFds[kRead]+1, &fdset, nil, nil, &tv );
}
bool signaled = false;
if ( ans>0 )
{
char buf[2];
int n = read( fFds[kRead], buf, 1 );
signaled = ( n==1 );
}
return signaled;
}
void hsEvent::Signal()
{
hsTempMutexLock lock( fSignalLock );
write( fFds[kWrite], "*", 1 );
}
#endif
void hsSleep::Sleep(uint32_t millis)
{
uint32_t secs = millis / 1000;

88
Sources/Plasma/CoreLib/hsThread_Win.cpp

@ -135,62 +135,19 @@ void hsThread::ThreadYield()
//////////////////////////////////////////////////////////////////////////////
hsMutex::hsMutex()
{
fMutexH = ::CreateMutex(nil, false, nil);
if (fMutexH == nil)
throw hsOSException(-1);
}
hsMutex::~hsMutex()
{
::CloseHandle(fMutexH);
}
void hsMutex::Lock()
{
DWORD state = ::WaitForSingleObject(fMutexH, INFINITE);
hsAssert(state != WAIT_FAILED,"hsMutex::Lock -> Wait Failed");
hsAssert(state != WAIT_ABANDONED,"hsMutex::Lock -> Abandoned Mutex");
hsAssert(state != WAIT_TIMEOUT,"hsMutex::Lock -> Infinite Timeout expired?");
}
bool hsMutex::TryLock()
{
DWORD state = ::WaitForSingleObject(fMutexH, 0);
hsAssert(state != WAIT_ABANDONED,"hsMutex::TryLock -> Abandoned Mutex");
return state == WAIT_OBJECT_0?true:false;
}
void hsMutex::Unlock()
{
BOOL result = ::ReleaseMutex(fMutexH);
hsAssert(result != 0, "hsMutex::Unlock Failed!");
}
//////////////////////////////////////////////////////////////////////////////
hsSemaphore::hsSemaphore(int initialValue, const char *name)
hsGlobalSemaphore::hsGlobalSemaphore(int initialValue, const char *name)
{
fSemaH = ::CreateSemaphore(nil, initialValue, kPosInfinity32, name);
if (fSemaH == nil)
throw hsOSException(-1);
}
hsSemaphore::~hsSemaphore()
hsGlobalSemaphore::~hsGlobalSemaphore()
{
::CloseHandle(fSemaH);
}
bool hsSemaphore::TryWait()
{
DWORD result = ::WaitForSingleObject(fSemaH, 0);
hsAssert(result != WAIT_ABANDONED, "hsSemaphore -> Abandoned Semaphore");
return result == WAIT_OBJECT_0;
}
bool hsSemaphore::Wait(hsMilliseconds timeToWait)
bool hsGlobalSemaphore::Wait(hsMilliseconds timeToWait)
{
if (timeToWait == kPosInfinity32)
timeToWait = INFINITE;
@ -205,50 +162,13 @@ bool hsSemaphore::Wait(hsMilliseconds timeToWait)
}
}
void hsSemaphore::Signal()
void hsGlobalSemaphore::Signal()
{
::ReleaseSemaphore(fSemaH, 1, nil);
}
///////////////////////////////////////////////////////////////
hsEvent::hsEvent()
{
fEvent = ::CreateEvent(nil,true,false,nil);
if (fEvent == nil)
throw hsOSException(-1);
}
hsEvent::~hsEvent()
{
::CloseHandle(fEvent);
}
bool hsEvent::Wait(hsMilliseconds timeToWait)
{
if (timeToWait == kPosInfinity32)
timeToWait = INFINITE;
DWORD result =::WaitForSingleObject(fEvent, timeToWait);
if (result == WAIT_OBJECT_0)
{
::ResetEvent(fEvent);
return true;
}
else
{ hsThrowIfFalse(result == WAIT_TIMEOUT);
return false;
}
}
void hsEvent::Signal()
{
::SetEvent(fEvent);
}
///////////////////////////////////////////////////////////////
void hsSleep::Sleep(uint32_t millis)
{
::Sleep(millis);

4
Sources/Plasma/FeatureLib/pfCrashHandler/plCrashBase.cpp

@ -53,8 +53,8 @@ void plCrashBase::IInit(const char* file)
{
char sema[128];
snprintf(sema, arrsize(sema), "%s-%s", file, CRASH_NOTIFY_SUFFIX);
fCrashed = new hsSemaphore(0, sema);
fCrashed = new hsGlobalSemaphore(0, sema);
snprintf(sema, arrsize(sema), "%s-%s", file, CRASH_HANDLE_SUFFIX);
fHandled = new hsSemaphore(0, sema);
fHandled = new hsGlobalSemaphore(0, sema);
}

4
Sources/Plasma/FeatureLib/pfCrashHandler/plCrashBase.h

@ -48,8 +48,8 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
class plCrashBase
{
protected:
hsSemaphore* fCrashed;
hsSemaphore* fHandled;
hsGlobalSemaphore* fCrashed;
hsGlobalSemaphore* fHandled;
~plCrashBase();
void IInit(const char* file);
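
The old OS-backed semaphore survives under the new name hsGlobalSemaphore (Win32 CreateSemaphore, and the POSIX path in hsThread_Unix.cpp), and these crash-handler members keep using it, presumably because the named form is visible across processes while the new std-based hsSemaphore is in-process only. A hedged sketch of that cross-process handoff, assuming that constructing a named hsGlobalSemaphore with the same name in two processes refers to the same kernel object (the name below is illustrative):

    #include "hsThread.h"                   // for hsGlobalSemaphore

    // Game process side: wait until the handler reports it is done.
    void WaitForHandler()
    {
        hsGlobalSemaphore handled(0, "MyApp-CrashHandled");
        handled.Wait();
    }

    // Crash-handler process side: open the same name and release the waiter.
    void ReportHandled()
    {
        hsGlobalSemaphore handled(0, "MyApp-CrashHandled");
        handled.Signal();
    }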

31
Sources/Plasma/FeatureLib/pfPatcher/pfPatcher.cpp

@ -42,6 +42,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
#include <algorithm>
#include <deque>
#include <mutex>
#include "pfPatcher.h"
@ -101,8 +102,8 @@ struct pfPatcherWorker : public hsThread
std::deque<Request> fRequests;
std::deque<NetCliFileManifestEntry> fQueuedFiles;
hsMutex fRequestMut;
hsMutex fFileMut;
std::mutex fRequestMut;
std::mutex fFileMut;
hsSemaphore fFileSignal;
pfPatcher::CompletionFunc fOnComplete;
@ -243,7 +244,7 @@ static void IGotAuthFileList(ENetError result, void* param, const NetCliAuthFile
// so everything goes directly into the Requests deque because AuthSrv lists
// don't have any hashes attached. WHY did eap think this was a good idea?!?!
{
hsTempMutexLock lock(patcher->fRequestMut);
std::lock_guard<std::mutex> lock(patcher->fRequestMut);
for (unsigned i = 0; i < infoCount; ++i) {
PatcherLogYellow("\tEnqueuing Legacy File '%S'", infoArr[i].filename);
@ -268,7 +269,7 @@ static void IHandleManifestDownload(pfPatcherWorker* patcher, const wchar_t grou
{
PatcherLogGreen("\tDownloaded Manifest '%S'", group);
{
hsTempMutexLock lock(patcher->fFileMut);
std::lock_guard<std::mutex> lock(patcher->fFileMut);
for (unsigned i = 0; i < entryCount; ++i)
patcher->fQueuedFiles.push_back(manifest[i]);
patcher->fFileSignal.Signal();
@ -287,7 +288,7 @@ static void IPreloaderManifestDownloadCB(ENetError result, void* param, const wc
// so, we need to ask the AuthSrv about our game code
{
hsTempMutexLock lock(patcher->fRequestMut);
std::lock_guard<std::mutex> lock(patcher->fRequestMut);
patcher->fRequests.push_back(pfPatcherWorker::Request(plString::Null, pfPatcherWorker::Request::kPythonList));
patcher->fRequests.push_back(pfPatcherWorker::Request(plString::Null, pfPatcherWorker::Request::kSdlList));
}
@ -341,7 +342,7 @@ pfPatcherWorker::pfPatcherWorker() :
pfPatcherWorker::~pfPatcherWorker()
{
{
hsTempMutexLock lock(fRequestMut);
std::lock_guard<std::mutex> lock(fRequestMut);
std::for_each(fRequests.begin(), fRequests.end(),
[] (const Request& req) {
if (req.fStream) req.fStream->Close();
@ -352,7 +353,7 @@ pfPatcherWorker::~pfPatcherWorker()
}
{
hsTempMutexLock lock(fFileMut);
std::lock_guard<std::mutex> lock(fFileMut);
fQueuedFiles.clear();
}
}
@ -386,7 +387,7 @@ void pfPatcherWorker::EndPatch(ENetError result, const plString& msg)
bool pfPatcherWorker::IssueRequest()
{
hsTempMutexLock lock(fRequestMut);
std::lock_guard<std::mutex> lock(fRequestMut);
if (fRequests.empty()) {
fRequestActive = false;
fFileSignal.Signal(); // make sure the patch thread doesn't deadlock!
@ -451,7 +452,7 @@ hsError pfPatcherWorker::Run()
do {
fFileSignal.Wait();
hsTempMutexLock fileLock(fFileMut);
std::lock_guard<std::mutex> fileLock(fFileMut);
if (!fQueuedFiles.empty()) {
ProcessFile();
continue;
@ -515,8 +516,10 @@ void pfPatcherWorker::ProcessFile()
pfPatcherStream* s = new pfPatcherStream(this, dlName, entry);
s->Open(clName, "wb");
hsTempMutexLock lock(fRequestMut);
fRequests.push_back(Request(dlName, Request::kFile, s));
{
std::lock_guard<std::mutex> lock(fRequestMut);
fRequests.push_back(Request(dlName, Request::kFile, s));
}
fQueuedFiles.pop_front();
if (!fRequestActive)
@ -615,19 +618,19 @@ void pfPatcher::OnSelfPatch(FileDownloadFunc cb)
void pfPatcher::RequestGameCode()
{
hsTempMutexLock lock(fWorker->fRequestMut);
std::lock_guard<std::mutex> lock(fWorker->fRequestMut);
fWorker->fRequests.push_back(pfPatcherWorker::Request("SecurePreloader", pfPatcherWorker::Request::kSecurePreloader));
}
void pfPatcher::RequestManifest(const plString& mfs)
{
hsTempMutexLock lock(fWorker->fRequestMut);
std::lock_guard<std::mutex> lock(fWorker->fRequestMut);
fWorker->fRequests.push_back(pfPatcherWorker::Request(mfs, pfPatcherWorker::Request::kManifest));
}
void pfPatcher::RequestManifest(const std::vector<plString>& mfs)
{
hsTempMutexLock lock(fWorker->fRequestMut);
std::lock_guard<std::mutex> lock(fWorker->fRequestMut);
std::for_each(mfs.begin(), mfs.end(),
[&] (const plString& name) {
fWorker->fRequests.push_back(pfPatcherWorker::Request(name, pfPatcherWorker::Request::kManifest));
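
One detail worth noting in pfPatcherWorker::ProcessFile above: the newly added braces exist purely to bound the std::lock_guard, so fRequestMut is released before the surrounding code carries on with other work. A minimal sketch of that scope-narrowing trick (names illustrative):

    #include <deque>
    #include <mutex>

    std::mutex gRequestMutex;
    std::deque<int> gRequests;

    void QueueRequest(int req)
    {
        {
            std::lock_guard<std::mutex> lock(gRequestMutex);   // held only for the push
            gRequests.push_back(req);
        }
        // Follow-up work (issuing the request, touching other queues) runs unlocked.
    }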

24
Sources/Plasma/NucleusLib/pnAsyncCoreExe/pnAceIo.cpp

@ -340,11 +340,8 @@ void AsyncSocketRegisterNotifyProc (
ct->productId = productId;
ct->flags = kConnHashFlagsIgnore;
s_notifyProcLock.LockForWriting();
{
s_notifyProcs.Add(ct);
}
s_notifyProcLock.UnlockForWriting();
hsLockForWriting lock(s_notifyProcLock);
s_notifyProcs.Add(ct);
}
//===========================================================================
@ -365,8 +362,9 @@ void AsyncSocketUnregisterNotifyProc (
hash.flags = kConnHashFlagsExactMatch;
ISocketConnType * scan;
s_notifyProcLock.LockForWriting();
{
hsLockForWriting lock(s_notifyProcLock);
scan = s_notifyProcs.Find(hash);
for (; scan; scan = s_notifyProcs.FindNext(hash, scan)) {
if (scan->notifyProc != notifyProc)
@ -377,7 +375,6 @@ void AsyncSocketUnregisterNotifyProc (
break;
}
}
s_notifyProcLock.UnlockForWriting();
// perform memory deallocation outside the lock
delete scan;
@ -403,12 +400,13 @@ FAsyncNotifySocketProc AsyncSocketFindNotifyProc (
// Lookup notifyProc based on connType
FAsyncNotifySocketProc proc;
s_notifyProcLock.LockForReading();
if (const ISocketConnType * scan = s_notifyProcs.Find(hash))
proc = scan->notifyProc;
else
proc = nil;
s_notifyProcLock.UnlockForReading();
{
hsLockForReading lock(s_notifyProcLock);
if (const ISocketConnType * scan = s_notifyProcs.Find(hash))
proc = scan->notifyProc;
else
proc = nullptr;
}
if (!proc)
break;

96
Sources/Plasma/NucleusLib/pnDispatch/plDispatch.cpp

@ -84,7 +84,7 @@ public:
uint32_t GetNumReceivers() const { return fReceivers.GetCount(); }
};
int32_t plDispatch::fNumBufferReq = 0;
int32_t plDispatch::fNumBufferReq = 0;
bool plDispatch::fMsgActive = false;
plMsgWrap* plDispatch::fMsgCurrent = nil;
plMsgWrap* plDispatch::fMsgHead = nil;
@ -92,8 +92,8 @@ plMsgWrap* plDispatch::fMsgTail = nil;
hsTArray<plMessage*> plDispatch::fMsgWatch;
MsgRecieveCallback plDispatch::fMsgRecieveCallback = nil;
hsMutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
hsMutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch
std::mutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
std::mutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch
plDispatch::plDispatch()
@ -227,18 +227,19 @@ bool plDispatch::IListeningForExactType(uint16_t hClass)
void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
{
fMsgCurrentMutex.Lock();
{
std::lock_guard<std::mutex> lock(fMsgCurrentMutex);
#ifdef HS_DEBUGGING
if( msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch) )
fMsgWatch.Append(msgWrap->fMsg);
if (msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch))
fMsgWatch.Append(msgWrap->fMsg);
#endif // HS_DEBUGGING
if( fMsgTail )
fMsgTail = IInsertToQueue(&fMsgTail->fNext, msgWrap);
else
fMsgTail = IInsertToQueue(&fMsgHead, msgWrap);
fMsgCurrentMutex.Unlock();
if (fMsgTail)
fMsgTail = IInsertToQueue(&fMsgTail->fNext, msgWrap);
else
fMsgTail = IInsertToQueue(&fMsgHead, msgWrap);
}
if( !async )
// Test for fMsgActive in IMsgDispatch(), properly wrapped inside a mutex -mcn
@ -248,24 +249,20 @@ void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
// On starts deferring msg delivery until buffering is set to off again.
bool plDispatch::SetMsgBuffering(bool on)
{
fMsgCurrentMutex.Lock();
if( on )
std::unique_lock<std::mutex> lock(fMsgCurrentMutex);
if (on)
{
hsAssert(fNumBufferReq || !fMsgActive, "Can't start deferring message delivery while delivering messages. See mf");
if( !fNumBufferReq && fMsgActive )
{
fMsgCurrentMutex.Unlock();
if (!fNumBufferReq && fMsgActive)
return false;
}
fNumBufferReq++;
fMsgActive = true;
fMsgCurrentMutex.Unlock();
}
else if( !--fNumBufferReq )
else if (!--fNumBufferReq)
{
fMsgActive = false;
fMsgCurrentMutex.Unlock();
lock.unlock();
IMsgDispatch();
}
hsAssert(fNumBufferReq >= 0, "Mismatched number of on/off dispatch buffering requests");
@ -275,25 +272,23 @@ bool plDispatch::SetMsgBuffering(bool on)
void plDispatch::IMsgDispatch()
{
if( !fMsgDispatchLock.TryLock() )
std::unique_lock<std::mutex> dispatchLock(fMsgDispatchLock, std::try_to_lock);
if (!dispatchLock.owns_lock())
return;
if( fMsgActive )
{
fMsgDispatchLock.Unlock();
if (fMsgActive)
return;
}
fMsgActive = true;
int responseLevel=0;
fMsgCurrentMutex.Lock();
std::unique_lock<std::mutex> msgCurrentLock(fMsgCurrentMutex);
plMsgWrap* origTail = fMsgTail;
while((fMsgCurrent = fMsgHead))
{
IDequeue(&fMsgHead, &fMsgTail);
fMsgCurrentMutex.Unlock();
msgCurrentLock.unlock();
plMessage* msg = fMsgCurrent->fMsg;
bool nonLocalMsg = msg && msg->HasBCastFlag(plMessage::kNetNonLocal);
@ -402,16 +397,14 @@ void plDispatch::IMsgDispatch()
// }
// }
fMsgCurrentMutex.Lock();
msgCurrentLock.lock();
delete fMsgCurrent;
// TEMP
fMsgCurrent = (class plMsgWrap *)0xdeadc0de;
}
fMsgCurrentMutex.Unlock();
fMsgActive = false;
fMsgDispatchLock.Unlock();
}
//
@ -419,12 +412,12 @@ void plDispatch::IMsgDispatch()
//
bool plDispatch::IMsgNetPropagate(plMessage* msg)
{
fMsgCurrentMutex.Lock();
// Make sure cascaded messages all have the same net flags
plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);
{
std::lock_guard<std::mutex> lock(fMsgCurrentMutex);
fMsgCurrentMutex.Unlock();
// Make sure cascaded messages all have the same net flags
plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);
}
// Decide if msg should go out over the network.
// If kNetForce is used, this message should always go out over the network, even if it's already
@ -511,10 +504,9 @@ void plDispatch::MsgQueue(plMessage* msg)
{
if (fQueuedMsgOn)
{
fQueuedMsgListMutex.Lock();
std::lock_guard<std::mutex> lock(fQueuedMsgListMutex);
hsAssert(msg,"Message missing");
fQueuedMsgList.push_back(msg);
fQueuedMsgListMutex.Unlock();
}
else
MsgSend(msg, false);
@ -522,23 +514,23 @@ void plDispatch::MsgQueue(plMessage* msg)
void plDispatch::MsgQueueProcess()
{
// Process all messages on Queue, unlock while sending them
// this would allow other threads to put new messages on the list while we send()
while (1)
{
plMessage * pMsg = nil;
fQueuedMsgListMutex.Lock();
int size = fQueuedMsgList.size();
if (size)
{ pMsg = fQueuedMsgList.front();
fQueuedMsgList.pop_front();
// Process all messages on Queue, unlock while sending them
// this would allow other threads to put new messages on the list while we send()
bool empty = false;
while (!empty)
{
plMessage * pMsg = nullptr;
{
std::lock_guard<std::mutex> lock(fQueuedMsgListMutex);
empty = fQueuedMsgList.empty();
if (!empty)
{
pMsg = fQueuedMsgList.front();
fQueuedMsgList.pop_front();
}
}
fQueuedMsgListMutex.Unlock();
if (pMsg)
{ MsgSend(pMsg, false);
}
if (!size)
break;
MsgSend(pMsg, false);
}
}
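
The plDispatch changes above lean on std::unique_lock where hsMutex::TryLock and manual Unlock/Lock pairs were used: std::try_to_lock attempts the acquisition without blocking, owns_lock() reports whether it succeeded, and the same lock object can be released and re-acquired across the dispatch loop. A minimal sketch of that pattern (the function and loop body are illustrative):

    #include <mutex>

    std::mutex gDispatchLock;

    void Dispatch()
    {
        std::unique_lock<std::mutex> lock(gDispatchLock, std::try_to_lock);
        if (!lock.owns_lock())
            return;                         // another thread is already dispatching

        for (int i = 0; i < 3; ++i) {
            lock.unlock();                  // deliver the message without holding the lock
            // ... deliver one message ...
            lock.lock();                    // re-acquire before touching the queue again
        }
    }                                       // unique_lock releases the mutex if still held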

10
Sources/Plasma/NucleusLib/pnDispatch/plDispatch.h

@ -42,7 +42,9 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
#ifndef plDispatch_inc
#define plDispatch_inc
#include <list>
#include <mutex>
#include "hsTemplates.h"
#include "plgDispatch.h"
#include "hsThread.h"
@ -74,10 +76,10 @@ protected:
hsKeyedObject* fOwner;
plMsgWrap* fFutureMsgQueue;
static int32_t fNumBufferReq;
static int32_t fNumBufferReq;
static plMsgWrap* fMsgCurrent;
static hsMutex fMsgCurrentMutex; // mutex for above
static hsMutex fMsgDispatchLock; // mutex for IMsgDispatch
static std::mutex fMsgCurrentMutex; // mutex for above
static std::mutex fMsgDispatchLock; // mutex for IMsgDispatch
static plMsgWrap* fMsgHead;
static plMsgWrap* fMsgTail;
static bool fMsgActive;
@ -86,7 +88,7 @@ protected:
hsTArray<plTypeFilter*> fRegisteredExactTypes;
std::list<plMessage*> fQueuedMsgList;
hsMutex fQueuedMsgListMutex; // mutex for above
std::mutex fQueuedMsgListMutex; // mutex for above
bool fQueuedMsgOn; // Turns on or off Queued Messages, Plugins need them off
hsKeyedObject* IGetOwner() { return fOwner; }

80
Sources/Plasma/NucleusLib/pnNetCli/pnNcChannel.cpp

@ -46,8 +46,8 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
***/
#include "Pch.h"
#include "hsThread.h"
#include <list>
#include <mutex>
#include "hsRefCnt.h"
#pragma hdrstop
@ -61,16 +61,24 @@ namespace pnNetCli {
***/
struct ChannelCrit {
~ChannelCrit ();
ChannelCrit ();
inline void Enter () { m_critsect.Lock(); }
inline void Leave () { m_critsect.Unlock(); }
inline void EnterSafe () { if (m_init) m_critsect.Lock(); }
inline void LeaveSafe () { if (m_init) m_critsect.Unlock(); }
~ChannelCrit();
ChannelCrit() : m_init(true) { }
inline void lock()
{
hsAssert(m_init, "Bad things have happened.");
m_critsect.lock();
}
inline void unlock()
{
hsAssert(m_init, "Bad things have happened.");
m_critsect.unlock();
}
private:
bool m_init;
hsMutex m_critsect;
std::mutex m_critsect;
};
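
ChannelCrit now exposes lower-case lock()/unlock(), which is exactly the BasicLockable interface std::lock_guard requires, so the code further down can take std::lock_guard<ChannelCrit> directly. A minimal sketch of the same idea on a custom type (the type is illustrative, not from the patch):

    #include <mutex>

    struct TracingLock {                    // illustrative wrapper
        void lock()   { m.lock(); }         // a real version might assert or log here
        void unlock() { m.unlock(); }
    private:
        std::mutex m;
    };

    TracingLock gCrit;

    void Touch()
    {
        std::lock_guard<TracingLock> lock(gCrit);   // works: TracingLock is BasicLockable
        // ... protected work ...
    }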
struct NetMsgChannel : hsRefCnt {
@ -100,14 +108,10 @@ static std::list<NetMsgChannel*>* s_channels;
*
***/
//===========================================================================
ChannelCrit::ChannelCrit () {
m_init = true;
}
//===========================================================================
ChannelCrit::~ChannelCrit () {
EnterSafe();
std::lock_guard<ChannelCrit> lock(*this);
if (s_channels) {
while (s_channels->size()) {
NetMsgChannel* const channel = s_channels->front();
@ -118,7 +122,6 @@ ChannelCrit::~ChannelCrit () {
delete s_channels;
s_channels = nil;
}
LeaveSafe();
}
@ -298,15 +301,14 @@ NetMsgChannel * NetMsgChannelLock (
uint32_t * largestRecv
) {
NetMsgChannel * channel;
s_channelCrit.Enter();
if (nil != (channel = FindChannel_CS(protocol, server))) {
std::lock_guard<ChannelCrit> lock(s_channelCrit);
if (nullptr != (channel = FindChannel_CS(protocol, server))) {
*largestRecv = channel->m_largestRecv;
channel->Ref("ChannelLock");
}
else {
*largestRecv = 0;
}
s_channelCrit.Leave();
return channel;
}
@ -314,11 +316,9 @@ NetMsgChannel * NetMsgChannelLock (
void NetMsgChannelUnlock (
NetMsgChannel * channel
) {
s_channelCrit.Enter();
{
channel->UnRef("ChannelLock");
}
s_channelCrit.Leave();
std::lock_guard<ChannelCrit> lock(s_channelCrit);
channel->UnRef("ChannelLock");
}
//============================================================================
@ -388,33 +388,31 @@ void NetMsgProtocolRegister (
const plBigNum& dh_xa, // client: dh_x server: dh_a
const plBigNum& dh_n
) {
s_channelCrit.EnterSafe();
{
NetMsgChannel * channel = FindOrCreateChannel_CS(protocol, server);
std::lock_guard<ChannelCrit> lock(s_channelCrit);
// make sure no connections have been established on this protocol, otherwise
// we'll be modifying a live data structure; NetCli's don't lock their protocol
// to operate on it once they have linked to it!
ASSERT(channel->RefCnt() == 1);
NetMsgChannel * channel = FindOrCreateChannel_CS(protocol, server);
channel->m_dh_g = dh_g;
channel->m_dh_xa = dh_xa;
channel->m_dh_n = dh_n;
// make sure no connections have been established on this protocol, otherwise
// we'll be modifying a live data structure; NetCli's don't lock their protocol
// to operate on it once they have linked to it!
ASSERT(channel->RefCnt() == 1);
if (sendMsgCount)
AddSendMsgs_CS(channel, sendMsgs, sendMsgCount);
if (recvMsgCount)
AddRecvMsgs_CS(channel, recvMsgs, recvMsgCount);
}
s_channelCrit.LeaveSafe();
channel->m_dh_g = dh_g;
channel->m_dh_xa = dh_xa;
channel->m_dh_n = dh_n;
if (sendMsgCount)
AddSendMsgs_CS(channel, sendMsgs, sendMsgCount);
if (recvMsgCount)
AddRecvMsgs_CS(channel, recvMsgs, recvMsgCount);
}
//===========================================================================
void NetMsgProtocolDestroy (uint32_t protocol, bool server) {
s_channelCrit.EnterSafe();
std::lock_guard<ChannelCrit> lock(s_channelCrit);
if (NetMsgChannel* channel = FindChannel_CS(protocol, server)) {
s_channels->remove(channel);
channel->UnRef("ChannelLink");
}
s_channelCrit.LeaveSafe();
}

22
Sources/Plasma/PubUtilLib/plAudioCore/plSoundBuffer.cpp

@ -90,12 +90,13 @@ hsError plSoundPreloader::Run()
while (fRunning)
{
fCritSect.Lock();
while (fBuffers.GetCount())
{
templist.Append(fBuffers.Pop());
std::lock_guard<std::mutex> lock(fCritSect);
while (fBuffers.GetCount())
{
templist.Append(fBuffers.Pop());
}
}
fCritSect.Unlock();
if (templist.GetCount() == 0)
{
@ -130,14 +131,15 @@ hsError plSoundPreloader::Run()
}
// we need to be sure that all buffers are removed from our load list when shutting this thread down or we will hang,
// since the sound buffer will wait to be destroyed until it is marked as loaded
fCritSect.Lock();
while (fBuffers.GetCount())
// since the sound buffer will wait to be destroyed until it is marked as loaded
{
plSoundBuffer* buf = fBuffers.Pop();
buf->SetLoaded(true);
std::lock_guard<std::mutex> lock(fCritSect);
while (fBuffers.GetCount())
{
plSoundBuffer* buf = fBuffers.Pop();
buf->SetLoaded(true);
}
}
fCritSect.Unlock();
return hsOK;
}

10
Sources/Plasma/PubUtilLib/plAudioCore/plSoundBuffer.h

@ -57,6 +57,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
#include "plAudioFileReader.h"
#include "hsThread.h"
#include "plFileSystem.h"
#include <mutex>
//// Class Definition ////////////////////////////////////////////////////////
@ -165,7 +166,7 @@ protected:
hsTArray<plSoundBuffer*> fBuffers;
hsEvent fEvent;
bool fRunning;
hsMutex fCritSect;
std::mutex fCritSect;
public:
virtual hsError Run();
@ -184,9 +185,10 @@ public:
bool IsRunning() const { return fRunning; }
void AddBuffer(plSoundBuffer* buffer) {
fCritSect.Lock();
fBuffers.Push(buffer);
fCritSect.Unlock();
{
std::lock_guard<std::mutex> lock(fCritSect);
fBuffers.Push(buffer);
}
fEvent.Signal();
}
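
A small but deliberate detail in AddBuffer above: the std::lock_guard scope closes before fEvent.Signal(), so the preloader thread that wakes up is not immediately blocked trying to take fCritSect. The same ordering in sketch form, using a plain condition variable so the snippet is self-contained (names illustrative):

    #include <condition_variable>
    #include <mutex>
    #include <vector>

    std::mutex gCrit;
    std::condition_variable gWork;
    std::vector<int> gPending;

    void Add(int item)
    {
        {
            std::lock_guard<std::mutex> lock(gCrit);
            gPending.push_back(item);
        }
        gWork.notify_one();                 // notify after the lock is released, as AddBuffer does
    }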

8
Sources/Plasma/PubUtilLib/plFile/plStreamSource.cpp

@ -56,7 +56,7 @@ plStreamSource::plStreamSource()
void plStreamSource::ICleanup()
{
hsTempMutexLock lock(fMutex);
std::lock_guard<std::mutex> lock(fMutex);
// loop through all the file data records, and delete the streams
decltype(fFileData.begin()) curData;
@ -72,7 +72,7 @@ void plStreamSource::ICleanup()
hsStream* plStreamSource::GetFile(const plFileName& filename)
{
hsTempMutexLock lock(fMutex);
std::lock_guard<std::mutex> lock(fMutex);
plFileName sFilename = filename.Normalize('/');
if (fFileData.find(sFilename) == fFileData.end())
@ -112,7 +112,7 @@ std::vector<plFileName> plStreamSource::GetListOfNames(const plFileName& dir, co
{
plFileName sDir = dir.Normalize('/');
hsAssert(ext.CharAt(0) != '.', "Don't add a dot");
hsTempMutexLock lock(fMutex);
std::lock_guard<std::mutex> lock(fMutex);
// loop through all the file data records, and create the list
std::vector<plFileName> retVal;
@ -142,7 +142,7 @@ bool plStreamSource::InsertFile(const plFileName& filename, hsStream* stream)
{
plFileName sFilename = filename.Normalize('/');
hsTempMutexLock lock(fMutex);
std::lock_guard<std::mutex> lock(fMutex);
if (fFileData.find(sFilename) != fFileData.end())
return false; // duplicate entry, return failure

4
Sources/Plasma/PubUtilLib/plFile/plStreamSource.h

@ -43,8 +43,8 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
#define plStreamSource_h_inc
#include <map>
#include <mutex>
#include "hsStream.h"
#include "hsThread.h"
// A class for holding and accessing file streams. The preloader will insert
// files in here once they are loaded. In internal builds, if a requested file
@ -60,7 +60,7 @@ private:
hsStream* fStream; // we own this pointer, so clean it up
};
std::map<plFileName, fileData, plFileName::less_i> fFileData; // key is filename
hsMutex fMutex;
std::mutex fMutex;
uint32_t fServerKey[4];
void ICleanup(); // closes all file pointers and cleans up after itself

2
Sources/Plasma/PubUtilLib/plMath/plAvg.cpp

@ -55,7 +55,7 @@ const float TimeBasedAvgRing<T>::kPercision = 0.001;
template <class T>
void TimeBasedAvgRing<T>::AddItem(T value, double time)
{
hsTempMutexLock lock( fLock );
std::lock_guard<std::mutex> lock(fLock);
if ( fList.empty() )
{

6
Sources/Plasma/PubUtilLib/plMath/plAvg.h

@ -44,9 +44,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
#include "HeadSpin.h"
#include <list>
#include "hsThread.h"
#include <mutex>
// A Time based Value Averaging class
// implemented in a ring buffer
@ -91,7 +89,7 @@ private:
float fMaxAvg;
double fTotal;
TimeListIterator fRingStart, fRingEnd;
hsMutex fLock;
std::mutex fLock;
public:
TimeBasedAvgRing():fLen(0.f),fAvg(0.f),fMaxAvg(0.f),fTotal(0.0) {}

28
Sources/Plasma/PubUtilLib/plNetGameLib/Private/plNglFile.cpp

@ -283,12 +283,11 @@ static void UnlinkAndAbandonConn_CS (CliFileConn * conn) {
needsDecref = false;
}
else {
conn->sockLock.LockForReading();
hsLockForReading lock(conn->sockLock);
if (conn->sock) {
AsyncSocketDisconnect(conn->sock, true);
needsDecref = false;
}
conn->sockLock.UnlockForReading();
}
if (needsDecref) {
conn->UnRef("Lifetime");
@ -311,9 +310,8 @@ static void NotifyConnSocketConnect (CliFileConn * conn) {
}
else
{
conn->sockLock.LockForReading();
hsLockForReading lock(conn->sockLock);
AsyncSocketDisconnect(conn->sock, true);
conn->sockLock.UnlockForReading();
}
}
s_critsect.Leave();
@ -468,9 +466,8 @@ static bool SocketNotifyCallback (
*userState = conn;
s_critsect.Enter();
{
conn->sockLock.LockForWriting();
hsLockForWriting lock(conn->sockLock);
conn->sock = sock;
conn->sockLock.UnlockForWriting();
conn->cancelId = 0;
}
s_critsect.Leave();
@ -697,9 +694,11 @@ void CliFileConn::AutoPing () {
Ref("PingTimer");
timerCritsect.Enter();
{
sockLock.LockForReading();
unsigned timerPeriod = sock ? 0 : kAsyncTimeInfinite;
sockLock.UnlockForReading();
unsigned timerPeriod;
{
hsLockForReading lock(sockLock);
timerPeriod = sock ? 0 : kAsyncTimeInfinite;
}
AsyncTimerCreate(
&pingTimer,
@ -725,7 +724,8 @@ void CliFileConn::StopAutoPing () {
//============================================================================
void CliFileConn::TimerPing () {
sockLock.LockForReading();
hsLockForReading lock(sockLock);
for (;;) {
if (!sock) // make sure it exists
break;
@ -752,18 +752,16 @@ void CliFileConn::TimerPing () {
}
break;
}
sockLock.UnlockForReading();
}
//============================================================================
void CliFileConn::Destroy () {
AsyncSocket oldSock = nil;
sockLock.LockForWriting();
{
hsLockForWriting lock(sockLock);
SWAP(oldSock, sock);
}
sockLock.UnlockForWriting();
if (oldSock)
AsyncSocketDelete(oldSock);
@ -772,11 +770,11 @@ void CliFileConn::Destroy () {
//============================================================================
void CliFileConn::Send (const void * data, unsigned bytes) {
sockLock.LockForReading();
hsLockForReading lock(sockLock);
if (sock) {
AsyncSocketSend(sock, data, bytes);
}
sockLock.UnlockForReading();
}
//============================================================================

4
Sources/Plasma/PubUtilLib/plStatusLog/plStatusLog.cpp

@ -277,14 +277,14 @@ plStatusLog::plStatusLog( uint8_t numDisplayLines, const plFileName &filename, u
if (filename.IsValid())
{
fFilename = filename;
fSema = new hsSemaphore(1, fFilename.AsString().c_str());
fSema = new hsGlobalSemaphore(1, fFilename.AsString().c_str());
}
else
{
fFilename = "";
flags |= kDontWriteFile;
fSema = new hsSemaphore(1);
fSema = new hsGlobalSemaphore(1);
}
fOrigFlags = fFlags = flags;

5
Sources/Plasma/PubUtilLib/plStatusLog/plStatusLog.h

@ -69,7 +69,6 @@ class plPipeline;
// really be visible at any given time.
class plStatusLogMgr;
class hsMutex;
class plStatusLogDrawerStub;
class plStatusLog
{
@ -87,7 +86,7 @@ class plStatusLog
plFileName fFilename;
char** fLines;
uint32_t* fColors;
hsSemaphore* fSema;
hsGlobalSemaphore* fSema;
FILE* fFileHandle;
uint32_t fSize;
bool fForceLog;
@ -206,8 +205,6 @@ class plStatusLogMgr
static plFileName IGetBasePath();
hsMutex fMutex; // To make multithreaded-safe
public:
enum
