Mirror of https://foundry.openuru.org/gitblit/r/CWE-ou-minkata.git (synced 2025-07-17 18:59:09 +00:00)
Replace hsMutex with std::mutex
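The bulk of the diff below is a mechanical substitution: an hsMutex member becomes a std::mutex, and an hsTempMutexLock guard or an explicit Lock()/Unlock() pair becomes a scoped std::lock_guard. A minimal sketch of that pattern, using a hypothetical plExample class and fExampleLock member that are not part of this commit:

    #include <mutex>

    class plExample
    {
        std::mutex fExampleLock;   // was: hsMutex fExampleLock;
        int        fCounter = 0;

    public:
        void DoWork()
        {
            // was: hsTempMutexLock lock(fExampleLock);  or  fExampleLock.Lock()/Unlock()
            std::lock_guard<std::mutex> lock(fExampleLock);
            ++fCounter;
        }   // the guard's destructor releases fExampleLock here
    };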
@@ -57,14 +57,14 @@ hsReaderWriterLock::hsReaderWriterLock(Callback * cb)
 void hsReaderWriterLock::LockForReading()
 {
     if ( fCallback )
-        fCallback->OnLockingForRead( this );
-    fReaderCountLock.Lock();
-    fReaderLock.Lock();
-    fReaderCount++;
-    if ( fReaderCount==1 )
-        fWriterSema.Wait();
-    fReaderLock.Unlock();
-    fReaderCountLock.Unlock();
+        fCallback->OnLockingForRead(this);
+    {
+        std::lock_guard<std::mutex> lock_count(fReaderCountLock);
+        std::lock_guard<std::mutex> lock(fReaderLock);
+        fReaderCount++;
+        if (fReaderCount == 1)
+            fWriterSema.Wait();
+    }
     if ( fCallback )
         fCallback->OnLockedForRead( this );
 }
@@ -72,12 +72,13 @@ void hsReaderWriterLock::LockForReading()
 void hsReaderWriterLock::UnlockForReading()
 {
     if ( fCallback )
-        fCallback->OnUnlockingForRead( this );
-    fReaderLock.Lock();
-    fReaderCount--;
-    if ( fReaderCount==0 )
-        fWriterSema.Signal();
-    fReaderLock.Unlock();
+        fCallback->OnUnlockingForRead(this);
+    {
+        std::lock_guard<std::mutex> lock(fReaderLock);
+        fReaderCount--;
+        if (fReaderCount == 0)
+            fWriterSema.Signal();
+    }
     if ( fCallback )
         fCallback->OnUnlockedForRead( this );
 }
@@ -86,7 +87,7 @@ void hsReaderWriterLock::LockForWriting()
 {
     if ( fCallback )
         fCallback->OnLockingForWrite( this );
-    fReaderCountLock.Lock();
+    fReaderCountLock.lock();
     fWriterSema.Wait();
     hsAssert( fReaderCount==0, "Locked for writing, but fReaderCount>0" );
     if ( fCallback )
@@ -98,7 +99,7 @@ void hsReaderWriterLock::UnlockForWriting()
     if ( fCallback )
         fCallback->OnUnlockingForWrite( this );
     fWriterSema.Signal();
-    fReaderCountLock.Unlock();
+    fReaderCountLock.unlock();
     if ( fCallback )
         fCallback->OnUnlockedForWrite( this );
 }
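Note that LockForWriting() and UnlockForWriting() above keep calling lock()/unlock() directly instead of using a guard: fReaderCountLock is acquired in one member function and released in another, so its lifetime cannot be tied to a single scope. A reduced sketch of that split-acquire pattern on a plain std::mutex (the hsSplitExample type is illustrative only, not from this commit):

    #include <mutex>

    // Illustrative only: a lock held across two member functions, as in
    // hsReaderWriterLock::LockForWriting()/UnlockForWriting().
    class hsSplitExample
    {
        std::mutex fLock;

    public:
        void Begin() { fLock.lock(); }    // no scoped guard: the scope ends before we want to unlock
        void End()   { fLock.unlock(); }  // must run on the same thread that called Begin()
    };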
@@ -43,6 +43,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 #define hsThread_Defined
 
 #include "HeadSpin.h"
+#include <mutex>
 
 typedef uint32_t hsMilliseconds;
 
@@ -106,44 +107,6 @@ public:
 
 //////////////////////////////////////////////////////////////////////////////
 
-class hsMutex {
-#if HS_BUILD_FOR_WIN32
-    HANDLE fMutexH;
-#elif HS_BUILD_FOR_UNIX
-    pthread_mutex_t fPMutex;
-#endif
-public:
-    hsMutex();
-    virtual ~hsMutex();
-
-#ifdef HS_BUILD_FOR_WIN32
-    HANDLE GetHandle() const { return fMutexH; }
-#endif
-
-    void Lock();
-    bool TryLock();
-    void Unlock();
-};
-
-class hsTempMutexLock {
-    hsMutex* fMutex;
-public:
-    hsTempMutexLock(hsMutex* mutex) : fMutex(mutex)
-    {
-        fMutex->Lock();
-    }
-    hsTempMutexLock(hsMutex& mutex) : fMutex(&mutex)
-    {
-        fMutex->Lock();
-    }
-    ~hsTempMutexLock()
-    {
-        fMutex->Unlock();
-    }
-};
-
-//////////////////////////////////////////////////////////////////////////////
-
 class hsSemaphore {
 #if HS_BUILD_FOR_WIN32
     HANDLE fSemaH;
@@ -181,8 +144,8 @@ class hsEvent
 #else
     enum { kRead, kWrite };
     int fFds[2];
-    hsMutex fWaitLock;
-    hsMutex fSignalLock;
+    std::mutex fWaitLock;
+    std::mutex fSignalLock;
 #endif // PSEUDO_EVENT
 #elif HS_BUILD_FOR_WIN32
     HANDLE fEvent;
@@ -231,8 +194,8 @@ public:
 
 private:
     int fReaderCount;
-    hsMutex fReaderCountLock;
-    hsMutex fReaderLock;
+    std::mutex fReaderCountLock;
+    std::mutex fReaderLock;
     hsSemaphore fWriterSema;
     Callback * fCallback;
 };
@@ -115,32 +115,6 @@ void hsThread::ThreadYield()
 
 //////////////////////////////////////////////////////////////////////////////
 
-hsMutex::hsMutex()
-{
-    OSStatus status = ::MPCreateCriticalRegion(&fCriticalRegion);
-    hsThrowIfOSErr(status);
-}
-
-hsMutex::~hsMutex()
-{
-    OSStatus status = ::MPDeleteCriticalRegion(fCriticalRegion);
-    hsThrowIfOSErr(status);
-}
-
-void hsMutex::Lock()
-{
-    OSStatus status = ::MPEnterCriticalRegion(fCriticalRegion, kDurationForever);
-    hsThrowIfOSErr(status);
-}
-
-void hsMutex::Unlock()
-{
-    OSStatus status = ::MPExitCriticalRegion(fCriticalRegion);
-    hsThrowIfOSErr(status);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
 hsSemaphore::hsSemaphore(int initialValue)
 {
     OSStatus status = MPCreateSemaphore(kPosInfinity32, initialValue, &fSemaId);
@@ -190,83 +190,6 @@ static void InitEventLoggingFile()
 
 #endif
 
-hsMutex::hsMutex()
-{
-
-#ifdef MUTEX_TIMING
-    InitMutexTimerFile();
-#endif
-
-    // create mutex attributes
-    pthread_mutexattr_t attr;
-    int status = ::pthread_mutexattr_init(&attr);
-    hsThrowIfOSErr(status);
-
-    // make the mutex attributes recursive
-    status = ::pthread_mutexattr_settype(&attr,PTHREAD_MUTEX_RECURSIVE);
-    hsThrowIfOSErr(status);
-
-    //init the mutex
-    status = ::pthread_mutex_init(&fPMutex, &attr);
-    hsThrowIfOSErr(status);
-
-    // destroy the attributes
-    status = ::pthread_mutexattr_destroy(&attr);
-    hsThrowIfOSErr(status);
-}
-
-hsMutex::~hsMutex()
-{
-    int status = ::pthread_mutex_destroy(&fPMutex);
-    hsThrowIfOSErr(status);
-}
-
-void hsMutex::Lock()
-{
-#ifdef MUTEX_TIMING
-# ifndef HS_DEBUGGING
-    timeval tv;
-    hsWide start;
-    gettimeofday( &tv, nil );
-    start.Mul( tv.tv_sec, 1000000 )->Add( tv.tv_usec );
-# endif
-#endif
-
-    int status = ::pthread_mutex_lock(&fPMutex);
-    hsThrowIfOSErr(status);
-
-#ifdef MUTEX_TIMING
-# ifndef HS_DEBUGGING
-    hsWide diff;
-    gettimeofday( &tv, nil );
-    diff.Mul( tv.tv_sec, 1000000 )->Add( tv.tv_usec )->Sub( &start )->Div( 1000000 );
-    double duration = diff.AsDouble();
-    if ( gMutexTimerFile && duration>0.005 )
-    {
-        time_t t;
-        time( &t );
-        struct tm *now = localtime( &t );
-        char tmp[30];
-        strftime( tmp, 30, "%c", now );
-        fprintf( gMutexTimerFile, "[%s] [%lu:%lu] %f\n", tmp, getpid(), hsThread::GetMyThreadId(), duration );
-    }
-# endif
-#endif
-}
-
-bool hsMutex::TryLock()
-{
-    int status = ::pthread_mutex_trylock(&fPMutex);
-    hsThrowIfOSErr(status);
-    return status==EBUSY?false:true;
-}
-
-void hsMutex::Unlock()
-{
-    int status = ::pthread_mutex_unlock(&fPMutex);
-    hsThrowIfOSErr(status);
-}
-
 /////////////////////////////////////////////////////////////////////////////
 
 hsSemaphore::hsSemaphore(int initialValue, const char* name)
@@ -538,7 +461,7 @@ hsEvent::~hsEvent()
 
 bool hsEvent::Wait( hsMilliseconds timeToWait )
 {
-    hsTempMutexLock lock( fWaitLock );
+    std::lock_guard<std::mutex> lock(fWaitLock);
 
     fd_set fdset;
     FD_ZERO( &fdset );
@@ -572,7 +495,7 @@ bool hsEvent::Wait( hsMilliseconds timeToWait )
 
 void hsEvent::Signal()
 {
-    hsTempMutexLock lock( fSignalLock );
+    std::lock_guard<std::mutex> lock(fSignalLock);
     write( fFds[kWrite], "*", 1 );
 }
 
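One behavioural detail worth noting: the deleted pthread implementation above created its mutex with PTHREAD_MUTEX_RECURSIVE (and the Win32 CreateMutex-based one below was recursive as well), whereas std::mutex may not be locked twice by the same thread. That is presumably why code that re-enters its own lock, such as plDispatch further down, is moved to std::recursive_mutex rather than std::mutex. A minimal sketch of the difference, with illustrative names only:

    #include <mutex>

    std::recursive_mutex gRecursive;

    void Inner()
    {
        // Fine: the same thread may re-acquire a std::recursive_mutex it already holds.
        std::lock_guard<std::recursive_mutex> lock(gRecursive);
    }

    void Outer()
    {
        std::lock_guard<std::recursive_mutex> lock(gRecursive);
        Inner();   // with a plain std::mutex this re-lock would be undefined behaviour
    }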
@@ -135,42 +135,6 @@ void hsThread::ThreadYield()
 
 //////////////////////////////////////////////////////////////////////////////
 
-hsMutex::hsMutex()
-{
-    fMutexH = ::CreateMutex(nil, false, nil);
-    if (fMutexH == nil)
-        throw hsOSException(-1);
-}
-
-hsMutex::~hsMutex()
-{
-    ::CloseHandle(fMutexH);
-}
-
-void hsMutex::Lock()
-{
-    DWORD state = ::WaitForSingleObject(fMutexH, INFINITE);
-    hsAssert(state != WAIT_FAILED,"hsMutex::Lock -> Wait Failed");
-    hsAssert(state != WAIT_ABANDONED,"hsMutex::Lock -> Abandoned Mutex");
-    hsAssert(state != WAIT_TIMEOUT,"hsMutex::Lock -> Infinite Timeout expired?");
-}
-
-bool hsMutex::TryLock()
-{
-    DWORD state = ::WaitForSingleObject(fMutexH, 0);
-    hsAssert(state != WAIT_ABANDONED,"hsMutex::TryLock -> Abandoned Mutex");
-    return state == WAIT_OBJECT_0?true:false;
-}
-
-void hsMutex::Unlock()
-{
-    BOOL result = ::ReleaseMutex(fMutexH);
-    hsAssert(result != 0, "hsMutex::Unlock Failed!");
-
-}
-
-//////////////////////////////////////////////////////////////////////////////
-
 hsSemaphore::hsSemaphore(int initialValue, const char *name)
 {
     fSemaH = ::CreateSemaphore(nil, initialValue, kPosInfinity32, name);
@@ -42,6 +42,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 
 #include <algorithm>
 #include <deque>
+#include <mutex>
 
 #include "pfPatcher.h"
 
@@ -101,8 +102,8 @@ struct pfPatcherWorker : public hsThread
     std::deque<Request> fRequests;
     std::deque<NetCliFileManifestEntry> fQueuedFiles;
 
-    hsMutex fRequestMut;
-    hsMutex fFileMut;
+    std::mutex fRequestMut;
+    std::mutex fFileMut;
     hsSemaphore fFileSignal;
 
     pfPatcher::CompletionFunc fOnComplete;
@@ -243,7 +244,7 @@ static void IGotAuthFileList(ENetError result, void* param, const NetCliAuthFile
     // so everything goes directly into the Requests deque because AuthSrv lists
     // don't have any hashes attached. WHY did eap think this was a good idea?!?!
     {
-        hsTempMutexLock lock(patcher->fRequestMut);
+        std::lock_guard<std::mutex> lock(patcher->fRequestMut);
         for (unsigned i = 0; i < infoCount; ++i) {
             PatcherLogYellow("\tEnqueuing Legacy File '%S'", infoArr[i].filename);
 
@@ -268,7 +269,7 @@ static void IHandleManifestDownload(pfPatcherWorker* patcher, const wchar_t grou
 {
     PatcherLogGreen("\tDownloaded Manifest '%S'", group);
     {
-        hsTempMutexLock lock(patcher->fFileMut);
+        std::lock_guard<std::mutex> lock(patcher->fFileMut);
         for (unsigned i = 0; i < entryCount; ++i)
             patcher->fQueuedFiles.push_back(manifest[i]);
         patcher->fFileSignal.Signal();
@@ -287,7 +288,7 @@ static void IPreloaderManifestDownloadCB(ENetError result, void* param, const wc
 
     // so, we need to ask the AuthSrv about our game code
     {
-        hsTempMutexLock lock(patcher->fRequestMut);
+        std::lock_guard<std::mutex> lock(patcher->fRequestMut);
         patcher->fRequests.push_back(pfPatcherWorker::Request(plString::Null, pfPatcherWorker::Request::kPythonList));
         patcher->fRequests.push_back(pfPatcherWorker::Request(plString::Null, pfPatcherWorker::Request::kSdlList));
     }
@@ -341,7 +342,7 @@ pfPatcherWorker::pfPatcherWorker() :
 pfPatcherWorker::~pfPatcherWorker()
 {
     {
-        hsTempMutexLock lock(fRequestMut);
+        std::lock_guard<std::mutex> lock(fRequestMut);
         std::for_each(fRequests.begin(), fRequests.end(),
             [] (const Request& req) {
                 if (req.fStream) req.fStream->Close();
@@ -352,7 +353,7 @@ pfPatcherWorker::~pfPatcherWorker()
     }
 
     {
-        hsTempMutexLock lock(fFileMut);
+        std::lock_guard<std::mutex> lock(fFileMut);
         fQueuedFiles.clear();
     }
 }
@@ -386,7 +387,7 @@ void pfPatcherWorker::EndPatch(ENetError result, const plString& msg)
 
 bool pfPatcherWorker::IssueRequest()
 {
-    hsTempMutexLock lock(fRequestMut);
+    std::lock_guard<std::mutex> lock(fRequestMut);
     if (fRequests.empty()) {
         fRequestActive = false;
         fFileSignal.Signal(); // make sure the patch thread doesn't deadlock!
@@ -451,7 +452,7 @@ hsError pfPatcherWorker::Run()
     do {
         fFileSignal.Wait();
 
-        hsTempMutexLock fileLock(fFileMut);
+        std::lock_guard<std::mutex> fileLock(fFileMut);
        if (!fQueuedFiles.empty()) {
            ProcessFile();
            continue;
@@ -515,8 +516,10 @@ void pfPatcherWorker::ProcessFile()
         pfPatcherStream* s = new pfPatcherStream(this, dlName, entry);
         s->Open(clName, "wb");
 
-        hsTempMutexLock lock(fRequestMut);
-        fRequests.push_back(Request(dlName, Request::kFile, s));
+        {
+            std::lock_guard<std::mutex> lock(fRequestMut);
+            fRequests.push_back(Request(dlName, Request::kFile, s));
+        }
         fQueuedFiles.pop_front();
 
         if (!fRequestActive)
@@ -615,19 +618,19 @@ void pfPatcher::OnSelfPatch(FileDownloadFunc cb)
 
 void pfPatcher::RequestGameCode()
 {
-    hsTempMutexLock lock(fWorker->fRequestMut);
+    std::lock_guard<std::mutex> lock(fWorker->fRequestMut);
     fWorker->fRequests.push_back(pfPatcherWorker::Request("SecurePreloader", pfPatcherWorker::Request::kSecurePreloader));
 }
 
 void pfPatcher::RequestManifest(const plString& mfs)
 {
-    hsTempMutexLock lock(fWorker->fRequestMut);
+    std::lock_guard<std::mutex> lock(fWorker->fRequestMut);
     fWorker->fRequests.push_back(pfPatcherWorker::Request(mfs, pfPatcherWorker::Request::kManifest));
 }
 
 void pfPatcher::RequestManifest(const std::vector<plString>& mfs)
 {
-    hsTempMutexLock lock(fWorker->fRequestMut);
+    std::lock_guard<std::mutex> lock(fWorker->fRequestMut);
     std::for_each(mfs.begin(), mfs.end(),
         [&] (const plString& name) {
             fWorker->fRequests.push_back(pfPatcherWorker::Request(name, pfPatcherWorker::Request::kManifest));
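The ProcessFile() hunk above is the one place in pfPatcher where the translation is not one-for-one: the old hsTempMutexLock lived to the end of its enclosing scope, while the new std::lock_guard is wrapped in an extra pair of braces so that fRequestMut is already released when the statements that follow it run. A reduced sketch of that scoping idiom, with illustrative names only:

    #include <deque>
    #include <mutex>

    std::mutex gQueueLock;
    std::deque<int> gQueue;

    void Enqueue(int value)
    {
        {
            // Hold the lock only for the container update...
            std::lock_guard<std::mutex> lock(gQueueLock);
            gQueue.push_back(value);
        }
        // ...and do any follow-up work after the guard has released it.
    }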
@@ -84,7 +84,7 @@ public:
     uint32_t GetNumReceivers() const { return fReceivers.GetCount(); }
 };
 
-int32_t plDispatch::fNumBufferReq = 0;
+int32_t plDispatch::fNumBufferReq = 0;
 bool plDispatch::fMsgActive = false;
 plMsgWrap* plDispatch::fMsgCurrent = nil;
 plMsgWrap* plDispatch::fMsgHead = nil;
@@ -92,8 +92,8 @@ plMsgWrap* plDispatch::fMsgTail = nil;
 hsTArray<plMessage*> plDispatch::fMsgWatch;
 MsgRecieveCallback plDispatch::fMsgRecieveCallback = nil;
 
-hsMutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
-hsMutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch
+std::recursive_mutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
+std::mutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch
 
 
 plDispatch::plDispatch()
@@ -227,18 +227,19 @@ bool plDispatch::IListeningForExactType(uint16_t hClass)
 
 void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
 {
-    fMsgCurrentMutex.Lock();
+    {
+        std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
 
 #ifdef HS_DEBUGGING
-    if( msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch) )
-        fMsgWatch.Append(msgWrap->fMsg);
+        if (msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch))
+            fMsgWatch.Append(msgWrap->fMsg);
 #endif // HS_DEBUGGING
 
-    if( fMsgTail )
-        fMsgTail = IInsertToQueue(&fMsgTail->fNext, msgWrap);
-    else
-        fMsgTail = IInsertToQueue(&fMsgHead, msgWrap);
-    fMsgCurrentMutex.Unlock();
+        if (fMsgTail)
+            fMsgTail = IInsertToQueue(&fMsgTail->fNext, msgWrap);
+        else
+            fMsgTail = IInsertToQueue(&fMsgHead, msgWrap);
+    }
 
     if( !async )
 // Test for fMsgActive in IMsgDispatch(), properly wrapped inside a mutex -mcn
@@ -248,25 +249,22 @@ void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
 // On starts deferring msg delivery until buffering is set to off again.
 bool plDispatch::SetMsgBuffering(bool on)
 {
-    fMsgCurrentMutex.Lock();
-    if( on )
     {
-        hsAssert(fNumBufferReq || !fMsgActive, "Can't start deferring message delivery while delivering messages. See mf");
-        if( !fNumBufferReq && fMsgActive )
+        std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
+        if (on)
         {
-            fMsgCurrentMutex.Unlock();
-            return false;
-        }
+            hsAssert(fNumBufferReq || !fMsgActive, "Can't start deferring message delivery while delivering messages. See mf");
+            if (!fNumBufferReq && fMsgActive)
+                return false;
 
-        fNumBufferReq++;
-        fMsgActive = true;
-        fMsgCurrentMutex.Unlock();
-    }
-    else if( !--fNumBufferReq )
-    {
-        fMsgActive = false;
-        fMsgCurrentMutex.Unlock();
-        IMsgDispatch();
+            fNumBufferReq++;
+            fMsgActive = true;
+        }
+        else if (!--fNumBufferReq)
+        {
+            fMsgActive = false;
+            IMsgDispatch();
+        }
     }
     hsAssert(fNumBufferReq >= 0, "Mismatched number of on/off dispatch buffering requests");
@@ -275,25 +273,25 @@ bool plDispatch::SetMsgBuffering(bool on)
 
 void plDispatch::IMsgDispatch()
 {
-    if( !fMsgDispatchLock.TryLock() )
+    if (!fMsgDispatchLock.try_lock())
         return;
 
     if( fMsgActive )
     {
-        fMsgDispatchLock.Unlock();
+        fMsgDispatchLock.unlock();
         return;
     }
 
     fMsgActive = true;
     int responseLevel=0;
 
-    fMsgCurrentMutex.Lock();
+    fMsgCurrentMutex.lock();
 
     plMsgWrap* origTail = fMsgTail;
     while((fMsgCurrent = fMsgHead))
     {
         IDequeue(&fMsgHead, &fMsgTail);
-        fMsgCurrentMutex.Unlock();
+        fMsgCurrentMutex.unlock();
 
         plMessage* msg = fMsgCurrent->fMsg;
         bool nonLocalMsg = msg && msg->HasBCastFlag(plMessage::kNetNonLocal);
@@ -402,16 +400,16 @@ void plDispatch::IMsgDispatch()
 //                  }
 //              }
 
-        fMsgCurrentMutex.Lock();
+        fMsgCurrentMutex.lock();
 
         delete fMsgCurrent;
         // TEMP
         fMsgCurrent = (class plMsgWrap *)0xdeadc0de;
     }
-    fMsgCurrentMutex.Unlock();
+    fMsgCurrentMutex.unlock();
 
     fMsgActive = false;
-    fMsgDispatchLock.Unlock();
+    fMsgDispatchLock.unlock();
 }
 
 //
@@ -419,12 +417,12 @@ void plDispatch::IMsgDispatch()
 //
 bool plDispatch::IMsgNetPropagate(plMessage* msg)
 {
-    fMsgCurrentMutex.Lock();
+    {
+        std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
 
-    // Make sure cascaded messages all have the same net flags
-    plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);
-
-    fMsgCurrentMutex.Unlock();
+        // Make sure cascaded messages all have the same net flags
+        plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);
+    }
 
     // Decide if msg should go out over the network.
     // If kNetForce is used, this message should always go out over the network, even if it's already
@@ -511,10 +509,9 @@ void plDispatch::MsgQueue(plMessage* msg)
 {
     if (fQueuedMsgOn)
     {
-        fQueuedMsgListMutex.Lock();
+        std::lock_guard<std::mutex> lock(fQueuedMsgListMutex);
         hsAssert(msg,"Message missing");
         fQueuedMsgList.push_back(msg);
-        fQueuedMsgListMutex.Unlock();
     }
     else
         MsgSend(msg, false);
@@ -522,23 +519,23 @@ void plDispatch::MsgQueue(plMessage* msg)
 
 void plDispatch::MsgQueueProcess()
 {
-    // Process all messages on Queue, unlock while sending them
-    // this would allow other threads to put new messages on the list while we send()
-    while (1)
-    {
-        plMessage * pMsg = nil;
-        fQueuedMsgListMutex.Lock();
-        int size = fQueuedMsgList.size();
-        if (size)
-        {   pMsg = fQueuedMsgList.front();
-            fQueuedMsgList.pop_front();
+    // Process all messages on Queue, unlock while sending them
+    // this would allow other threads to put new messages on the list while we send()
+    bool empty = false;
+    while (!empty)
+    {
+        plMessage * pMsg = nullptr;
+        {
+            std::lock_guard<std::mutex> lock(fQueuedMsgListMutex);
+            empty = fQueuedMsgList.empty();
+            if (!empty)
+            {
+                pMsg = fQueuedMsgList.front();
+                fQueuedMsgList.pop_front();
+            }
         }
-        fQueuedMsgListMutex.Unlock();
         if (pMsg)
-        {   MsgSend(pMsg, false);
-        }
-        if (!size)
-            break;
+            MsgSend(pMsg, false);
     }
 }
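Two choices in the plDispatch changes above are worth calling out. fMsgCurrentMutex becomes a std::recursive_mutex, presumably because the lock can be re-entered on the same thread (for example, SetMsgBuffering() now calls IMsgDispatch() while its guard on fMsgCurrentMutex is still held, and IMsgDispatch() locks that same mutex again), whereas fMsgDispatchLock stays a plain std::mutex since it is only probed with try_lock(). MsgQueueProcess() is also restructured so each message is popped under a scoped guard and MsgSend() runs with the mutex released. A reduced sketch of that pop-under-lock, send-unlocked loop, with illustrative names only:

    #include <cstdio>
    #include <list>
    #include <mutex>

    std::mutex gListLock;
    std::list<int> gPending;

    void Handle(int item) { std::printf("%d\n", item); }

    void ProcessPending()
    {
        bool empty = false;
        while (!empty)
        {
            int item = 0;
            {
                // Pop one item while holding the lock...
                std::lock_guard<std::mutex> lock(gListLock);
                empty = gPending.empty();
                if (!empty)
                {
                    item = gPending.front();
                    gPending.pop_front();
                }
            }
            // ...then handle it with the lock released, as MsgQueueProcess() now does.
            if (!empty)
                Handle(item);
        }
    }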
@@ -42,7 +42,9 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 #ifndef plDispatch_inc
 #define plDispatch_inc
 
+#include <list>
+#include <mutex>
 #include "hsTemplates.h"
 #include "plgDispatch.h"
 #include "hsThread.h"
@@ -74,10 +76,10 @@ protected:
     hsKeyedObject* fOwner;
 
     plMsgWrap* fFutureMsgQueue;
-    static int32_t fNumBufferReq;
+    static int32_t fNumBufferReq;
     static plMsgWrap* fMsgCurrent;
-    static hsMutex fMsgCurrentMutex; // mutex for above
-    static hsMutex fMsgDispatchLock; // mutex for IMsgDispatch
+    static std::recursive_mutex fMsgCurrentMutex; // mutex for above
+    static std::mutex fMsgDispatchLock; // mutex for IMsgDispatch
     static plMsgWrap* fMsgHead;
     static plMsgWrap* fMsgTail;
     static bool fMsgActive;
@@ -86,7 +88,7 @@ protected:
 
     hsTArray<plTypeFilter*> fRegisteredExactTypes;
     std::list<plMessage*> fQueuedMsgList;
-    hsMutex fQueuedMsgListMutex; // mutex for above
+    std::mutex fQueuedMsgListMutex; // mutex for above
     bool fQueuedMsgOn; // Turns on or off Queued Messages, Plugins need them off
 
     hsKeyedObject* IGetOwner() { return fOwner; }
@@ -46,8 +46,8 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 ***/
 
 #include "Pch.h"
-#include "hsThread.h"
 #include <list>
+#include <mutex>
 #include "hsRefCnt.h"
 #pragma hdrstop
 
@@ -61,16 +61,24 @@ namespace pnNetCli {
 ***/
 
 struct ChannelCrit {
-    ~ChannelCrit ();
-    ChannelCrit ();
-    inline void Enter () { m_critsect.Lock(); }
-    inline void Leave () { m_critsect.Unlock(); }
-    inline void EnterSafe () { if (m_init) m_critsect.Lock(); }
-    inline void LeaveSafe () { if (m_init) m_critsect.Unlock(); }
+    ~ChannelCrit();
+    ChannelCrit() : m_init(true) { }
+
+    inline void lock()
+    {
+        hsAssert(m_init, "Bad things have happened.");
+        m_critsect.lock();
+    }
+
+    inline void unlock()
+    {
+        hsAssert(m_init, "Bad things have happened.");
+        m_critsect.unlock();
+    }
 
 private:
     bool m_init;
-    hsMutex m_critsect;
+    std::mutex m_critsect;
 };
 
 struct NetMsgChannel : hsRefCnt {
@@ -100,14 +108,10 @@ static std::list<NetMsgChannel*>* s_channels;
 *
 ***/
 
-//===========================================================================
-ChannelCrit::ChannelCrit () {
-    m_init = true;
-}
-
 //===========================================================================
 ChannelCrit::~ChannelCrit () {
-    EnterSafe();
+    std::lock_guard<ChannelCrit> lock(*this);
 
     if (s_channels) {
         while (s_channels->size()) {
             NetMsgChannel* const channel = s_channels->front();
@@ -118,7 +122,6 @@ ChannelCrit::~ChannelCrit () {
         delete s_channels;
         s_channels = nil;
     }
-    LeaveSafe();
 }
 
 
@@ -298,15 +301,14 @@ NetMsgChannel * NetMsgChannelLock (
     uint32_t * largestRecv
 ) {
     NetMsgChannel * channel;
-    s_channelCrit.Enter();
-    if (nil != (channel = FindChannel_CS(protocol, server))) {
+    std::lock_guard<ChannelCrit> lock(s_channelCrit);
+    if (nullptr != (channel = FindChannel_CS(protocol, server))) {
         *largestRecv = channel->m_largestRecv;
         channel->Ref("ChannelLock");
     }
     else {
         *largestRecv = 0;
     }
-    s_channelCrit.Leave();
     return channel;
 }
 
@@ -314,11 +316,9 @@ NetMsgChannel * NetMsgChannelLock (
 void NetMsgChannelUnlock (
     NetMsgChannel * channel
 ) {
-    s_channelCrit.Enter();
-    {
-        channel->UnRef("ChannelLock");
-    }
-    s_channelCrit.Leave();
+    std::lock_guard<ChannelCrit> lock(s_channelCrit);
+
+    channel->UnRef("ChannelLock");
 }
 
 //============================================================================
@@ -388,33 +388,31 @@ void NetMsgProtocolRegister (
     const plBigNum& dh_xa, // client: dh_x server: dh_a
     const plBigNum& dh_n
 ) {
-    s_channelCrit.EnterSafe();
-    {
-        NetMsgChannel * channel = FindOrCreateChannel_CS(protocol, server);
+    std::lock_guard<ChannelCrit> lock(s_channelCrit);
 
-        // make sure no connections have been established on this protocol, otherwise
-        // we'll be modifying a live data structure; NetCli's don't lock their protocol
-        // to operate on it once they have linked to it!
-        ASSERT(channel->RefCnt() == 1);
+    NetMsgChannel * channel = FindOrCreateChannel_CS(protocol, server);
 
-        channel->m_dh_g = dh_g;
-        channel->m_dh_xa = dh_xa;
-        channel->m_dh_n = dh_n;
+    // make sure no connections have been established on this protocol, otherwise
+    // we'll be modifying a live data structure; NetCli's don't lock their protocol
+    // to operate on it once they have linked to it!
+    ASSERT(channel->RefCnt() == 1);
 
-        if (sendMsgCount)
-            AddSendMsgs_CS(channel, sendMsgs, sendMsgCount);
-        if (recvMsgCount)
-            AddRecvMsgs_CS(channel, recvMsgs, recvMsgCount);
-    }
-    s_channelCrit.LeaveSafe();
+    channel->m_dh_g = dh_g;
+    channel->m_dh_xa = dh_xa;
+    channel->m_dh_n = dh_n;
+
+    if (sendMsgCount)
+        AddSendMsgs_CS(channel, sendMsgs, sendMsgCount);
+    if (recvMsgCount)
+        AddRecvMsgs_CS(channel, recvMsgs, recvMsgCount);
 }
 
 //===========================================================================
 void NetMsgProtocolDestroy (uint32_t protocol, bool server) {
-    s_channelCrit.EnterSafe();
+    std::lock_guard<ChannelCrit> lock(s_channelCrit);
+
     if (NetMsgChannel* channel = FindChannel_CS(protocol, server)) {
         s_channels->remove(channel);
         channel->UnRef("ChannelLink");
     }
-    s_channelCrit.LeaveSafe();
 }
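The ChannelCrit rework above is the one non-mechanical piece in this file: instead of Enter()/Leave() wrappers it now exposes lock() and unlock(), which is exactly the BasicLockable interface std::lock_guard expects, so std::lock_guard<ChannelCrit> can scope it just like a real mutex. A stripped-down sketch of the same idea, using an illustrative LoggingLock type that is not part of the commit:

    #include <cstdio>
    #include <mutex>

    // Any type that provides lock()/unlock() (BasicLockable) works with std::lock_guard.
    class LoggingLock
    {
        std::mutex fLock;

    public:
        void lock()
        {
            fLock.lock();
            std::puts("acquired");
        }

        void unlock()
        {
            std::puts("releasing");
            fLock.unlock();
        }
    };

    void Example()
    {
        static LoggingLock sLock;
        std::lock_guard<LoggingLock> guard(sLock);   // calls sLock.lock() now, sLock.unlock() at scope exit
    }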
@@ -90,12 +90,13 @@ hsError plSoundPreloader::Run()
 
     while (fRunning)
     {
-        fCritSect.Lock();
-        while (fBuffers.GetCount())
-        {
-            templist.Append(fBuffers.Pop());
+        {
+            std::lock_guard<std::mutex> lock(fCritSect);
+            while (fBuffers.GetCount())
+            {
+                templist.Append(fBuffers.Pop());
+            }
         }
-        fCritSect.Unlock();
 
         if (templist.GetCount() == 0)
         {
@@ -130,14 +131,15 @@ hsError plSoundPreloader::Run()
     }
 
     // we need to be sure that all buffers are removed from our load list when shutting this thread down or we will hang,
-    // since the sound buffer will wait to be destroyed until it is marked as loaded
-    fCritSect.Lock();
-    while (fBuffers.GetCount())
+    // since the sound buffer will wait to be destroyed until it is marked as loaded
     {
-        plSoundBuffer* buf = fBuffers.Pop();
-        buf->SetLoaded(true);
+        std::lock_guard<std::mutex> lock(fCritSect);
+        while (fBuffers.GetCount())
+        {
+            plSoundBuffer* buf = fBuffers.Pop();
+            buf->SetLoaded(true);
+        }
     }
-    fCritSect.Unlock();
 
     return hsOK;
 }
@@ -57,6 +57,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 #include "plAudioFileReader.h"
 #include "hsThread.h"
 #include "plFileSystem.h"
+#include <mutex>
 
 //// Class Definition ////////////////////////////////////////////////////////
 
@@ -165,7 +166,7 @@ protected:
     hsTArray<plSoundBuffer*> fBuffers;
     hsEvent fEvent;
     bool fRunning;
-    hsMutex fCritSect;
+    std::mutex fCritSect;
 
 public:
     virtual hsError Run();
@@ -184,9 +185,10 @@ public:
     bool IsRunning() const { return fRunning; }
 
     void AddBuffer(plSoundBuffer* buffer) {
-        fCritSect.Lock();
-        fBuffers.Push(buffer);
-        fCritSect.Unlock();
+        {
+            std::lock_guard<std::mutex> lock(fCritSect);
+            fBuffers.Push(buffer);
+        }
 
         fEvent.Signal();
     }
@@ -56,7 +56,7 @@ plStreamSource::plStreamSource()
 
 void plStreamSource::ICleanup()
 {
-    hsTempMutexLock lock(fMutex);
+    std::lock_guard<std::mutex> lock(fMutex);
 
     // loop through all the file data records, and delete the streams
     decltype(fFileData.begin()) curData;
@@ -72,7 +72,7 @@ void plStreamSource::ICleanup()
 
 hsStream* plStreamSource::GetFile(const plFileName& filename)
 {
-    hsTempMutexLock lock(fMutex);
+    std::lock_guard<std::mutex> lock(fMutex);
 
     plFileName sFilename = filename.Normalize('/');
     if (fFileData.find(sFilename) == fFileData.end())
@@ -112,7 +112,7 @@ std::vector<plFileName> plStreamSource::GetListOfNames(const plFileName& dir, co
 {
     plFileName sDir = dir.Normalize('/');
     hsAssert(ext.CharAt(0) != '.', "Don't add a dot");
-    hsTempMutexLock lock(fMutex);
+    std::lock_guard<std::mutex> lock(fMutex);
 
     // loop through all the file data records, and create the list
     std::vector<plFileName> retVal;
@@ -142,7 +142,7 @@ bool plStreamSource::InsertFile(const plFileName& filename, hsStream* stream)
 {
     plFileName sFilename = filename.Normalize('/');
 
-    hsTempMutexLock lock(fMutex);
+    std::lock_guard<std::mutex> lock(fMutex);
     if (fFileData.find(sFilename) != fFileData.end())
         return false; // duplicate entry, return failure
 
@@ -43,8 +43,8 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 #define plStreamSource_h_inc
 
 #include <map>
+#include <mutex>
 #include "hsStream.h"
-#include "hsThread.h"
 
 // A class for holding and accessing file streams. The preloader will insert
 // files in here once they are loaded. In internal builds, if a requested file
@@ -60,7 +60,7 @@ private:
         hsStream* fStream; // we own this pointer, so clean it up
     };
     std::map<plFileName, fileData, plFileName::less_i> fFileData; // key is filename
-    hsMutex fMutex;
+    std::mutex fMutex;
     uint32_t fServerKey[4];
 
     void ICleanup(); // closes all file pointers and cleans up after itself
@@ -55,7 +55,7 @@ const float TimeBasedAvgRing<T>::kPercision = 0.001;
 template <class T>
 void TimeBasedAvgRing<T>::AddItem(T value, double time)
 {
-    hsTempMutexLock lock( fLock );
+    std::lock_guard<std::mutex> lock(fLock);
 
     if ( fList.empty() )
     {
@@ -44,9 +44,7 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
 
 #include "HeadSpin.h"
 #include <list>
-
-#include "hsThread.h"
-
+#include <mutex>
 
 // A Time based Value Averaging class
 // implemented in a ring buffer
@@ -91,7 +89,7 @@ private:
     float fMaxAvg;
     double fTotal;
     TimeListIterator fRingStart, fRingEnd;
-    hsMutex fLock;
+    std::mutex fLock;
 public:
     TimeBasedAvgRing():fLen(0.f),fAvg(0.f),fMaxAvg(0.f),fTotal(0.0) {}
 
@@ -69,7 +69,6 @@ class plPipeline;
 // really be visible at any given time.
 
 class plStatusLogMgr;
-class hsMutex;
 class plStatusLogDrawerStub;
 class plStatusLog
 {
@@ -206,8 +205,6 @@ class plStatusLogMgr
 
     static plFileName IGetBasePath();
 
-    hsMutex fMutex; // To make multithreaded-safe
-
 public:
 
     enum