
Simplify plDispatch mutexes a bit

Michael Hansen, 10 years ago
parent commit b774548066

  1. Sources/Plasma/NucleusLib/pnDispatch/plDispatch.cpp (49 lines changed)
  2. Sources/Plasma/NucleusLib/pnDispatch/plDispatch.h (2 lines changed)

Sources/Plasma/NucleusLib/pnDispatch/plDispatch.cpp

@@ -92,7 +92,7 @@ plMsgWrap* plDispatch::fMsgTail = nil;
 hsTArray<plMessage*> plDispatch::fMsgWatch;
 MsgRecieveCallback plDispatch::fMsgRecieveCallback = nil;
-std::recursive_mutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
+std::mutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
 std::mutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch
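Context for the type change: a std::recursive_mutex may be re-locked by the thread that already owns it, while re-locking a plain std::mutex from the same thread is undefined behavior (in practice, usually a deadlock). Downgrading fMsgCurrentMutex is therefore only safe because the hunks below also remove every path that re-entered the lock. A minimal standalone sketch of the distinction, with illustrative names that are not from the Plasma codebase:

    #include <mutex>

    std::recursive_mutex gRecursive;
    std::mutex gPlain;

    void inner()
    {
        // OK: the owning thread may lock a recursive_mutex again;
        // an internal count tracks the nesting depth.
        std::lock_guard<std::recursive_mutex> lock(gRecursive);
    }

    void outer()
    {
        std::lock_guard<std::recursive_mutex> lock(gRecursive);
        inner(); // fine, nesting depth becomes 2

        // The same nesting on a plain std::mutex is undefined behavior:
        // std::lock_guard<std::mutex> a(gPlain);
        // std::lock_guard<std::mutex> b(gPlain); // UB, typically deadlocks
    }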
@@ -228,7 +228,7 @@ bool plDispatch::IListeningForExactType(uint16_t hClass)
 void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
 {
     {
-        std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
+        std::lock_guard<std::mutex> lock(fMsgCurrentMutex);
 #ifdef HS_DEBUGGING
         if (msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch))
@ -249,22 +249,21 @@ void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
// On starts deferring msg delivery until buffering is set to off again. // On starts deferring msg delivery until buffering is set to off again.
bool plDispatch::SetMsgBuffering(bool on) bool plDispatch::SetMsgBuffering(bool on)
{ {
std::unique_lock<std::mutex> lock(fMsgCurrentMutex);
if (on)
{ {
std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex); hsAssert(fNumBufferReq || !fMsgActive, "Can't start deferring message delivery while delivering messages. See mf");
if (on) if (!fNumBufferReq && fMsgActive)
{ return false;
hsAssert(fNumBufferReq || !fMsgActive, "Can't start deferring message delivery while delivering messages. See mf");
if (!fNumBufferReq && fMsgActive)
return false;
fNumBufferReq++; fNumBufferReq++;
fMsgActive = true; fMsgActive = true;
} }
else if (!--fNumBufferReq) else if (!--fNumBufferReq)
{ {
fMsgActive = false; fMsgActive = false;
IMsgDispatch(); lock.unlock();
} IMsgDispatch();
} }
hsAssert(fNumBufferReq >= 0, "Mismatched number of on/off dispatch buffering requests"); hsAssert(fNumBufferReq >= 0, "Mismatched number of on/off dispatch buffering requests");
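The heart of this hunk is the switch from std::lock_guard to std::unique_lock: unlike lock_guard, a unique_lock can release its mutex early via unlock(). The old code called IMsgDispatch() while still holding fMsgCurrentMutex, and IMsgDispatch locks that same mutex internally, which is why a recursive_mutex was required; the new code drops the lock first. A minimal sketch of the pattern, with hypothetical names rather than the Plasma code:

    #include <mutex>

    std::mutex gStateMutex;
    bool gActive = false;

    void Deliver(); // assumed to lock gStateMutex internally

    void SetBuffering(bool on)
    {
        std::unique_lock<std::mutex> lock(gStateMutex);
        if (on) {
            gActive = true;
        } else {
            gActive = false;
            lock.unlock(); // release before calling back into locking code
            Deliver();     // safe: gStateMutex can be taken again
        }
    }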
@@ -273,25 +272,23 @@ bool plDispatch::SetMsgBuffering(bool on)
 void plDispatch::IMsgDispatch()
 {
-    if (!fMsgDispatchLock.try_lock())
+    std::unique_lock<std::mutex> dispatchLock(fMsgDispatchLock, std::try_to_lock);
+    if (!dispatchLock.owns_lock())
         return;
 
-    if( fMsgActive )
-    {
-        fMsgDispatchLock.unlock();
+    if (fMsgActive)
         return;
-    }
 
     fMsgActive = true;
     int responseLevel=0;
 
-    fMsgCurrentMutex.lock();
+    std::unique_lock<std::mutex> msgCurrentLock(fMsgCurrentMutex);
 
     plMsgWrap* origTail = fMsgTail;
     while((fMsgCurrent = fMsgHead))
     {
         IDequeue(&fMsgHead, &fMsgTail);
-        fMsgCurrentMutex.unlock();
+        msgCurrentLock.unlock();
 
         plMessage* msg = fMsgCurrent->fMsg;
         bool nonLocalMsg = msg && msg->HasBCastFlag(plMessage::kNetNonLocal);
@@ -400,16 +397,14 @@ void plDispatch::IMsgDispatch()
 //          }
 //      }
 
-        fMsgCurrentMutex.lock();
+        msgCurrentLock.lock();
 
         delete fMsgCurrent;
         // TEMP
         fMsgCurrent = (class plMsgWrap *)0xdeadc0de;
     }
-    fMsgCurrentMutex.unlock();
 
     fMsgActive = false;
-    fMsgDispatchLock.unlock();
 }
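The two IMsgDispatch hunks above replace a manual try_lock()/unlock() pair with a std::unique_lock constructed using std::try_to_lock: owns_lock() reports whether the non-blocking acquisition succeeded, and the destructor releases the mutex on every return path. That RAII behavior is what allowed deleting the explicit unlock() calls before the early return and at the end of the function. A condensed sketch of the idiom, with illustrative names only:

    #include <mutex>

    std::mutex gDispatchLock;

    void Dispatch()
    {
        // Try to take the lock without blocking; bail out if another
        // thread is already dispatching.
        std::unique_lock<std::mutex> lock(gDispatchLock, std::try_to_lock);
        if (!lock.owns_lock())
            return;

        // ... dispatch queued messages ...

        // No explicit unlock needed: the unique_lock destructor releases
        // gDispatchLock on every exit path, including early returns.
    }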
@@ -418,7 +413,7 @@ void plDispatch::IMsgDispatch()
 bool plDispatch::IMsgNetPropagate(plMessage* msg)
 {
     {
-        std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
+        std::lock_guard<std::mutex> lock(fMsgCurrentMutex);
 
         // Make sure cascaded messages all have the same net flags
         plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);

Sources/Plasma/NucleusLib/pnDispatch/plDispatch.h

@@ -78,7 +78,7 @@ protected:
     plMsgWrap* fFutureMsgQueue;
     static int32_t fNumBufferReq;
     static plMsgWrap* fMsgCurrent;
-    static std::recursive_mutex fMsgCurrentMutex; // mutex for above
+    static std::mutex fMsgCurrentMutex; // mutex for above
     static std::mutex fMsgDispatchLock; // mutex for IMsgDispatch
     static plMsgWrap* fMsgHead;
     static plMsgWrap* fMsgTail;
