@@ -84,7 +84,7 @@ public:
     uint32_t GetNumReceivers() const { return fReceivers.GetCount(); }
 };
 
 int32_t plDispatch::fNumBufferReq = 0;
 bool plDispatch::fMsgActive = false;
 plMsgWrap* plDispatch::fMsgCurrent = nil;
 plMsgWrap* plDispatch::fMsgHead = nil;
@@ -92,8 +92,8 @@ plMsgWrap* plDispatch::fMsgTail = nil;
 hsTArray<plMessage*> plDispatch::fMsgWatch;
 MsgRecieveCallback plDispatch::fMsgRecieveCallback = nil;
 
-hsMutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
-hsMutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch
+std::recursive_mutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
+std::mutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch
 
 
 plDispatch::plDispatch()
@@ -227,18 +227,19 @@ bool plDispatch::IListeningForExactType(uint16_t hClass)
 
 void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
 {
-    fMsgCurrentMutex.Lock();
+    {
+        std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
 
 #ifdef HS_DEBUGGING
-    if( msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch) )
-        fMsgWatch.Append(msgWrap->fMsg);
+        if (msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch))
+            fMsgWatch.Append(msgWrap->fMsg);
 #endif // HS_DEBUGGING
 
-    if( fMsgTail )
-        fMsgTail = IInsertToQueue(&fMsgTail->fNext, msgWrap);
-    else
-        fMsgTail = IInsertToQueue(&fMsgHead, msgWrap);
-    fMsgCurrentMutex.Unlock();
+        if (fMsgTail)
+            fMsgTail = IInsertToQueue(&fMsgTail->fNext, msgWrap);
+        else
+            fMsgTail = IInsertToQueue(&fMsgHead, msgWrap);
+    }
 
     if( !async )
         // Test for fMsgActive in IMsgDispatch(), properly wrapped inside a mutex -mcn
@@ -248,25 +249,22 @@ void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
 // On starts deferring msg delivery until buffering is set to off again.
 bool plDispatch::SetMsgBuffering(bool on)
 {
-    fMsgCurrentMutex.Lock();
-    if( on )
+    std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
+    if (on)
     {
         hsAssert(fNumBufferReq || !fMsgActive, "Can't start deferring message delivery while delivering messages. See mf");
-        if( !fNumBufferReq && fMsgActive )
+        if (!fNumBufferReq && fMsgActive)
         {
-            fMsgCurrentMutex.Unlock();
             return false;
         }
 
         fNumBufferReq++;
         fMsgActive = true;
-        fMsgCurrentMutex.Unlock();
     }
-    else if( !--fNumBufferReq )
+    else if (!--fNumBufferReq)
     {
         fMsgActive = false;
         IMsgDispatch();
-        fMsgCurrentMutex.Unlock();
     }
     hsAssert(fNumBufferReq >= 0, "Mismatched number of on/off dispatch buffering requests");
 
@@ -275,25 +273,25 @@ bool plDispatch::SetMsgBuffering(bool on)
 
 void plDispatch::IMsgDispatch()
 {
-    if( !fMsgDispatchLock.TryLock() )
+    if (!fMsgDispatchLock.try_lock())
         return;
 
     if( fMsgActive )
     {
-        fMsgDispatchLock.Unlock();
+        fMsgDispatchLock.unlock();
         return;
     }
 
     fMsgActive = true;
     int responseLevel=0;
 
-    fMsgCurrentMutex.Lock();
+    fMsgCurrentMutex.lock();
 
     plMsgWrap* origTail = fMsgTail;
     while((fMsgCurrent = fMsgHead))
     {
         IDequeue(&fMsgHead, &fMsgTail);
-        fMsgCurrentMutex.Unlock();
+        fMsgCurrentMutex.unlock();
 
         plMessage* msg = fMsgCurrent->fMsg;
         bool nonLocalMsg = msg && msg->HasBCastFlag(plMessage::kNetNonLocal);
@@ -402,16 +400,16 @@ void plDispatch::IMsgDispatch()
 //          }
 //      }
 
-        fMsgCurrentMutex.Lock();
+        fMsgCurrentMutex.lock();
 
         delete fMsgCurrent;
         // TEMP
         fMsgCurrent = (class plMsgWrap *)0xdeadc0de;
     }
-    fMsgCurrentMutex.Unlock();
+    fMsgCurrentMutex.unlock();
 
     fMsgActive = false;
-    fMsgDispatchLock.Unlock();
+    fMsgDispatchLock.unlock();
 }
 
 //
@@ -419,12 +417,12 @@ void plDispatch::IMsgDispatch()
 //
 bool plDispatch::IMsgNetPropagate(plMessage* msg)
 {
-    fMsgCurrentMutex.Lock();
+    {
+        std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
 
-    // Make sure cascaded messages all have the same net flags
-    plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);
-
-    fMsgCurrentMutex.Unlock();
+        // Make sure cascaded messages all have the same net flags
+        plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);
+    }
 
     // Decide if msg should go out over the network.
     // If kNetForce is used, this message should always go out over the network, even if it's already
@@ -511,10 +509,9 @@ void plDispatch::MsgQueue(plMessage* msg)
 {
     if (fQueuedMsgOn)
     {
-        fQueuedMsgListMutex.Lock();
+        std::lock_guard<std::mutex> lock(fQueuedMsgListMutex);
         hsAssert(msg,"Message missing");
         fQueuedMsgList.push_back(msg);
-        fQueuedMsgListMutex.Unlock();
     }
     else
         MsgSend(msg, false);
@@ -522,23 +519,23 @@ void plDispatch::MsgQueue(plMessage* msg)
 void plDispatch::MsgQueueProcess()
 {
     // Process all messages on Queue, unlock while sending them
     // this would allow other threads to put new messages on the list while we send()
-    while (1)
+    bool empty = false;
+    while (!empty)
     {
-        plMessage * pMsg = nil;
-        fQueuedMsgListMutex.Lock();
-        int size = fQueuedMsgList.size();
-        if (size)
-        {   pMsg = fQueuedMsgList.front();
-            fQueuedMsgList.pop_front();
-        }
-        fQueuedMsgListMutex.Unlock();
-
+        plMessage * pMsg = nullptr;
+        {
+            std::lock_guard<std::mutex> lock(fQueuedMsgListMutex);
+            empty = fQueuedMsgList.empty();
+            if (!empty)
+            {
+                pMsg = fQueuedMsgList.front();
+                fQueuedMsgList.pop_front();
+            }
+        }
         if (pMsg)
-        {   MsgSend(pMsg, false);
-        }
-
-        if (!size)
-            break;
+        {
+            MsgSend(pMsg, false);
+        }
     }
 }