mirror of https://foundry.openuru.org/gitblit/r/CWE-ou-minkata.git synced 2025-07-18 11:19:10 +00:00

Replace hsMutex with std::mutex

2014-04-05 20:45:58 -07:00
parent 964256411e
commit 2947acb2c8
16 changed files with 166 additions and 342 deletions
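
The pattern applied throughout this commit: hsMutex's manually paired Lock()/Unlock() calls become scope-bound standard guards, so the mutex is released on every exit path, including early returns and exceptions. A minimal before/after sketch of the conversion (fLock, DoWork, and Example are illustrative names, not code from this commit):

    #include <mutex>

    std::mutex fLock;                     // was: hsMutex fLock;

    void DoWork() { /* guarded work */ }  // placeholder for the protected code

    void Example()
    {
        // was: fLock.Lock(); DoWork(); fLock.Unlock();
        std::lock_guard<std::mutex> lock(fLock);
        DoWork();
    }   // the guard's destructor unlocks here, even if DoWork() throws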

View File

@@ -84,7 +84,7 @@ public:
uint32_t GetNumReceivers() const { return fReceivers.GetCount(); }
};
int32_t plDispatch::fNumBufferReq = 0;
bool plDispatch::fMsgActive = false;
plMsgWrap* plDispatch::fMsgCurrent = nil;
plMsgWrap* plDispatch::fMsgHead = nil;
@@ -92,8 +92,8 @@ plMsgWrap* plDispatch::fMsgTail = nil;
hsTArray<plMessage*> plDispatch::fMsgWatch;
MsgRecieveCallback plDispatch::fMsgRecieveCallback = nil;
hsMutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
hsMutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch
std::recursive_mutex plDispatch::fMsgCurrentMutex; // mutex for fMsgCurrent
std::mutex plDispatch::fMsgDispatchLock; // mutex for IMsgDispatch
plDispatch::plDispatch()
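
Note the asymmetry above: fMsgCurrentMutex becomes a std::recursive_mutex while fMsgDispatchLock stays a plain std::mutex. The recursive variant is needed because the same thread can re-acquire fMsgCurrentMutex: in SetMsgBuffering below, IMsgDispatch() is now called while the lock_guard still holds the mutex, and IMsgDispatch locks it again. A minimal sketch of the distinction, with illustrative names:

    #include <mutex>

    std::recursive_mutex gCurrentLock;   // the same thread may lock this twice

    void Inner()
    {
        // Runs while Outer() already holds gCurrentLock. With std::mutex this
        // second lock would deadlock; recursive_mutex just bumps an owner count.
        std::lock_guard<std::recursive_mutex> lock(gCurrentLock);
    }

    void Outer()
    {
        std::lock_guard<std::recursive_mutex> lock(gCurrentLock);
        Inner();
    }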
@@ -227,18 +227,19 @@ bool plDispatch::IListeningForExactType(uint16_t hClass)
void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
{
fMsgCurrentMutex.Lock();
{
std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
#ifdef HS_DEBUGGING
if( msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch) )
fMsgWatch.Append(msgWrap->fMsg);
if (msgWrap->fMsg->HasBCastFlag(plMessage::kMsgWatch))
fMsgWatch.Append(msgWrap->fMsg);
#endif // HS_DEBUGGING
if( fMsgTail )
fMsgTail = IInsertToQueue(&fMsgTail->fNext, msgWrap);
else
fMsgTail = IInsertToQueue(&fMsgHead, msgWrap);
fMsgCurrentMutex.Unlock();
if (fMsgTail)
fMsgTail = IInsertToQueue(&fMsgTail->fNext, msgWrap);
else
fMsgTail = IInsertToQueue(&fMsgHead, msgWrap);
}
if( !async )
// Test for fMsgActive in IMsgDispatch(), properly wrapped inside a mutex -mcn
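
The rewritten IMsgEnqueue above wraps the queue manipulation in a bare brace scope so the lock_guard releases fMsgCurrentMutex before dispatch is triggered, mirroring the old explicit Unlock() call. The scoping idiom in isolation (gLock and the function name are illustrative):

    #include <mutex>

    std::mutex gLock;

    void EnqueueThenDispatch()
    {
        {   // explicit scope bounds the guard's lifetime
            std::lock_guard<std::mutex> lock(gLock);
            // ... mutate the shared queue ...
        }   // gLock is released here, not at the end of the function
        // ... now safe to run code that acquires gLock again ...
    }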
@@ -248,25 +249,22 @@ void plDispatch::IMsgEnqueue(plMsgWrap* msgWrap, bool async)
// On starts deferring msg delivery until buffering is set to off again.
bool plDispatch::SetMsgBuffering(bool on)
{
fMsgCurrentMutex.Lock();
if( on )
{
hsAssert(fNumBufferReq || !fMsgActive, "Can't start deferring message delivery while delivering messages. See mf");
if( !fNumBufferReq && fMsgActive )
std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
if (on)
{
fMsgCurrentMutex.Unlock();
return false;
}
hsAssert(fNumBufferReq || !fMsgActive, "Can't start deferring message delivery while delivering messages. See mf");
if (!fNumBufferReq && fMsgActive)
return false;
fNumBufferReq++;
fMsgActive = true;
fMsgCurrentMutex.Unlock();
}
else if( !--fNumBufferReq )
{
fMsgActive = false;
fMsgCurrentMutex.Unlock();
IMsgDispatch();
fNumBufferReq++;
fMsgActive = true;
}
else if (!--fNumBufferReq)
{
fMsgActive = false;
IMsgDispatch();
}
}
hsAssert(fNumBufferReq >= 0, "Mismatched number of on/off dispatch buffering requests");
@@ -275,25 +273,25 @@ bool plDispatch::SetMsgBuffering(bool on)
void plDispatch::IMsgDispatch()
{
if( !fMsgDispatchLock.TryLock() )
if (!fMsgDispatchLock.try_lock())
return;
if( fMsgActive )
{
fMsgDispatchLock.Unlock();
fMsgDispatchLock.unlock();
return;
}
fMsgActive = true;
int responseLevel=0;
fMsgCurrentMutex.Lock();
fMsgCurrentMutex.lock();
plMsgWrap* origTail = fMsgTail;
while((fMsgCurrent = fMsgHead))
{
IDequeue(&fMsgHead, &fMsgTail);
fMsgCurrentMutex.Unlock();
fMsgCurrentMutex.unlock();
plMessage* msg = fMsgCurrent->fMsg;
bool nonLocalMsg = msg && msg->HasBCastFlag(plMessage::kNetNonLocal);
@@ -402,16 +400,16 @@ void plDispatch::IMsgDispatch()
// }
// }
fMsgCurrentMutex.Lock();
fMsgCurrentMutex.lock();
delete fMsgCurrent;
// TEMP
fMsgCurrent = (class plMsgWrap *)0xdeadc0de;
}
fMsgCurrentMutex.Unlock();
fMsgCurrentMutex.unlock();
fMsgActive = false;
fMsgDispatchLock.Unlock();
fMsgDispatchLock.unlock();
}
//
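
IMsgDispatch keeps hsMutex's non-blocking TryLock semantics through std::mutex::try_lock(): if another thread is already dispatching, the function bails out at once, and each successful path still has to call unlock() by hand. For comparison, a std::unique_lock constructed with std::try_to_lock expresses the same check with RAII; a sketch of that alternative, which the commit does not adopt (names are illustrative):

    #include <mutex>

    std::mutex gDispatchLock;            // stands in for fMsgDispatchLock

    void DispatchSketch()
    {
        std::unique_lock<std::mutex> lock(gDispatchLock, std::try_to_lock);
        if (!lock.owns_lock())
            return;                      // another thread is already dispatching
        // ... drain the message queue ...
    }   // the lock is released automatically on every return path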
@@ -419,12 +417,12 @@ void plDispatch::IMsgDispatch()
//
bool plDispatch::IMsgNetPropagate(plMessage* msg)
{
fMsgCurrentMutex.Lock();
{
std::lock_guard<std::recursive_mutex> lock(fMsgCurrentMutex);
// Make sure cascaded messages all have the same net flags
plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);
fMsgCurrentMutex.Unlock();
// Make sure cascaded messages all have the same net flags
plNetClientApp::InheritNetMsgFlags(fMsgCurrent ? fMsgCurrent->fMsg : nil, msg, false);
}
// Decide if msg should go out over the network.
// If kNetForce is used, this message should always go out over the network, even if it's already
@@ -511,10 +509,9 @@ void plDispatch::MsgQueue(plMessage* msg)
{
if (fQueuedMsgOn)
{
fQueuedMsgListMutex.Lock();
std::lock_guard<std::mutex> lock(fQueuedMsgListMutex);
hsAssert(msg,"Message missing");
fQueuedMsgList.push_back(msg);
fQueuedMsgListMutex.Unlock();
}
else
MsgSend(msg, false);
@@ -522,23 +519,23 @@ void plDispatch::MsgQueue(plMessage* msg)
void plDispatch::MsgQueueProcess()
{
// Process all messages on Queue, unlock while sending them
// this would allow other threads to put new messages on the list while we send()
while (1)
{
plMessage * pMsg = nil;
fQueuedMsgListMutex.Lock();
int size = fQueuedMsgList.size();
if (size)
{ pMsg = fQueuedMsgList.front();
fQueuedMsgList.pop_front();
// Process all messages on Queue, unlock while sending them
// this would allow other threads to put new messages on the list while we send()
bool empty = false;
while (!empty)
{
plMessage * pMsg = nullptr;
{
std::lock_guard<std::mutex> lock(fQueuedMsgListMutex);
empty = fQueuedMsgList.empty();
if (!empty)
{
pMsg = fQueuedMsgList.front();
fQueuedMsgList.pop_front();
}
}
fQueuedMsgListMutex.Unlock();
if (pMsg)
{ MsgSend(pMsg, false);
}
if (!size)
break;
MsgSend(pMsg, false);
}
}
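
The rewritten MsgQueueProcess narrows the critical section to the pop itself: the guard covers only the empty() check and front()/pop_front(), and MsgSend runs unlocked so other threads can keep enqueueing while messages go out. The shape of that drain loop, reduced to its essentials (gQueue, gQueueLock, and Deliver are illustrative stand-ins):

    #include <list>
    #include <mutex>

    class plMessage;                      // forward declaration for the sketch

    std::mutex gQueueLock;                // stands in for fQueuedMsgListMutex
    std::list<plMessage*> gQueue;         // stands in for fQueuedMsgList

    void Deliver(plMessage*) { /* stands in for MsgSend */ }

    void Drain()
    {
        for (;;) {
            plMessage* msg = nullptr;
            {   // hold the lock only long enough to pop one message
                std::lock_guard<std::mutex> lock(gQueueLock);
                if (gQueue.empty())
                    return;
                msg = gQueue.front();
                gQueue.pop_front();
            }
            Deliver(msg);                 // deliver outside the lock
        }
    }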

View File

@@ -42,7 +42,9 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
#ifndef plDispatch_inc
#define plDispatch_inc
#include <list>
#include <mutex>
#include "hsTemplates.h"
#include "plgDispatch.h"
#include "hsThread.h"
@@ -74,10 +76,10 @@ protected:
hsKeyedObject* fOwner;
plMsgWrap* fFutureMsgQueue;
static int32_t fNumBufferReq;
static plMsgWrap* fMsgCurrent;
static hsMutex fMsgCurrentMutex; // mutex for above
static hsMutex fMsgDispatchLock; // mutex for IMsgDispatch
static std::recursive_mutex fMsgCurrentMutex; // mutex for above
static std::mutex fMsgDispatchLock; // mutex for IMsgDispatch
static plMsgWrap* fMsgHead;
static plMsgWrap* fMsgTail;
static bool fMsgActive;
@@ -86,7 +88,7 @@ protected:
hsTArray<plTypeFilter*> fRegisteredExactTypes;
std::list<plMessage*> fQueuedMsgList;
hsMutex fQueuedMsgListMutex; // mutex for above
std::mutex fQueuedMsgListMutex; // mutex for above
bool fQueuedMsgOn; // Turns on or off Queued Messages, Plugins need them off
hsKeyedObject* IGetOwner() { return fOwner; }

View File

@@ -46,8 +46,8 @@ You can contact Cyan Worlds, Inc. by email legal@cyan.com
***/
#include "Pch.h"
#include "hsThread.h"
#include <list>
#include <mutex>
#include "hsRefCnt.h"
#pragma hdrstop
@@ -61,16 +61,24 @@ namespace pnNetCli {
***/
struct ChannelCrit {
~ChannelCrit ();
ChannelCrit ();
inline void Enter () { m_critsect.Lock(); }
inline void Leave () { m_critsect.Unlock(); }
inline void EnterSafe () { if (m_init) m_critsect.Lock(); }
inline void LeaveSafe () { if (m_init) m_critsect.Unlock(); }
~ChannelCrit();
ChannelCrit() : m_init(true) { }
inline void lock()
{
hsAssert(m_init, "Bad things have happened.");
m_critsect.lock();
}
inline void unlock()
{
hsAssert(m_init, "Bad things have happened.");
m_critsect.unlock();
}
private:
bool m_init;
hsMutex m_critsect;
std::mutex m_critsect;
};
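
Renaming Enter()/Leave() to lock()/unlock() is what lets the code below hand a ChannelCrit straight to std::lock_guard: the guard requires only the standard BasicLockable interface (member lock() and unlock()), not an actual std::mutex. A minimal sketch of such a custom lockable type (AssertingMutex is an illustrative name, not from this commit):

    #include <mutex>

    // Any type exposing lock()/unlock() satisfies BasicLockable and can be
    // managed by std::lock_guard, exactly like ChannelCrit above.
    struct AssertingMutex {
        void lock()   { m_mutex.lock(); }
        void unlock() { m_mutex.unlock(); }
    private:
        std::mutex m_mutex;
    };

    AssertingMutex gCrit;

    void Use()
    {
        std::lock_guard<AssertingMutex> guard(gCrit);   // calls gCrit.lock()
        // ... guarded work ...
    }                                                   // calls gCrit.unlock()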
struct NetMsgChannel : hsRefCnt {
@@ -100,14 +108,10 @@ static std::list<NetMsgChannel*>* s_channels;
*
***/
//===========================================================================
ChannelCrit::ChannelCrit () {
m_init = true;
}
//===========================================================================
ChannelCrit::~ChannelCrit () {
EnterSafe();
std::lock_guard<ChannelCrit> lock(*this);
if (s_channels) {
while (s_channels->size()) {
NetMsgChannel* const channel = s_channels->front();
@@ -118,7 +122,6 @@ ChannelCrit::~ChannelCrit () {
delete s_channels;
s_channels = nil;
}
LeaveSafe();
}
@@ -298,15 +301,14 @@ NetMsgChannel * NetMsgChannelLock (
uint32_t * largestRecv
) {
NetMsgChannel * channel;
s_channelCrit.Enter();
if (nil != (channel = FindChannel_CS(protocol, server))) {
std::lock_guard<ChannelCrit> lock(s_channelCrit);
if (nullptr != (channel = FindChannel_CS(protocol, server))) {
*largestRecv = channel->m_largestRecv;
channel->Ref("ChannelLock");
}
else {
*largestRecv = 0;
}
s_channelCrit.Leave();
return channel;
}
@@ -314,11 +316,9 @@ NetMsgChannel * NetMsgChannelLock (
void NetMsgChannelUnlock (
NetMsgChannel * channel
) {
s_channelCrit.Enter();
{
channel->UnRef("ChannelLock");
}
s_channelCrit.Leave();
std::lock_guard<ChannelCrit> lock(s_channelCrit);
channel->UnRef("ChannelLock");
}
//============================================================================
@@ -388,33 +388,31 @@ void NetMsgProtocolRegister (
const plBigNum& dh_xa, // client: dh_x server: dh_a
const plBigNum& dh_n
) {
s_channelCrit.EnterSafe();
{
NetMsgChannel * channel = FindOrCreateChannel_CS(protocol, server);
std::lock_guard<ChannelCrit> lock(s_channelCrit);
// make sure no connections have been established on this protocol, otherwise
// we'll be modifying a live data structure; NetCli's don't lock their protocol
// to operate on it once they have linked to it!
ASSERT(channel->RefCnt() == 1);
NetMsgChannel * channel = FindOrCreateChannel_CS(protocol, server);
channel->m_dh_g = dh_g;
channel->m_dh_xa = dh_xa;
channel->m_dh_n = dh_n;
// make sure no connections have been established on this protocol, otherwise
// we'll be modifying a live data structure; NetCli's don't lock their protocol
// to operate on it once they have linked to it!
ASSERT(channel->RefCnt() == 1);
if (sendMsgCount)
AddSendMsgs_CS(channel, sendMsgs, sendMsgCount);
if (recvMsgCount)
AddRecvMsgs_CS(channel, recvMsgs, recvMsgCount);
}
s_channelCrit.LeaveSafe();
channel->m_dh_g = dh_g;
channel->m_dh_xa = dh_xa;
channel->m_dh_n = dh_n;
if (sendMsgCount)
AddSendMsgs_CS(channel, sendMsgs, sendMsgCount);
if (recvMsgCount)
AddRecvMsgs_CS(channel, recvMsgs, recvMsgCount);
}
//===========================================================================
void NetMsgProtocolDestroy (uint32_t protocol, bool server) {
s_channelCrit.EnterSafe();
std::lock_guard<ChannelCrit> lock(s_channelCrit);
if (NetMsgChannel* channel = FindChannel_CS(protocol, server)) {
s_channels->remove(channel);
channel->UnRef("ChannelLink");
}
s_channelCrit.LeaveSafe();
}