Use epicsMutex and epicsEvent instead of pv::data equivalents

This commit is contained in:
Andrew Johnson
2020-12-04 12:30:21 -06:00
committed by mdavidsaver
parent 0332d0f2c1
commit 98e81a542e
8 changed files with 46 additions and 44 deletions

View File

@ -5,6 +5,7 @@
*/
+#include <epicsMutex.h>
#include <pv/standardField.h>
#include <pv/pvAccess.h>
@ -45,7 +46,7 @@ static void ca_connection_handler(struct connection_handler_args args)
void CAChannel::connect(bool isConnected)
{
{
-Lock lock(requestsMutex);
+epicsGuard<epicsMutex> G(requestsMutex);
channelConnected = isConnected;
}
CAChannelProviderPtr provider(channelProvider.lock());
@ -65,7 +66,7 @@ void CAChannel::notifyClient()
if (!provider) return;
bool isConnected = false;
{
-Lock lock(requestsMutex);
+epicsGuard<epicsMutex> G(requestsMutex);
isConnected = channelConnected;
}
if (!isConnected) {
@ -142,7 +143,7 @@ void CAChannel::activate(short priority)
CAChannel::~CAChannel()
{
{
-Lock lock(requestsMutex);
+epicsGuard<epicsMutex> G(requestsMutex);
if (!channelCreated) return;
}
disconnectChannel();
@ -151,7 +152,7 @@ CAChannel::~CAChannel()
void CAChannel::disconnectChannel()
{
{
-Lock lock(requestsMutex);
+epicsGuard<epicsMutex> G(requestsMutex);
if (!channelCreated) return;
channelCreated = false;
}
@ -219,7 +220,7 @@ void CAChannel::getField(GetFieldRequester::shared_pointer const & requester,
CAChannelGetFieldPtr getField(
new CAChannelGetField(shared_from_this(),requester,subField));
{
-Lock lock(requestsMutex);
+epicsGuard<epicsMutex> G(requestsMutex);
if (getConnectionState()!=Channel::CONNECTED) {
getFieldQueue.push(getField);
return;
@ -247,7 +248,7 @@ ChannelGet::shared_pointer CAChannel::createChannelGet(
CAChannelGetPtr channelGet =
CAChannelGet::create(shared_from_this(), channelGetRequester, pvRequest);
{
-Lock lock(requestsMutex);
+epicsGuard<epicsMutex> G(requestsMutex);
if (getConnectionState()!=Channel::CONNECTED) {
getQueue.push(channelGet);
return channelGet;
@ -265,7 +266,7 @@ ChannelPut::shared_pointer CAChannel::createChannelPut(
CAChannelPutPtr channelPut =
CAChannelPut::create(shared_from_this(), channelPutRequester, pvRequest);
{
-Lock lock(requestsMutex);
+epicsGuard<epicsMutex> G(requestsMutex);
if (getConnectionState()!=Channel::CONNECTED) {
putQueue.push(channelPut);
return channelPut;
@ -283,7 +284,7 @@ Monitor::shared_pointer CAChannel::createMonitor(
CAChannelMonitorPtr channelMonitor =
CAChannelMonitor::create(shared_from_this(), monitorRequester, pvRequest);
{
-Lock lock(requestsMutex);
+epicsGuard<epicsMutex> G(requestsMutex);
if (getConnectionState()!=Channel::CONNECTED) {
monitorQueue.push(channelMonitor);
return channelMonitor;
@ -541,7 +542,7 @@ void CAChannelPut::put(PVStructure::shared_pointer const & pvPutStructure,
ChannelPutRequester::shared_pointer putRequester(channelPutRequester.lock());
if (!putRequester) return;
{
-Lock lock(mutex);
+epicsGuard<epicsMutex> G(mutex);
isPut = true;
}
putStatus = dbdToPv->putToDBD(channel,pvPutStructure,block,&ca_put_handler,this);
@ -590,7 +591,7 @@ void CAChannelPut::get()
ChannelPutRequester::shared_pointer putRequester(channelPutRequester.lock());
if (!putRequester) return;
{
-Lock lock(mutex);
+epicsGuard<epicsMutex> G(mutex);
isPut = false;
}
@ -780,7 +781,7 @@ std::string CAChannelMonitor::getRequesterName()
void CAChannelMonitor::subscriptionEvent(struct event_handler_args &args)
{
{
-Lock lock(mutex);
+epicsGuard<epicsMutex> G(mutex);
if (!isStarted) return;
}
MonitorRequester::shared_pointer requester(monitorRequester.lock());
@ -808,7 +809,7 @@ void CAChannelMonitor::subscriptionEvent(struct event_handler_args &args)
void CAChannelMonitor::notifyClient()
{
{
-Lock lock(mutex);
+epicsGuard<epicsMutex> G(mutex);
if(!isStarted) return;
}
MonitorRequester::shared_pointer requester(monitorRequester.lock());
@ -819,7 +820,7 @@ void CAChannelMonitor::notifyClient()
Status CAChannelMonitor::start()
{
{
-Lock lock(mutex);
+epicsGuard<epicsMutex> G(mutex);
if (isStarted)
return Status(Status::STATUSTYPE_WARNING, "already started");
isStarted = true;
@ -836,7 +837,7 @@ Status CAChannelMonitor::start()
if (result == ECA_NORMAL)
return Status::Ok;
{
-Lock lock(mutex);
+epicsGuard<epicsMutex> G(mutex);
isStarted = false;
}
return Status(Status::STATUSTYPE_ERROR, string(ca_message(result)));
@ -845,7 +846,7 @@ Status CAChannelMonitor::start()
Status CAChannelMonitor::stop()
{
{
-Lock lock(mutex);
+epicsGuard<epicsMutex> G(mutex);
if (!isStarted)
return Status(Status::STATUSTYPE_WARNING, "already stopped");
isStarted = false;
@ -861,7 +862,7 @@ Status CAChannelMonitor::stop()
MonitorElementPtr CAChannelMonitor::poll()
{
{
-Lock lock(mutex);
+epicsGuard<epicsMutex> G(mutex);
if (!isStarted) return MonitorElementPtr();
}
return monitorQueue->poll();

View File

@ -15,10 +15,11 @@
#include <queue>
#include <vector>
+#include <epicsMutex.h>
+#include <epicsEvent.h>
#include <cadef.h>
#include <pv/pvAccess.h>
-#include <pv/event.h>
#include "caProviderPvt.h"
#include "dbdToPv.h"
@ -121,7 +122,7 @@ private:
NotificationPtr connectNotification;
CAContextPtr ca_context;
-epics::pvData::Mutex requestsMutex;
+epicsMutex requestsMutex;
std::queue<CAChannelGetFieldPtr> getFieldQueue;
std::queue<CAChannelPutPtr> putQueue;
std::queue<CAChannelGetPtr> getQueue;
@ -163,7 +164,7 @@ private:
CAContextPtr ca_context;
DbdToPvPtr dbdToPv;
-epics::pvData::Mutex mutex;
+epicsMutex mutex;
epics::pvData::PVStructure::shared_pointer pvStructure;
epics::pvData::BitSet::shared_pointer bitSet;
};
@ -208,7 +209,7 @@ private:
CAContextPtr ca_context;
DbdToPvPtr dbdToPv;
-epics::pvData::Mutex mutex;
+epicsMutex mutex;
epics::pvData::PVStructure::shared_pointer pvStructure;
epics::pvData::BitSet::shared_pointer bitSet;
};
@ -251,7 +252,7 @@ private:
CAContextPtr ca_context;
DbdToPvPtr dbdToPv;
-epics::pvData::Mutex mutex;
+epicsMutex mutex;
epics::pvData::PVStructure::shared_pointer pvStructure;
epics::pvData::MonitorElementPtr activeElement;

View File

@ -6,6 +6,7 @@
#include <cadef.h>
#include <epicsSignal.h>
+#include <epicsMutex.h>
#include <pv/logger.h>
#include <pv/pvAccess.h>
@ -31,7 +32,7 @@ CAChannelProvider::~CAChannelProvider()
{
std::queue<CAChannelPtr> channelQ;
{
-Lock lock(channelListMutex);
+epicsGuard<epicsMutex> G(channelListMutex);
for (size_t i = 0; i < caChannelList.size(); ++i)
{
CAChannelPtr caChannel(caChannelList[i].lock());
@ -105,7 +106,7 @@ Channel::shared_pointer CAChannelProvider::createChannel(
void CAChannelProvider::addChannel(const CAChannelPtr &channel)
{
-Lock lock(channelListMutex);
+epicsGuard<epicsMutex> G(channelListMutex);
for (size_t i = 0; i < caChannelList.size(); ++i)
{
if (!(caChannelList[i].lock()))

View File

@ -13,6 +13,7 @@
#define CAPROVIDERPVT_H
#include <cadef.h>
+#include <epicsMutex.h>
#include <pv/logger.h>
#include <pv/pvAccess.h>
@ -92,7 +93,7 @@ public:
}
private:
CAContextPtr ca_context;
-epics::pvData::Mutex channelListMutex;
+epicsMutex channelListMutex;
std::vector<CAChannelWPtr> caChannelList;
NotifierConveyor connectNotifier;

View File

@ -349,7 +349,7 @@ void DbdToPv::getChoicesDone(struct event_handler_args &args)
size_t num = dbr_enum_p->no_str;
choices.reserve(num);
for(size_t i=0; i<num; ++i) choices.push_back(string(&dbr_enum_p->strs[i][0]));
-choicesEvent.signal();
+choicesEvent.trigger();
}

View File

@ -10,10 +10,9 @@
#ifndef DbdToPv_H
#define DbdToPv_H
#include <shareLib.h>
-#include <pv/pvAccess.h>
+#include <epicsEvent.h>
#include <cadef.h>
-#include <pv/event.h>
+#include <pv/pvAccess.h>
#include "caChannel.h"
namespace epics {
@ -130,7 +129,7 @@ private:
chtype caValueType;
chtype caRequestType;
unsigned long maxElements;
-epics::pvData::Event choicesEvent;
+epicsEvent choicesEvent;
epicsTimeStamp caTimeStamp;
CaAlarm caAlarm;
CaDisplay caDisplay;

View File

@ -7,14 +7,13 @@
#include <iostream>
#include <queue>
#include <epicsThread.h>
-#include <pv/event.h>
-#include <pv/lock.h>
+#include <epicsMutex.h>
+#include <epicsEvent.h>
#include <pv/sharedPtr.h>
#define epicsExportSharedSymbols
#include "notifierConveyor.h"
-using epics::pvData::Lock;
namespace epics {
namespace pvAccess {
namespace ca {
@ -23,10 +22,10 @@ NotifierConveyor::~NotifierConveyor()
{
if (thread) {
{
-Lock the(mutex);
+epicsGuard<epicsMutex> G(mutex);
halt = true;
}
-workToDo.signal();
+workToDo.trigger();
thread->exitWait();
}
}
@ -45,12 +44,12 @@ void NotifierConveyor::notifyClient(
NotificationPtr const &notificationPtr)
{
{
-Lock the(mutex);
+epicsGuard<epicsMutex> G(mutex);
if (halt || notificationPtr->queued) return;
notificationPtr->queued = true;
workQueue.push(notificationPtr);
}
-workToDo.signal();
+workToDo.trigger();
}
void NotifierConveyor::run()
@ -58,7 +57,7 @@ void NotifierConveyor::run()
bool stopping;
do {
workToDo.wait();
-Lock the(mutex);
+epicsGuard<epicsMutex> G(mutex);
stopping = halt;
while (!stopping && !workQueue.empty())
{
@ -69,7 +68,7 @@ void NotifierConveyor::run()
notification->queued = false;
NotifierClientPtr client(notification->client.lock());
if (client) {
-the.unlock();
+epicsGuardRelease<epicsMutex> U(G);
try { client->notifyClient(); }
catch (std::exception &e) {
std::cerr << "Exception from notifyClient(): "
@ -79,7 +78,6 @@ void NotifierConveyor::run()
std::cerr << "Unknown exception from notifyClient()"
<< std::endl;
}
-the.lock();
}
stopping = halt;
// client's destructor may run here, could delete *this

View File

@ -9,8 +9,9 @@
#include <queue>
#include <epicsThread.h>
-#include <pv/event.h>
-#include <pv/lock.h>
+#include <epicsMutex.h>
+#include <epicsEvent.h>
#include <pv/sharedPtr.h>
namespace epics {
namespace pvAccess {
@ -55,8 +56,8 @@ public:
private:
std::tr1::shared_ptr<epicsThread> thread;
-epics::pvData::Mutex mutex;
-epics::pvData::Event workToDo;
+epicsMutex mutex;
+epicsEvent workToDo;
std::queue<NotificationWPtr> workQueue;
bool halt;
};