Remove g++ or C++11 requirement

^#$*&#$ MSVC
This commit is contained in:
Michael Davidsaver
2017-11-01 10:04:36 -05:00
parent b30948249a
commit 943247f0f0
10 changed files with 48 additions and 53 deletions

View File

@ -94,9 +94,9 @@ ChannelCacheEntry::CRequester::channelStateChange(pva::Channel::shared_pointer c
}
// fanout notification
AUTO_VAL(interested, chan->interested.lock_vector()); // Copy
ChannelCacheEntry::interested_t::vector_type interested(chan->interested.lock_vector()); // Copy
FOREACH(it, end, interested)
FOREACH(ChannelCacheEntry::interested_t::vector_type::const_iterator, it, end, interested)
{
GWChannel *chan = it->get();
pva::ChannelRequester::shared_pointer req(chan->requester.lock());
@ -167,7 +167,7 @@ ChannelCache::~ChannelCache()
entries_t E;
E.swap(entries);
FOREACH(it, end, E)
FOREACH(entries_t::iterator, it, end, E)
{
ChannelCacheEntry *ent = it->second.get();

View File

@ -3,15 +3,7 @@
#include <memory>
#if __cplusplus>=201103L
# define AUTO_VAL(NAME,VAL) auto NAME = VAL
# define AUTO_REF(NAME,VAL) auto& NAME = VAL
# define FOREACH(IT,END,C) for(auto IT=(C).begin(), END=(C).end(); IT!=END; ++IT)
#elif defined(__GNUC__)
# define AUTO_VAL(NAME,VAL) __typeof__(VAL) NAME(VAL)
# define AUTO_REF(NAME,VAL) __typeof__(VAL)& NAME(VAL)
# define FOREACH(IT,END,C) for(__typeof__((C).begin()) IT=(C).begin(), END=(C).end(); IT!=END; ++IT)
#endif
#define FOREACH(ITERTYPE, IT,END,C) for(ITERTYPE IT=(C).begin(), END=(C).end(); IT!=END; ++IT)
namespace p2p {
#if __cplusplus>=201103L

View File

@ -177,7 +177,7 @@ MonitorCacheEntry::monitorEvent(pvd::MonitorPtr const & monitor)
if(usr->filled.empty())
dsnotify.push_back(pusr);
AUTO_VAL(elem, usr->empty.front());
pvd::MonitorElementPtr elem(usr->empty.front());
*elem->overrunBitSet = *lastelem->overrunBitSet;
*elem->changedBitSet = *lastelem->changedBitSet;
@ -197,7 +197,7 @@ MonitorCacheEntry::monitorEvent(pvd::MonitorPtr const & monitor)
// unlock here, race w/ stop(), unlisten()?
//TODO: notify from worker thread
FOREACH(it,end,dsnotify) {
FOREACH(dsnotify_t::iterator, it,end,dsnotify) {
MonitorUser *usr = (*it).get();
pvd::MonitorRequester::shared_pointer req(usr->req);
epicsAtomicIncrSizeT(&usr->nwakeups);
@ -224,7 +224,7 @@ MonitorCacheEntry::unlisten(pvd::MonitorPtr const & monitor)
M->destroy();
std::cout<<__PRETTY_FUNCTION__<<" destroy client monitor\n";
}
FOREACH(it, end, tonotify) {
FOREACH(interested_t::vector_type::iterator, it, end, tonotify) {
MonitorUser *usr = it->get();
pvd::MonitorRequester::shared_pointer req(usr->req);
if(usr->inuse.empty()) // TODO: what about stopped?

View File

@ -220,7 +220,7 @@ void statusServer(int lvl, const char *chanexpr)
if(!chanexpr || iswild) { // no string or some glob pattern
entries = scp->cache.entries; // copy of std::map
} else if(chanexpr) { // just one channel
AUTO_VAL(it, scp->cache.entries.find(chanexpr));
ChannelCache::entries_t::iterator it(scp->cache.entries.find(chanexpr));
if(it!=scp->cache.entries.end())
entries[it->first] = it->second;
}
@ -233,7 +233,7 @@ void statusServer(int lvl, const char *chanexpr)
if(lvl<=0)
continue;
FOREACH(it, end, entries) {
FOREACH(ChannelCache::entries_t::const_iterator, it, end, entries) {
const std::string& channame = it->first;
if(iswild && !epicsStrGlobMatch(channame.c_str(), chanexpr))
continue;
@ -263,7 +263,7 @@ void statusServer(int lvl, const char *chanexpr)
if(lvl<=1)
continue;
FOREACH(it2, end2, mons) {
FOREACH(ChannelCacheEntry::mon_entries_t::lock_vector_type::const_iterator, it2, end2, mons) {
MonitorCacheEntry& ME = *it2->second;
MonitorCacheEntry::interested_t::vector_type usrs;
@ -305,7 +305,7 @@ void statusServer(int lvl, const char *chanexpr)
if(lvl<=2)
continue;
FOREACH(it3, end3, usrs) {
FOREACH(MonitorCacheEntry::interested_t::vector_type::const_iterator, it3, end3, usrs) {
MonitorUser& MU = **it3;
size_t nempty, nfilled, nused, total;
@ -412,7 +412,7 @@ void refCheck(int lvl)
return;
}
if(ctx) {
const AUTO_REF(prov, ctx->getChannelProviders());
const std::vector<pva::ChannelProvider::shared_pointer>& prov(ctx->getChannelProviders());
if(lvl>0) std::cout<<"Server has "<<prov.size()<<" providers\n";
@ -433,17 +433,17 @@ void refCheck(int lvl)
chan_count += entries.size();
FOREACH(it, end, entries)
FOREACH(ChannelCache::entries_t::const_iterator, it, end, entries)
{
AUTO_VAL(M, it->second->mon_entries.lock_vector());
ChannelCacheEntry::mon_entries_t::lock_vector_type M(it->second->mon_entries.lock_vector());
if(lvl>0) std::cout<<" Channel "<<it->second->channelName
<<" has "<<M.size()<<" Client Monitors\n";
mon_count += M.size();
FOREACH(it2, end2, M)
FOREACH(ChannelCacheEntry::mon_entries_t::lock_vector_type::const_iterator, it2, end2, M)
{
AUTO_REF(W, it2->second->interested);
const MonitorCacheEntry::interested_t& W(it2->second->interested);
if(lvl>0) std::cout<<" Used by "<<W.size()<<" Client Monitors\n";
mon_user_count += W.size();
}

View File

@ -52,7 +52,8 @@ struct GroupMemberInfo {
std::string pvname, // aka. name passed to dbChannelOpen()
pvfldname; // PVStructure sub-field
std::string structID; // ID to assign to sub-field
std::set<std::string> triggers; // names in GroupInfo::members_names which are post()d on events from pvfldname
typedef std::set<std::string> triggers_t;
triggers_t triggers; // names in GroupInfo::members_names which are post()d on events from pvfldname
std::tr1::shared_ptr<PVIFBuilder> builder; // not actually shared, but allows us to be copyable
int putorder;
@ -91,11 +92,11 @@ struct PDBProcessor
// validate trigger mappings and process into bit map form
void resolveTriggers()
{
FOREACH(it, end, groups) { // for each group
FOREACH(groups_t::iterator, it, end, groups) { // for each group
GroupInfo& info = it->second;
if(info.hastriggers) {
FOREACH(it2, end2, info.triggers) { // for each trigger source
FOREACH(GroupInfo::triggers_t::iterator, it2, end2, info.triggers) { // for each trigger source
const std::string& src = it2->first;
GroupInfo::triggers_set_t& targets = it2->second;
@ -111,7 +112,7 @@ struct PDBProcessor
fprintf(stderr, " pdb trg '%s.%s' -> ",
info.name.c_str(), src.c_str());
FOREACH(it3, end3, targets) { // for each trigger target
FOREACH(GroupInfo::triggers_set_t::const_iterator, it3, end3, targets) { // for each trigger target
const std::string& target = *it3;
if(target=="*") {
@ -151,7 +152,7 @@ struct PDBProcessor
} else {
if(PDBProviderDebug>1) fprintf(stderr, " pdb default triggers for '%s'\n", info.name.c_str());
FOREACH(it2, end2, info.members) {
FOREACH(GroupInfo::members_t::iterator, it2, end2, info.members) {
GroupMemberInfo& mem = *it2;
if(mem.pvname.empty())
continue;
@ -324,9 +325,9 @@ PDBProvider::PDBProvider(const epics::pvAccess::Configuration::shared_pointer &)
#ifdef USE_MULTILOCK
// assemble group PVD structure definitions and build dbLockers
FOREACH(it, end, proc.groups)
FOREACH(PDBProcessor::groups_t::const_iterator, it, end, proc.groups)
{
GroupInfo &info=it->second;
const GroupInfo &info=it->second;
try{
if(persist_pv_map.find(info.name)!=persist_pv_map.end())
throw std::runtime_error("name already in used");
@ -361,7 +362,7 @@ PDBProvider::PDBProvider(const epics::pvAccess::Configuration::shared_pointer &)
for(size_t i=0, J=0, N=info.members.size(); i<N; i++)
{
GroupMemberInfo &mem = info.members[i];
const GroupMemberInfo &mem = info.members[i];
// parse down attachment point to build/traverse structure
FieldName parts(mem.pvfldname);
@ -432,7 +433,7 @@ PDBProvider::PDBProvider(const epics::pvAccess::Configuration::shared_pointer &)
// construct locker for records triggered by each member
for(size_t i=0, J=0, N=info.members.size(); i<N; i++)
{
GroupMemberInfo &mem = info.members[i];
const GroupMemberInfo &mem = info.members[i];
if(mem.pvname.empty()) continue;
PDBGroupPV::Info& info = pv->members[J++];
@ -441,7 +442,7 @@ PDBProvider::PDBProvider(const epics::pvAccess::Configuration::shared_pointer &)
std::vector<dbCommon*> trig_records;
trig_records.reserve(mem.triggers.size());
FOREACH(it, end, mem.triggers) {
FOREACH(GroupMemberInfo::triggers_t::const_iterator, it, end, mem.triggers) {
members_map_t::const_iterator imap(members_map.find(*it));
if(imap==members_map.end())
throw std::logic_error("trigger resolution missed map to non-dbChannel");
@ -489,7 +490,7 @@ PDBProvider::PDBProvider(const epics::pvAccess::Configuration::shared_pointer &)
// prepare for monitor
size_t i=0;
FOREACH(it2, end2, pv->members)
FOREACH(PDBGroupPV::members_t::iterator, it2, end2, pv->members)
{
PDBGroupPV::Info& info = *it2;
info.evt_VALUE.index = info.evt_PROPERTY.index = i++;

View File

@ -50,7 +50,7 @@ void pdb_group_event(void *user_arg, struct dbChannel *chan,
// we ignore 'pfl' (and the dbEvent queue) when collecting an atomic snapshot
DBManyLocker L(info.locker); // lock only those records in the triggers list
FOREACH(it, end, info.triggers)
FOREACH(PDBGroupPV::Info::triggers_t::const_iterator, it, end, info.triggers)
{
size_t i = *it;
// go get a consistent snapshot we must ignore the db_field_log which came through the dbEvent buffer
@ -61,7 +61,7 @@ void pdb_group_event(void *user_arg, struct dbChannel *chan,
if(self->initial_waits>0) return; // don't post() until all subscriptions get initial updates
FOREACH(it, end, self->interested) {
FOREACH(PDBGroupPV::interested_t::const_iterator, it, end, self->interested) {
PDBGroupMonitor& mon = *it->get();
mon.post(self->scratch);
}

View File

@ -87,7 +87,8 @@ struct epicsShareClass PDBGroupPV : public PDBPV
DBCH chan;
std::tr1::shared_ptr<PVIFBuilder> builder;
FieldName attachment;
std::vector<size_t> triggers; // index in PDBGroupPV::members
typedef std::vector<size_t> triggers_t;
triggers_t triggers; // index in PDBGroupPV::members
DBManyLock locker; // lock only those channels being triggered
p2p::auto_ptr<PVIF> pvif;
DBEvent evt_VALUE, evt_PROPERTY;
@ -95,7 +96,8 @@ struct epicsShareClass PDBGroupPV : public PDBPV
Info() :had_initial_VALUE(false), had_initial_PROPERTY(false), allowProc(false) {}
};
epics::pvData::shared_vector<Info> members;
typedef epics::pvData::shared_vector<Info> members_t;
members_t members;
DBManyLock locker; // all member channels

View File

@ -46,7 +46,7 @@ void pdb_single_event(void *user_arg, struct dbChannel *chan,
if(!self->hadevent_VALUE || !self->hadevent_PROPERTY)
return;
FOREACH(it, end, self->interested) {
FOREACH(PDBSinglePV::interested_t::const_iterator, it, end, self->interested) {
PDBSingleMonitor& mon = *it->get();
mon.post(self->scratch);
}

View File

@ -86,7 +86,7 @@ void pvaLinkChannel::channelStateChange(pva::Channel::shared_pointer const & cha
assert(chan==channel);
if(pvaLinkDebug>2) std::cerr<<"pvaLink channelStateChange "<<name<<pva::Channel::ConnectionStateNames[connectionState]<<"\n";
if(connectionState!=pva::Channel::CONNECTED) {
FOREACH(it, end, links) {
FOREACH(links_t::const_iterator, it, end, links) {
pvaLink* L = *it;
L->detach();
}
@ -132,7 +132,7 @@ void pvaLinkChannel::monitorConnect(pvd::Status const & status,
return;
}
FOREACH(it, end, links) {
FOREACH(links_t::const_iterator, it, end, links) {
pvaLink* L = *it;
L->attach();
}
@ -169,7 +169,7 @@ void pvaLinkChannel::triggerProc(bool atomic, bool force)
{
bool doscan = false;
// check if we actually need to scan anything
FOREACH(it, end, links) {
FOREACH(links_t::const_iterator, it, end, links) {
pvaLink* L = *it;
if ((L->linkmods & pvlOptCP) ||
@ -213,7 +213,7 @@ void pvaLinkChannel::scan(void* arg, epicsJobMode mode)
myscan.usecached = usecached;
if(usecached) {
if(pvaLinkDebug>4) std::cerr<<"populate cache\n";
FOREACH(it, end, links) {
FOREACH(links_t::const_iterator, it, end, links) {
pvaLink *link = *it;
link->get(link->atomcache);
if(pvaLinkDebug>4)
@ -227,7 +227,7 @@ void pvaLinkChannel::scan(void* arg, epicsJobMode mode)
UnGuard U(G);
// we may scan a record after the originating link is re-targeted
FOREACH(it, end, links) {
FOREACH(links_t::const_iterator, it, end, links) {
pvaLink *link = *it;
dbCommon *prec=link->plink->precord;
@ -243,7 +243,7 @@ void pvaLinkChannel::scan(void* arg, epicsJobMode mode)
// another scan may be queued by this point
if(usecached) {
FOREACH(it, end, links) {
FOREACH(links_t::const_iterator, it, end, links) {
pvaLink *link = *it;
link->atomcache.clear();
}

View File

@ -418,12 +418,12 @@ void TestPV::post(const pvd::BitSet& changed, bool notify)
channels_t::vector_type toupdate(channels.lock_vector());
FOREACH(it, end, toupdate) // channel
FOREACH(channels_t::vector_type::const_iterator, it, end, toupdate) // channel
{
TestPVChannel *chan = it->get();
TestPVChannel::monitors_t::vector_type tomon(chan->monitors.lock_vector());
FOREACH(it2, end2, tomon) // monitor/subscription
FOREACH(TestPVChannel::monitors_t::vector_type::const_iterator, it2, end2, tomon) // monitor/subscription
{
TestPVMonitor *mon = it2->get();
@ -446,7 +446,7 @@ void TestPV::post(const pvd::BitSet& changed, bool notify)
if(mon->buffer.empty())
mon->needWakeup = true;
AUTO_REF(elem, mon->free.front());
pvd::MonitorElementPtr& elem(mon->free.front());
// Note: can't use 'changed' to optimize this copy since we don't know
// the state of the free element
elem->pvStructurePtr->copyUnchecked(*mon->overflow->pvStructurePtr);
@ -477,7 +477,7 @@ void TestPV::disconnect()
Guard G(lock);
channels_t::vector_type toupdate(channels.lock_vector());
FOREACH(it, end, toupdate) // channel
FOREACH(channels_t::vector_type::const_iterator, it, end, toupdate) // channel
{
TestPVChannel *chan = it->get();
@ -580,12 +580,12 @@ void TestProvider::dispatch()
testDiag("TestProvider::dispatch");
pvs_t::lock_vector_type allpvs(pvs.lock_vector());
FOREACH(pvit, pvend, allpvs)
FOREACH(pvs_t::lock_vector_type::const_iterator, pvit, pvend, allpvs)
{
TestPV *pv = pvit->second.get();
TestPV::channels_t::vector_type channels(pv->channels.lock_vector());
FOREACH(chit, chend, channels)
FOREACH(TestPV::channels_t::vector_type::const_iterator, chit, chend, channels)
{
TestPVChannel *chan = chit->get();
TestPVChannel::monitors_t::vector_type monitors(chan->monitors.lock_vector());
@ -593,7 +593,7 @@ void TestProvider::dispatch()
if(!chan->isConnected())
continue;
FOREACH(monit, monend, monitors)
FOREACH(TestPVChannel::monitors_t::vector_type::const_iterator, monit, monend, monitors)
{
TestPVMonitor *mon = monit->get();