From 4296c5e015b473dbb6653b5433765285938e551f Mon Sep 17 00:00:00 2001
From: Michael Davidsaver <mdavidsaver@gmail.com>
Date: Sun, 23 Sep 2018 12:03:21 -0700
Subject: [PATCH] more SharedPV::close()

---
 src/server/pva/sharedstate.h       |  4 +++
 src/server/sharedstate_channel.cpp |  2 +-
 src/server/sharedstate_pv.cpp      | 51 +++++++++++++++---------------
 3 files changed, 31 insertions(+), 26 deletions(-)

diff --git a/src/server/pva/sharedstate.h b/src/server/pva/sharedstate.h
index 6d6d19f..2a32b8e 100644
--- a/src/server/pva/sharedstate.h
+++ b/src/server/pva/sharedstate.h
@@ -143,6 +143,10 @@ public:
     //! If destory=true, the internal client list is cleared.
     //! @post In the closed state
     //! @note Provider locking rules apply (@see provider_roles_requester_locking).
+    //!
+    //! close() is not final.  Even with destroy=true, new clients may begin connecting,
+    //! and open() may be called again.  A final close() should be performed after
+    //! removal from the StaticProvider/DynamicProvider, which prevents new clients.
     virtual void close(bool destroy=false);
 
     //! Create a new container which may be used to prepare to call post().
diff --git a/src/server/sharedstate_channel.cpp b/src/server/sharedstate_channel.cpp
index b5912e7..7789b8c 100644
--- a/src/server/sharedstate_channel.cpp
+++ b/src/server/sharedstate_channel.cpp
@@ -51,7 +51,7 @@ SharedChannel::SharedChannel(const std::tr1::shared_ptr<SharedPV> &owner,
         if(owner->channels.empty())
             handler = owner->handler;
         owner->channels.push_back(this);
-        owner->notifiedConn = !!handler;
+        owner->notifiedConn = true;
     }
     if(handler) {
         handler->onFirstConnect(owner);
diff --git a/src/server/sharedstate_pv.cpp b/src/server/sharedstate_pv.cpp
index 4860b23..98851dc 100644
--- a/src/server/sharedstate_pv.cpp
+++ b/src/server/sharedstate_pv.cpp
@@ -231,35 +231,36 @@ void SharedPV::close(bool destroy)
     {
         Guard I(mutex);
 
-        if(!type)
-            return;
+        if(type) {
 
-        p_put.reserve(puts.size());
-        p_rpc.reserve(rpcs.size());
-        p_monitor.reserve(monitors.size());
-        p_channel.reserve(channels.size());
+            p_put.reserve(puts.size());
+            p_rpc.reserve(rpcs.size());
+            p_monitor.reserve(monitors.size());
+            p_channel.reserve(channels.size());
 
-        FOR_EACH(puts_t::const_iterator, it, end, puts) {
-            (*it)->mapper.reset();
-            p_put.push_back((*it)->requester.lock());
-        }
-        FOR_EACH(rpcs_t::const_iterator, it, end, rpcs) {
-            p_rpc.push_back((*it)->requester.lock());
-        }
-        FOR_EACH(monitors_t::const_iterator, it, end, monitors) {
-            (*it)->close();
-            try {
-                p_monitor.push_back((*it)->shared_from_this());
-            }catch(std::tr1::bad_weak_ptr&) { /* ignore, racing dtor */ }
-        }
-        FOR_EACH(channels_t::const_iterator, it, end, channels) {
-            try {
-                p_channel.push_back((*it)->shared_from_this());
-            }catch(std::tr1::bad_weak_ptr&) { /* ignore, racing dtor */ }
+            FOR_EACH(puts_t::const_iterator, it, end, puts) {
+                (*it)->mapper.reset();
+                p_put.push_back((*it)->requester.lock());
+            }
+            FOR_EACH(rpcs_t::const_iterator, it, end, rpcs) {
+                p_rpc.push_back((*it)->requester.lock());
+            }
+            FOR_EACH(monitors_t::const_iterator, it, end, monitors) {
+                (*it)->close();
+                try {
+                    p_monitor.push_back((*it)->shared_from_this());
+                }catch(std::tr1::bad_weak_ptr&) { /* ignore, racing dtor */ }
+            }
+            FOR_EACH(channels_t::const_iterator, it, end, channels) {
+                try {
+                    p_channel.push_back((*it)->shared_from_this());
+                }catch(std::tr1::bad_weak_ptr&) { /* ignore, racing dtor */ }
+            }
+
+            type.reset();
+            current.reset();
         }
 
-        type.reset();
-        current.reset();
 
         if(destroy) {
             // forget about all clients, to prevent the possibility of our
             // sending a second destroy notification.
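
A minimal usage sketch of the two-stage shutdown described by the new close()
documentation, assuming the pvas::StaticProvider add()/remove() API from
<pva/server.h>.  The provider name "example" and the PV name "demo:counter"
are illustrative only.

    #include <pv/pvData.h>

    #include <pva/server.h>
    #include <pva/sharedstate.h>

    namespace pvd = epics::pvData;

    int main()
    {
        pvas::StaticProvider provider("example");
        pvas::SharedPV::shared_pointer pv(pvas::SharedPV::buildReadOnly());

        provider.add("demo:counter", pv);

        // open() defines the type and begins allowing clients to connect.
        pv->open(pvd::getFieldCreate()->createFieldBuilder()
                     ->add("value", pvd::pvInt)
                     ->createStructure());

        // ... serve clients ...

        // Not final: the PV is still reachable through the provider, so new
        // clients may begin connecting, and open() may be called again.
        pv->close();

        // Final close: remove from the provider first so that no new clients
        // can begin connecting, then destroy=true clears the client list.
        provider.remove("demo:counter");
        pv->close(true);
        return 0;
    }

The ordering matters: only removal from the provider prevents new connections,
which is why a close(), even with destroy=true, is not by itself final.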