various fixes at psi repo, as of 2022-02-01
Change-Id: I8cdc849126d52ef0f2f27a0faf661830aac6f874
commit acb3bdad6a
parent 5e3fb10884
@@ -27,12 +27,11 @@ import sys
 import argparse
 from os import path
 
-import mlzlog
-
 # Add import path for inplace usage
 sys.path.insert(0, path.abspath(path.join(path.dirname(__file__), '..')))
 
 from secop.lib import getGeneralConfig
+from secop.logging import initLogging
 from secop.server import Server
 
 
@@ -60,15 +59,21 @@ def parseArgv(argv):
     parser.add_argument('-c',
                         '--cfgfiles',
                         action='store',
-                        help="comma separated list of cfg files\n"
-                             "defaults to <name_of_the_instance>\n"
-                             "cfgfiles given without '.cfg' extension are searched in the configuration directory,"
+                        help="comma separated list of cfg files,\n"
+                             "defaults to <name_of_the_instance>.\n"
+                             "cfgfiles given without '.cfg' extension are searched in the configuration directory, "
                              "else they are treated as path names",
                         default=None)
+    parser.add_argument('-g',
+                        '--gencfg',
+                        action='store',
+                        help="full path of general config file,\n"
+                             "defaults to env. variable FRAPPY_CONFIG_FILE\n",
+                        default=None)
     parser.add_argument('-t',
                         '--test',
                         action='store_true',
-                        help='Check cfg files only',
+                        help='check cfg files only',
                         default=False)
     return parser.parse_args(argv)
 
@@ -80,9 +85,9 @@ def main(argv=None):
     args = parseArgv(argv[1:])
 
     loglevel = 'debug' if args.verbose else ('error' if args.quiet else 'info')
-    mlzlog.initLogging('secop', loglevel, getGeneralConfig()['logdir'])
-
-    srv = Server(args.name, mlzlog.log, cfgfiles=args.cfgfiles, interface=args.port, testonly=args.test)
+    getGeneralConfig(args.gencfg)
+    log = initLogging(loglevel)
+    srv = Server(args.name, log, cfgfiles=args.cfgfiles, interface=args.port, testonly=args.test)
 
     if args.daemonize:
         srv.start()
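The reworked startup reads the general configuration before logging is set up: getGeneralConfig(args.gencfg) fills the config, and initLogging() returns the logger that replaces mlzlog.log as the Server argument. A rough sketch of the same sequence outside the script; the path and the instance name are placeholders, not taken from the commit:

    from secop.lib import getGeneralConfig
    from secop.logging import initLogging
    from secop.server import Server

    getGeneralConfig('/etc/secop/generalConfig.cfg')  # placeholder path; None falls back to FRAPPY_CONFIG_FILE
    log = initLogging('info')
    srv = Server('demo', log, cfgfiles='demo', testonly=True)  # 'demo' is a made-up instance name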

secop/io.py
@@ -90,6 +90,7 @@ class IOBase(Communicator):
 
     def earlyInit(self):
         self._lock = threading.RLock()
+        super().earlyInit()
 
     def connectStart(self):
         raise NotImplementedError
@@ -218,6 +219,7 @@ class StringIO(IOBase):
             if not self.is_connected:
                 self.read_is_connected()  # try to reconnect
                 if not self._conn:
+                    self.log.debug('can not connect to %r' % self.uri)
                     raise CommunicationSilentError('can not connect to %r' % self.uri)
             try:
                 with self._lock:
@@ -234,15 +236,15 @@ class StringIO(IOBase):
                     if garbage is None:  # read garbage only once
                         garbage = self._conn.flush_recv()
                         if garbage:
-                            self.log.debug('garbage: %r', garbage)
+                            self.log.debug('garbage: %r' % garbage)
                     self._conn.send(cmd + self._eol_write)
-                    self.log.debug('send: %s', cmd + self._eol_write)
+                    self.log.debug('> %s' % cmd.decode(self.encoding))
                     reply = self._conn.readline(self.timeout)
             except ConnectionClosed as e:
                 self.closeConnection()
                 raise CommunicationFailedError('disconnected') from None
             reply = reply.decode(self.encoding)
-            self.log.debug('recv: %s', reply)
+            self.log.debug('< %s' % reply)
             return reply
         except Exception as e:
             if str(e) == self._last_error:
@@ -291,6 +293,10 @@ def make_bytes(string):
     return bytes([int(c, 16) if HEX_CODE.match(c) else ord(c) for c in string.split()])
 
 
+def hexify(bytes_):
+    return ' '.join('%02x' % r for r in bytes_)
+
+
 class BytesIO(IOBase):
     identification = Property(
         """identification
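The new hexify() helper renders raw bytes as space-separated hex pairs; the BytesIO debug messages below use it instead of a plain repr. A quick illustration with made-up bytes:

    def hexify(bytes_):                       # as added above
        return ' '.join('%02x' % r for r in bytes_)

    print(hexify(b'\x02\x00\x1fA'))           # -> 02 00 1f 41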
@@ -330,14 +336,14 @@ class BytesIO(IOBase):
                     time.sleep(self.wait_before)
                     garbage = self._conn.flush_recv()
                     if garbage:
-                        self.log.debug('garbage: %r', garbage)
+                        self.log.debug('garbage: %s', hexify(garbage))
                     self._conn.send(request)
-                    self.log.debug('send: %r', request)
+                    self.log.debug('> %s', hexify(request))
                     reply = self._conn.readbytes(replylen, self.timeout)
             except ConnectionClosed as e:
                 self.closeConnection()
                 raise CommunicationFailedError('disconnected') from None
-            self.log.debug('recv: %r', reply)
+            self.log.debug('< %s', hexify(reply))
             return self.getFullReply(request, reply)
         except Exception as e:
             if str(e) == self._last_error:
@@ -362,7 +368,7 @@ class BytesIO(IOBase):
         :return: the full reply (replyheader + additional bytes)
 
         When the reply length is variable, :meth:`communicate` should be called
-        with the `replylen` argument set to minimum expected length of the reply.
+        with the `replylen` argument set to the minimum expected length of the reply.
         Typically this method determines then the length of additional bytes from
         the already received bytes (replyheader) and/or the request and calls
         :meth:`readBytes` to get the remaining bytes.
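The corrected docstring describes the variable-length case: communicate() is called with replylen set to the minimum expected header length, and getFullReply() fetches whatever the header announces via readBytes(). A minimal sketch of such an override, assuming an invented frame layout whose third byte carries the remaining payload length and that readBytes(n) returns the next n bytes, as the docstring implies; callers would then use replylen=3:

    class FramedIO(BytesIO):
        def getFullReply(self, request, replyheader):
            # hypothetical framing: byte 2 of the header announces the remaining payload length
            remaining = replyheader[2]
            if remaining:
                return replyheader + self.readBytes(remaining)
            return replyheader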

@@ -153,7 +153,7 @@ class Change:
         self._reply = None
 
     def __getattr__(self, key):
-        """return attribute from module key is not in self._valuedict"""
+        """return attribute from module key when not in self._valuedict"""
         if key in self._valuedict:
             return self._valuedict[key]
         return getattr(self._module, key)
@@ -174,6 +174,9 @@ class Change:
         self._valuedict.update(result)
         return self._reply
 
+    def __repr__(self):
+        return 'Change<%s>' % ', '.join('%s=%r' % kv for kv in self._valuedict.items())
+
 
 class IOHandlerBase:
     """abstract IO handler
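The added __repr__ is what makes the debug lines introduced in the IOHandler hunks below readable: a Change now prints its pending parameter values instead of a bare object address. With hypothetical values:

    valuedict = {'target': 5.0, 'use_output': True}   # hypothetical pending values
    print('Change<%s>' % ', '.join('%s=%r' % kv for kv in valuedict.items()))
    # -> Change<target=5.0, use_output=True>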
@@ -280,6 +283,8 @@ class IOHandler(IOHandlerBase):
         reply = self.send_command(module)
         # convert them to parameters
         result = self.analyze(module, *reply)
+        module.log.debug('result of analyze_%s: %s', self.group,
+                         ', '.join('%s=%r' % kv for kv in result.items()))
         for pname, value in result.items():
             setattr(module, pname, value)
         for pname in self.parameters:
@@ -322,6 +327,7 @@ class IOHandler(IOHandlerBase):
         change = Change(self, module, valuedict)
         if force_read:
             change.readValues()
+        module.log.debug('call change_%s(%r)', self.group, change)
         values = self.change(module, change)
         if values is None:  # this indicates that nothing has to be written
             return

@@ -27,40 +27,60 @@ import socket
 import sys
 import threading
 import traceback
+from configparser import ConfigParser
 from os import environ, path
 
-repodir = path.abspath(path.join(path.dirname(__file__), '..', '..'))
-
-if path.splitext(sys.executable)[1] == ".exe" and not path.basename(sys.executable).startswith('python'):
-    CONFIG = {
-        'piddir': './',
-        'logdir': './log',
-        'confdir': './',
-    }
-elif not path.exists(path.join(repodir, '.git')):
-    CONFIG = {
-        'piddir': '/var/run/secop',
-        'logdir': '/var/log',
-        'confdir': '/etc/secop',
-    }
-else:
-    CONFIG = {
-        'piddir': path.join(repodir, 'pid'),
-        'logdir': path.join(repodir, 'log'),
-        'confdir': path.join(repodir, 'cfg'),
-    }
-# overwrite with env variables SECOP_LOGDIR, SECOP_PIDDIR, SECOP_CONFDIR, if present
-for dirname in CONFIG:
-    CONFIG[dirname] = environ.get('SECOP_%s' % dirname.upper(), CONFIG[dirname])
-# this is not customizable
-CONFIG['basedir'] = repodir
-
-# TODO: if ever more general options are need, we should think about a general config file
-
-unset_value = object()
+CONFIG = {}
+unset_value = object()
+
+
+def getGeneralConfig(confdir=None):
+    global CONFIG  # pylint: disable=global-statement
+
+    if CONFIG:
+        if confdir:
+            raise ValueError('getGeneralConfig with argument must be called first')
+    else:
+        repodir = path.abspath(path.join(path.dirname(__file__), '..', '..'))
+        if path.splitext(sys.executable)[1] == ".exe" and not path.basename(sys.executable).startswith('python'):
+            # special MS windows environment
+            CONFIG = {
+                'piddir': './',
+                'logdir': './log',
+                'confdir': './',
+            }
+        elif not path.exists(path.join(repodir, '.git')):
+            CONFIG = {
+                'piddir': '/var/run/secop',
+                'logdir': '/var/log',
+                'confdir': '/etc/secop',
+            }
+        else:
+            CONFIG = {
+                'piddir': path.join(repodir, 'pid'),
+                'logdir': path.join(repodir, 'log'),
+                'confdir': path.join(repodir, 'cfg'),
+            }
+        gen_config_path = confdir or environ.get('FRAPPY_CONFIG_FILE',
+                                                 path.join(CONFIG['confdir'], 'generalConfig.cfg'))
+        if gen_config_path and path.exists(gen_config_path):
+            parser = ConfigParser()
+            parser.optionxform = str
+            parser.read([gen_config_path])
+            CONFIG = {}
+            # only the FRAPPY section is relevant, other sections might be used by others
+            for key, value in parser['FRAPPY'].items():
+                if value.startswith('./'):
+                    CONFIG[key] = path.abspath(path.join(repodir, value))
+                else:
+                    # expand ~ to username, also in path lists separated with ':'
+                    CONFIG[key] = ':'.join(path.expanduser(v) for v in value.split(':'))
+        else:
+            for dirname in CONFIG:
+                CONFIG[dirname] = environ.get('SECOP_%s' % dirname.upper(), CONFIG[dirname])
+        # this is not customizable
+        CONFIG['basedir'] = repodir
+    return CONFIG
 
 
 class lazy_property:
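getGeneralConfig() now builds CONFIG lazily: the hard-coded defaults remain as a fallback, but a general config file (given as argument, via FRAPPY_CONFIG_FILE, or found as generalConfig.cfg in the default confdir) takes precedence when it exists. Only its FRAPPY section is read; values starting with './' are resolved against the repository root, and '~' is expanded in ':'-separated path lists. A sketch of a matching file and call; the file name and keys are illustrative only:

    # generalConfig.cfg (illustrative content)
    #   [FRAPPY]
    #   confdir = ./cfg
    #   logdir = ~/frappylogs
    #   piddir = /var/run/secop

    from secop.lib import getGeneralConfig

    cfg = getGeneralConfig('/etc/secop/generalConfig.cfg')  # a path is only accepted on the first call
    print(cfg['confdir'], cfg['basedir'])                   # 'basedir' always points at the source checkout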
@@ -253,10 +273,6 @@ def getfqdn(name=''):
     return socket.getfqdn(name)
 
 
-def getGeneralConfig():
-    return CONFIG
-
-
 def formatStatusBits(sword, labels, start=0):
     """Return a list of labels according to bit state in `sword` starting
     with bit `start` and the first label in `labels`.
@@ -266,3 +282,11 @@ def formatStatusBits(sword, labels, start=0):
         if sword & (1 << i) and lbl:
             result.append(lbl)
     return result
+
+
+class UniqueObject:
+    def __init__(self, name):
+        self.name = name
+
+    def __repr__(self):
+        return 'UniqueObject(%r)' % self.name
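UniqueObject is a small helper for named sentinel objects with a readable repr (a bare object(), like unset_value above, prints only an address). One way it might be used; the helper below is invented for illustration, not part of the commit:

    UNSET = UniqueObject('UNSET')

    def get_option(options, key, default=UNSET):
        value = options.get(key, UNSET)
        if value is UNSET:            # 'missing' stays distinct from None, 0 or False
            if default is UNSET:
                raise KeyError(key)
            return default
        return value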