enhance logging
- bin/secop-server options -v and -q are applied to the console logger only
- the level for the logfile is taken from the general config
- option for automatic deletion of old logfiles
- added a 'comlog' level (between debug and info)

This allows running the servers by default with the 'comlog' level on the
logfiles, which helps a lot when analyzing very rare communication errors
in retrospect. To avoid spamming the normal log files, comlog data is
stored separately, one file per communicator.

+ redesign of remote logging (no more need for a LoggerAdapter)

Change-Id: Ie156a202b1e7304e50bbe830901bc75872f6ffe2
Reviewed-on: https://forge.frm2.tum.de/review/c/sine2020/secop/playground/+/27427
Tested-by: Jenkins Automated Tests <pedersen+jenkins@frm2.tum.de>
Reviewed-by: Markus Zolliker <markus.zolliker@psi.ch>
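The new level is an ordinary numeric logging level between DEBUG (10) and INFO (20), registered under the name COMLOG. The stand-alone sketch below is illustrative only: it uses the plain standard-library logging module and made-up helper names rather than the mlzlog based classes added in secop/logging.py further down, but it shows the intended effect, namely that communication records go to their own file while the normal handlers never see them.

    import logging

    # COMLOG sits between DEBUG (10) and INFO (20), as in secop/logging.py
    COMLOG = 15
    logging.addLevelName(COMLOG, 'COMLOG')

    class ComlogOnlyFilter(logging.Filter):
        """pass only COMLOG records to the dedicated file (hypothetical helper)"""
        def filter(self, record):
            return record.levelno == COMLOG

    def setup_demo_logger(name, comlog_path):
        """hypothetical demo: normal records to the console, COMLOG records to their own file"""
        log = logging.getLogger(name)
        log.setLevel(logging.DEBUG)
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)          # console stays quiet about communication
        comfile = logging.FileHandler(comlog_path)
        comfile.setLevel(COMLOG)
        comfile.addFilter(ComlogOnlyFilter())   # only communication records end up here
        log.addHandler(console)
        log.addHandler(comfile)
        return log

    log = setup_demo_logger('demo_io', '/tmp/demo_comlog.log')
    log.log(COMLOG, '> %s', 'READ:TEMP?')   # recorded in the comlog file only
    log.info('module started')              # recorded on the console only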
secop/io.py  (38 changed lines)
@@ -57,6 +57,7 @@ class HasIodev(Module):
            if not ioname:
                ioname = iodev or name + '_iodev'
                iodev = self.iodevClass(ioname, srv.log.getChild(ioname), opts, srv)
                iodev.callingModule = []
                srv.modules[ioname] = iodev
                self.iodevDict[self.uri] = ioname
            self.iodev = ioname
@@ -71,8 +72,13 @@ class HasIodev(Module):
            pass
        super().initModule()

    def sendRecv(self, command):
        return self._iodev.communicate(command)
    def communicate(self, *args):
        return self._iodev.communicate(*args)

    def multicomm(self, *args):
        return self._iodev.multicomm(*args)

    sendRecv = communicate  # TODO: remove legacy stuff


class IOBase(Communicator):
@@ -154,6 +160,9 @@ class IOBase(Communicator):
            if removeme:
                self._reconnectCallbacks.pop(key)

    def communicate(self, command):
        return NotImplementedError


class StringIO(IOBase):
    """line oriented communicator
@@ -234,15 +243,15 @@ class StringIO(IOBase):
                    if garbage is None:  # read garbage only once
                        garbage = self._conn.flush_recv()
                        if garbage:
                            self.log.debug('garbage: %r', garbage)
                            self.comLog('garbage: %r', garbage)
                    self._conn.send(cmd + self._eol_write)
                    self.log.debug('send: %s', cmd + self._eol_write)
                    self.comLog('> %s', cmd.decode(self.encoding))
                    reply = self._conn.readline(self.timeout)
                except ConnectionClosed as e:
                    self.closeConnection()
                    raise CommunicationFailedError('disconnected') from None
                reply = reply.decode(self.encoding)
                self.log.debug('recv: %s', reply)
                self.comLog('< %s', reply)
                return reply
        except Exception as e:
            if str(e) == self._last_error:
@@ -291,6 +300,10 @@ def make_bytes(string):
    return bytes([int(c, 16) if HEX_CODE.match(c) else ord(c) for c in string.split()])


def hexify(bytes_):
    return ' '.join('%02x' % r for r in bytes_)


class BytesIO(IOBase):
    identification = Property(
        """identification
@@ -330,14 +343,14 @@ class BytesIO(IOBase):
                    time.sleep(self.wait_before)
                garbage = self._conn.flush_recv()
                if garbage:
                    self.log.debug('garbage: %r', garbage)
                    self.comLog('garbage: %r', garbage)
                self._conn.send(request)
                self.log.debug('send: %r', request)
                self.comLog('> %s', hexify(request))
                reply = self._conn.readbytes(replylen, self.timeout)
            except ConnectionClosed as e:
                self.closeConnection()
                raise CommunicationFailedError('disconnected') from None
            self.log.debug('recv: %r', reply)
            self.comLog('< %s', hexify(reply))
            return self.getFullReply(request, reply)
        except Exception as e:
            if str(e) == self._last_error:
@@ -346,6 +359,15 @@ class BytesIO(IOBase):
            self.log.error(self._last_error)
            raise

    @Command((ArrayOf(TupleOf(BLOBType(), IntRange(0)))), result=ArrayOf(BLOBType()))
    def multicomm(self, requests):
        """communicate multiple request/replies in one row"""
        replies = []
        with self._lock:
            for request in requests:
                replies.append(self.communicate(*request))
        return replies

    def readBytes(self, nbytes):
        """read bytes
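For driver code built on HasIodev, the practical consequence of the secop/io.py changes is that communicate() is the preferred call (sendRecv stays only as a legacy alias) and that multicomm() can bundle several request/reply pairs under one lock. A rough, hypothetical driver sketch follows; the demo device, its command string and the class names are invented, only HasIodev, StringIO and communicate() come from the diff above.

    # hypothetical driver sketch; HasIodev, StringIO and communicate() are from secop/io.py,
    # the device, its command string and parameter handling are made up for illustration
    from secop.io import HasIodev, StringIO
    from secop.modules import Readable


    class DemoIO(StringIO):
        """line oriented connection to the (imaginary) demo device"""


    class DemoSensor(HasIodev, Readable):
        iodevClass = DemoIO

        def read_value(self):
            # self.communicate() forwards to the shared io module; with comlog enabled
            # the exchange shows up in the per-communicator logfile as '> TEMP?' / '< ...'
            reply = self.communicate('TEMP?')   # sendRecv() is kept as a legacy alias
            return float(reply)

For BytesIO-based connections, the multicomm() command added above takes a list of (request bytes, expected reply length) tuples and performs the whole exchange under one lock.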
@@ -94,10 +94,28 @@ class GeneralConfig:
        except KeyError:
            return default

    def getint(self, key, default=None):
        try:
            return int(self.__getitem__(key))
        except KeyError:
            return default

    def __getattr__(self, key):
        """goodie: use generalConfig.<key> instead of generalConfig.get('<key>')"""
        return self.get(key)

    def __setattr__(self, key, value):
        if key == '_config':
            super().__setattr__(key, value)
            return
        if hasattr(type(self), key):
            raise AttributeError('can not set generalConfig.%s' % key)
        self._config[key] = value  # for test purposes

    @property
    def initialized(self):
        return bool(self._config)


generalConfig = GeneralConfig()
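The logging code below relies on the two GeneralConfig additions shown here: getint() for numeric entries with a default, and attribute access as a shorthand for get(). A small usage sketch, assuming the generalConfig singleton from secop.lib; the keys are the ones used later in this change:

    from secop.lib import generalConfig

    # attribute shorthand: same as generalConfig.get('comlog'), None if the key is missing
    if generalConfig.comlog:
        print('communication logging is enabled')

    # numeric entry with a default, as used for the comlog retention time
    keep_days = generalConfig.getint('comlog_days', 7)

    # assignment is meant for tests only; it ends up in the underlying _config dict
    generalConfig.logfile_days = 30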
secop/logging.py  (143 changed lines)
@@ -20,11 +20,20 @@
# *****************************************************************************


from logging import LoggerAdapter
from mlzlog import LOGLEVELS
import os
from os.path import dirname, join
from logging import DEBUG, INFO, addLevelName
import mlzlog

from secop.lib import generalConfig
from secop.datatypes import BoolType
from secop.properties import Property

OFF = 99
LOG_LEVELS = dict(LOGLEVELS, off=OFF)
COMLOG = 15
addLevelName(COMLOG, 'COMLOG')
assert DEBUG < COMLOG < INFO
LOG_LEVELS = dict(mlzlog.LOGLEVELS, off=OFF, comlog=COMLOG)
LEVEL_NAMES = {v: k for k, v in LOG_LEVELS.items()}


@@ -39,28 +48,120 @@ def check_level(level):
    raise ValueError('%r is not a valid level' % level)


class Adapter(LoggerAdapter):
    def __init__(self, modobj):
        super().__init__(modobj.log, {})
        self.subscriptions = {}  # dict [conn] of level
        self.modobj = modobj
class RemoteLogHandler(mlzlog.Handler):
    """handler for remote logging"""
    def __init__(self):
        super().__init__()
        self.subscriptions = {}  # dict[modname] of tuple(mobobj, dict [conn] of level)

    def log(self, level, msg, *args, **kwargs):
        super().log(level, msg, *args, **kwargs)
        for conn, lev in self.subscriptions.items():
            if level >= lev:
                self.modobj.DISPATCHER.send_log_msg(
                    conn, self.modobj.name, LEVEL_NAMES[level], msg % args)
    def emit(self, record):
        """unused"""

    def set_log_level(self, conn, level):
    def handle(self, record):
        modname = record.name.split('.')[-1]
        try:
            modobj, subscriptions = self.subscriptions[modname]
        except KeyError:
            return
        for conn, lev in subscriptions.items():
            if record.levelno >= lev:
                modobj.DISPATCHER.send_log_msg(
                    conn, modobj.name, LEVEL_NAMES[record.levelno],
                    record.getMessage())

    def set_conn_level(self, modobj, conn, level):
        level = check_level(level)
        modobj, subscriptions = self.subscriptions.setdefault(modobj.name, (modobj, {}))
        if level == OFF:
            self.subscriptions.pop(conn, None)
            subscriptions.pop(conn, None)
        else:
            self.subscriptions[conn] = level
            subscriptions[conn] = level

    def __repr__(self):
        return 'RemoteLogHandler()'


def set_log_level(modobj, conn, level):
    if not isinstance(modobj.log, Adapter):
        modobj.log = Adapter(modobj)
    modobj.log.set_log_level(conn, level)
class LogfileHandler(mlzlog.LogfileHandler):

    def __init__(self, logdir, rootname, max_days=0):
        self.logdir = logdir
        self.rootname = rootname
        self.max_days = max_days
        super().__init__(logdir, rootname)

    def emit(self, record):
        if record.levelno != COMLOG:
            super().emit(record)

    def doRollover(self):
        super().doRollover()
        if self.max_days:
            # keep only the last max_days files
            with os.scandir(dirname(self.baseFilename)) as it:
                files = sorted(entry.path for entry in it if entry.name != 'current')
            for filepath in files[-self.max_days:]:
                os.remove(filepath)


class ComLogfileHandler(LogfileHandler):
    """handler for logging communication"""

    def format(self, record):
        return '%s %s' % (self.formatter.formatTime(record), record.getMessage())


class HasComlog:
    """mixin for modules with comlog"""
    comlog = Property('whether communication is logged ', BoolType(),
                      default=True, export=False)
    _comLog = None

    def earlyInit(self):
        super().earlyInit()
        if self.comlog and generalConfig.initialized and generalConfig.comlog:
            self._comLog = mlzlog.Logger('COMLOG.%s' % self.name)
            self._comLog.handlers[:] = []
            directory = join(logger.logdir, logger.rootname, 'comlog', self.DISPATCHER.name)
            self._comLog.addHandler(ComLogfileHandler(
                directory, self.name, max_days=generalConfig.getint('comlog_days', 7)))
            return

    def comLog(self, msg, *args, **kwds):
        self.log.log(COMLOG, msg, *args, **kwds)
        if self._comLog:
            self._comLog.info(msg, *args)


class MainLogger:
    def __init__(self):
        self.log = None
        self.logdir = None
        self.rootname = None
        self.console_handler = None

    def init(self, console_level='info'):
        self.rootname = generalConfig.get('logger_root', 'frappy')
        # set log level to minimum on the logger, effective levels on the handlers
        # needed also for RemoteLogHandler
        # modified from mlzlog.initLogging
        mlzlog.setLoggerClass(mlzlog.MLZLogger)
        assert self.log is None
        self.log = mlzlog.log = mlzlog.MLZLogger(self.rootname)

        self.log.setLevel(DEBUG)
        self.log.addHandler(mlzlog.ColoredConsoleHandler())

        self.logdir = generalConfig.get('logdir', '/tmp/log')
        if self.logdir:
            logfile_days = generalConfig.getint('logfile_days')
            logfile_handler = LogfileHandler(self.logdir, self.rootname, max_days=logfile_days)
            if generalConfig.logfile_days:
                logfile_handler.max_days = int(generalConfig.logfile_days)
            logfile_handler.setLevel(LOG_LEVELS[generalConfig.get('logfile_level', 'info')])
            self.log.addHandler(logfile_handler)

        self.log.addHandler(RemoteLogHandler())
        self.log.handlers[0].setLevel(LOG_LEVELS[console_level])


logger = MainLogger()
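The LoggerAdapter-per-module approach is replaced by a single RemoteLogHandler on the root logger: modules keep logging through their child loggers, and the handler fans matching records out to subscribed connections. The toy imitation below uses only the standard logging module and an invented list-based 'connection', so it can run on its own; the real implementation is the mlzlog based RemoteLogHandler above together with Dispatcher.send_log_msg.

    import logging

    class DemoRemoteHandler(logging.Handler):
        """toy version of RemoteLogHandler: forward records to subscribed 'connections'"""
        def __init__(self):
            super().__init__()
            self.subscriptions = {}   # {module name: {conn: minimum level}}

        def subscribe(self, modname, conn, level):
            self.subscriptions.setdefault(modname, {})[conn] = level

        def emit(self, record):
            modname = record.name.split('.')[-1]    # logger names are '<root>.<module>'
            for conn, level in self.subscriptions.get(modname, {}).items():
                if record.levelno >= level:
                    conn.append('%s:%s %s' % (modname,
                                              logging.getLevelName(record.levelno).lower(),
                                              record.getMessage()))

    root = logging.getLogger('frappy_demo')
    root.setLevel(logging.DEBUG)
    handler = DemoRemoteHandler()
    root.addHandler(handler)

    conn = []                                  # stands in for a client connection
    handler.subscribe('tempctrl', conn, logging.INFO)
    logging.getLogger('frappy_demo.tempctrl').info('regulation started')
    logging.getLogger('frappy_demo.tempctrl').debug('suppressed, below subscribed level')
    print(conn)   # ['tempctrl:info regulation started']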
@@ -36,6 +36,7 @@ from secop.lib.enum import Enum
from secop.params import Accessible, Command, Parameter
from secop.poller import BasicPoller, Poller
from secop.properties import HasProperties, Property
from secop.logging import RemoteLogHandler, HasComlog

Done = object()  #: a special return value for a read/write function indicating that the setter is triggered already
@@ -124,20 +125,22 @@ class HasAccessibles(HasProperties):

        def wrapped_rfunc(self, pname=pname, rfunc=rfunc):
            if rfunc:
                self.log.debug("calling %r" % rfunc)
                self.log.debug("call read_%s" % pname)
                try:
                    value = rfunc(self)
                    self.log.debug("rfunc(%s) returned %r" % (pname, value))
                    if value is Done:  # the setter is already triggered
                        return getattr(self, pname)
                        value = getattr(self, pname)
                        self.log.debug("read_%s returned Done (%r)" % (pname, value))
                        return value
                    self.log.debug("read_%s returned %r" % (pname, value))
                except Exception as e:
                    self.log.debug("rfunc(%s) failed %r" % (pname, e))
                    self.log.debug("read_%s failed %r" % (pname, e))
                    self.announceUpdate(pname, None, e)
                    raise
            else:
                # return cached value
                self.log.debug("rfunc(%s): return cached value" % pname)
                value = self.accessibles[pname].value
                self.log.debug("return cached %s = %r" % (pname, value))
            setattr(self, pname, value)  # important! trigger the setter
            return value

@@ -158,16 +161,18 @@ class HasAccessibles(HasProperties):
        if not wrapped:

            def wrapped_wfunc(self, value, pname=pname, wfunc=wfunc):
                self.log.debug("check validity of %s = %r" % (pname, value))
                pobj = self.accessibles[pname]
                value = pobj.datatype(value)
                if wfunc:
                    self.log.debug('calling %s %r(%r)' % (wfunc.__name__, wfunc, value))
                    self.log.debug("check and call write_%s(%r)" % (pname, value))
                    value = pobj.datatype(value)
                    returned_value = wfunc(self, value)
                    if returned_value is Done:  # the setter is already triggered
                        return getattr(self, pname)
                    if returned_value is not None:  # goodie: accept missing return value
                        value = returned_value
                else:
                    self.log.debug("check %s = %r" % (pname, value))
                    value = pobj.datatype(value)
                setattr(self, pname, value)
                return value

@@ -266,6 +271,7 @@ class Module(HasAccessibles):
        self.earlyInitDone = False
        self.initModuleDone = False
        self.startModuleDone = False
        self.remoteLogHandler = None
        errors = []

        # handle module properties
@@ -585,6 +591,16 @@ class Module(HasAccessibles):
        if started_callback:
            started_callback()

    def setRemoteLogging(self, conn, level):
        if self.remoteLogHandler is None:
            for handler in self.log.handlers:
                if isinstance(handler, RemoteLogHandler):
                    self.remoteLogHandler = handler
                    break
            else:
                raise ValueError('remote handler not found')
        self.remoteLogHandler.set_conn_level(self, conn, level)


class Readable(Module):
    """basic readable module"""
@@ -700,7 +716,7 @@ class Drivable(Writable):
        """cease driving, go to IDLE state"""


class Communicator(Module):
class Communicator(HasComlog, Module):
    """basic abstract communication module"""

    @Command(StringType(), result=StringType())
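The reworked read/write wrappers also make the Done convention visible in the debug output: a read_<param> or write_<param> implementation may set the parameter itself and return Done, and the wrapper then logs e.g. "read_value returned Done (...)" and returns the freshly set value. A hypothetical module method illustrating this; the temperature module and its hardware helper are invented, Done and the wrapper behaviour come from the hunks above.

    from secop.modules import Done, Readable


    class DemoTemperature(Readable):
        """hypothetical module illustrating the Done convention"""

        def read_value(self):
            raw = self._read_hardware()   # invented helper returning millikelvin
            self.value = raw / 1000       # assigning the parameter triggers the setter
            return Done                   # wrapper returns the value just set and logs
                                          # "read_value returned Done (...)"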
@@ -45,7 +45,6 @@ from time import time as currenttime
from secop.errors import NoSuchCommandError, NoSuchModuleError, \
    NoSuchParameterError, ProtocolError, ReadOnlyError, SECoPServerError
from secop.params import Parameter
from secop.logging import set_log_level
from secop.protocol.messages import COMMANDREPLY, DESCRIPTIONREPLY, \
    DISABLEEVENTSREPLY, ENABLEEVENTSREPLY, ERRORPREFIX, EVENTREPLY, \
    HEARTBEATREPLY, IDENTREPLY, IDENTREQUEST, READREPLY, WRITEREPLY, \
@@ -84,6 +83,7 @@ class Dispatcher:
        # eventname is <modulename> or <modulename>:<parametername>
        self._subscriptions = {}
        self._lock = threading.RLock()
        self.name = name
        self.restart = srv.restart
        self.shutdown = srv.shutdown

@@ -378,16 +378,16 @@ class Dispatcher:

    def send_log_msg(self, conn, modname, level, msg):
        """send log message """
        if conn in self._connections:
            conn.send_reply((LOG_EVENT, '%s:%s' % (modname, level), msg))
        conn.send_reply((LOG_EVENT, '%s:%s' % (modname, level), msg))

    def set_all_log_levels(self, conn, level):
        for modobj in self._modules.values():
            set_log_level(modobj, conn, level)
            modobj.setRemoteLogging(conn, level)

    def handle_logging(self, conn, specifier, level):
        if specifier and specifier != '.':
            set_log_level(self._modules[specifier], conn, level)
            modobj = self._modules[specifier]
            modobj.setRemoteLogging(conn, level)
        else:
            self.set_all_log_levels(conn, level)
        return LOGGING_REPLY, specifier, level