From b8105091562786b6f3361a19aa3ba054177d837b Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Wed, 29 Oct 2025 12:07:02 +0100 Subject: [PATCH 01/35] POC for each interface type --- .clang-format | 245 ++++++++++++++++++++++++++++++ Makefile | 26 ++++ db/channels.db | 19 +++ scripts/ioc.sh | 9 ++ scripts/st.cmd | 16 ++ src/asynStreamGeneratorDriver.cpp | 213 ++++++++++++++++++++++++++ src/asynStreamGeneratorDriver.dbd | 1 + src/asynStreamGeneratorDriver.h | 26 ++++ 8 files changed, 555 insertions(+) create mode 100644 .clang-format create mode 100644 Makefile create mode 100644 db/channels.db create mode 100755 scripts/ioc.sh create mode 100755 scripts/st.cmd create mode 100644 src/asynStreamGeneratorDriver.cpp create mode 100644 src/asynStreamGeneratorDriver.dbd create mode 100644 src/asynStreamGeneratorDriver.h diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..a92596f --- /dev/null +++ b/.clang-format @@ -0,0 +1,245 @@ +--- +Language: Cpp +# BasedOnStyle: LLVM +AccessModifierOffset: -2 +AlignAfterOpenBracket: Align +AlignArrayOfStructures: None +AlignConsecutiveAssignments: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: true +AlignConsecutiveBitFields: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: false +AlignConsecutiveDeclarations: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: false +AlignConsecutiveMacros: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: false +AlignConsecutiveShortCaseStatements: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCaseColons: false +AlignEscapedNewlines: Right +AlignOperands: Align +AlignTrailingComments: + Kind: Always + 
OverEmptyLines: 0 +AllowAllArgumentsOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowBreakBeforeNoexceptSpecifier: Never +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: false +AllowShortCompoundRequirementOnASingleLine: true +AllowShortEnumsOnASingleLine: true +AllowShortFunctionsOnASingleLine: All +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: All +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: MultiLine +AttributeMacros: + - __capability +BinPackArguments: true +BinPackParameters: true +BitFieldColonSpacing: Both +BraceWrapping: + AfterCaseLabel: false + AfterClass: false + AfterControlStatement: Never + AfterEnum: false + AfterExternBlock: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true +BreakAdjacentStringLiterals: true +BreakAfterAttributes: Leave +BreakAfterJavaFieldAnnotations: false +BreakArrays: true +BreakBeforeBinaryOperators: None +BreakBeforeConceptDeclarations: Always +BreakBeforeBraces: Attach +BreakBeforeInlineASMColon: OnlyMultiline +BreakBeforeTernaryOperators: true +BreakConstructorInitializers: BeforeColon +BreakInheritanceList: BeforeColon +BreakStringLiterals: true +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +EmptyLineAfterAccessModifier: Never +EmptyLineBeforeAccessModifier: LogicalBlock +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +ForEachMacros: + - foreach + - 
Q_FOREACH + - BOOST_FOREACH +IfMacros: + - KJ_IF_MAYBE +IncludeBlocks: Preserve +IncludeCategories: + - Regex: '^"(llvm|llvm-c|clang|clang-c)/' + Priority: 2 + SortPriority: 0 + CaseSensitive: false + - Regex: '^(<|"(gtest|gmock|isl|json)/)' + Priority: 3 + SortPriority: 0 + CaseSensitive: false + - Regex: '.*' + Priority: 1 + SortPriority: 0 + CaseSensitive: false +IncludeIsMainRegex: '(Test)?$' +IncludeIsMainSourceRegex: '' +IndentAccessModifiers: false +IndentCaseBlocks: false +IndentCaseLabels: false +IndentExternBlock: AfterExternBlock +IndentGotoLabels: true +IndentPPDirectives: None +IndentRequiresClause: true +IndentWidth: 4 +IndentWrappedFunctionNames: false +InsertBraces: false +InsertNewlineAtEOF: false +InsertTrailingCommas: None +IntegerLiteralSeparator: + Binary: 0 + BinaryMinDigits: 0 + Decimal: 0 + DecimalMinDigits: 0 + Hex: 0 + HexMinDigits: 0 +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: true +KeepEmptyLinesAtEOF: false +LambdaBodyIndentation: Signature +LineEnding: DeriveLF +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBinPackProtocolList: Auto +ObjCBlockIndentWidth: 2 +ObjCBreakBeforeNestedBlockParam: true +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PackConstructorInitializers: BinPack +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 19 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakOpenParenthesis: 0 +PenaltyBreakScopeResolution: 500 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyIndentedWhitespace: 0 +PenaltyReturnTypeOnItsOwnLine: 60 +PointerAlignment: Right +PPIndentWidth: -1 +QualifierAlignment: Leave +ReferenceAlignment: Pointer +ReflowComments: true +RemoveBracesLLVM: false +RemoveParentheses: Leave +RemoveSemicolon: false +RequiresClausePosition: OwnLine +RequiresExpressionIndentation: OuterScope +SeparateDefinitionBlocks: 
Leave +ShortNamespaceLines: 1 +SkipMacroDefinitionBody: false +SortIncludes: CaseSensitive +SortJavaStaticImport: Before +SortUsingDeclarations: LexicographicNumeric +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceAroundPointerQualifiers: Default +SpaceBeforeAssignmentOperators: true +SpaceBeforeCaseColon: false +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeJsonColon: false +SpaceBeforeParens: ControlStatements +SpaceBeforeParensOptions: + AfterControlStatements: true + AfterForeachMacros: true + AfterFunctionDefinitionName: false + AfterFunctionDeclarationName: false + AfterIfMacros: true + AfterOverloadedOperator: false + AfterPlacementOperator: true + AfterRequiresInClause: false + AfterRequiresInExpression: false + BeforeNonEmptyParentheses: false +SpaceBeforeRangeBasedForLoopColon: true +SpaceBeforeSquareBrackets: false +SpaceInEmptyBlock: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: Never +SpacesInContainerLiterals: true +SpacesInLineCommentPrefix: + Minimum: 1 + Maximum: -1 +SpacesInParens: Never +SpacesInParensOptions: + InCStyleCasts: false + InConditionalStatements: false + InEmptyParentheses: false + Other: false +SpacesInSquareBrackets: false +Standard: Latest +StatementAttributeLikeMacros: + - Q_EMIT +StatementMacros: + - Q_UNUSED + - QT_REQUIRE_VERSION +TabWidth: 8 +UseTab: Never +VerilogBreakBetweenInstancePorts: true +WhitespaceSensitiveMacros: + - BOOST_PP_STRINGIZE + - CF_SWIFT_NAME + - NS_SWIFT_NAME + - PP_STRINGIZE + - STRINGIZE +... 
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..2bbce1b --- /dev/null +++ b/Makefile @@ -0,0 +1,26 @@ +# Include the external Makefile +include /ioc/tools/driver.makefile + +MODULE=StreamGenerator +BUILDCLASSES=Linux +EPICS_VERSIONS=7.0.7 +#ARCH_FILTER=RHEL% +ARCH_FILTER=linux-x86_64 + +# Additional module dependencies +REQUIRED+=asyn + +DBDS += src/asynStreamGeneratorDriver.dbd + +# DB files to include in the release +TEMPLATES += db/channels.db + +# These headers allow to depend on this library for derived drivers. +HEADERS += src/asynStreamGeneratorDriver.h + +# Source files to build +SOURCES += src/asynStreamGeneratorDriver.cpp + +USR_CFLAGS += -Wall -Wextra -Wunused-result -Werror -fvisibility=hidden # -Wpedantic // Does not work because EPICS macros trigger warnings + +LIB_SYS_LIBS += rdkafka diff --git a/db/channels.db b/db/channels.db new file mode 100644 index 0000000..19aed39 --- /dev/null +++ b/db/channels.db @@ -0,0 +1,19 @@ +# EPICS Database for streamdevice specific to measurement channels +# +# Macros +# INSTR - Prefix +# NAME - the device name, e.g. 
EL737 +# PORT - Stream Generator Port +# CHANNEL - the number associated with the measurment channel + +################################################################################ +# Read all monitors values + +record(longin, "$(INSTR)$(NAME):M$(CHANNEL)") +{ + field(DESC, "DAQ CH$(CHANNEL)") + field(EGU, "cts") + field(DTYP, "asynInt32") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1))COUNTS") + field(SCAN, "I/O Intr") +} diff --git a/scripts/ioc.sh b/scripts/ioc.sh new file mode 100755 index 0000000..7528295 --- /dev/null +++ b/scripts/ioc.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +export EPICS_HOST_ARCH=linux-x86_64 +export EPICS_BASE=/usr/local/epics/base-7.0.7 + +PARENT_PATH="$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )" + +# /usr/local/bin/procServ -o -L - -f -i ^D^C 20001 "${PARENT_PATH}/st.cmd" -d +${PARENT_PATH}/st.cmd diff --git a/scripts/st.cmd b/scripts/st.cmd new file mode 100755 index 0000000..31dce8e --- /dev/null +++ b/scripts/st.cmd @@ -0,0 +1,16 @@ +#!/usr/local/bin/iocsh +#-d + +on error break + +require StreamGenerator, test + +epicsEnvSet("INSTR", "SQ:TEST:") +epicsEnvSet("NAME", "SG") + +drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:9073 UDP", 0, 0, 0) +asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4) + +dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=0") + +iocInit() diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp new file mode 100644 index 0000000..0870399 --- /dev/null +++ b/src/asynStreamGeneratorDriver.cpp @@ -0,0 +1,213 @@ +#include "asynOctetSyncIO.h" +#include +#include +#include + +#include "asynStreamGeneratorDriver.h" +#include + +/* Wrapper to set config values and error out if needed. 
+ */ +static void set_config(rd_kafka_conf_t *conf, char *key, char *value) { + char errstr[512]; + rd_kafka_conf_res_t res; + + res = rd_kafka_conf_set(conf, key, value, errstr, sizeof(errstr)); + if (res != RD_KAFKA_CONF_OK) { + // TODO + // g_error("Unable to set config: %s", errstr); + exit(1); + } +} + +static void udpPollerTask(void *drvPvt) { + asynStreamGeneratorDriver *pSGD = (asynStreamGeneratorDriver *)drvPvt; + pSGD->receiveUDP(); +} + +/** Constructor for the asynStreamGeneratorDriver class. + * Calls constructor for the asynPortDriver base class. + * \param[in] portName The name of the asyn port driver to be created. */ +asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, + const char *ipPortName, + const int numChannels) + : asynPortDriver(portName, 1, /* maxAddr */ + asynInt32Mask, /* Interface mask */ + asynInt32Mask, /* Interrupt mask */ + 0, /* asynFlags. This driver does not block and it is + not multi-device, but has a + destructor ASYN_DESTRUCTIBLE our version of the Asyn + is too old to support this flag */ + 1, /* Autoconnect */ + 0, /* Default priority */ + 0) /* Default stack size*/ +{ + + // Parameter Setup + createParam(P_CountsString, asynParamInt32, &P_Counts); + setIntegerParam(P_Counts, 0); + + // UDP Receive Setup + pasynOctetSyncIO->connect(ipPortName, 0, &pasynUDPUser, NULL); + + /* Create the thread that receives UDP traffic in the background */ + asynStatus status = + (asynStatus)(epicsThreadCreate( + "udp_receive", epicsThreadPriorityMedium, + epicsThreadGetStackSize(epicsThreadStackMedium), + (EPICSTHREADFUNC)::udpPollerTask, this) == NULL); + if (status) { + // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, + // functionName, status); + printf("%s:%s: epicsThreadCreate failure, status=%d\n", + "StreamGenerator", "init", status); + return; + } + + // Kafka Produce Setup + rd_kafka_conf_t *conf; + char errstr[512]; + + // Create client configuration + conf = rd_kafka_conf_new(); + 
set_config(conf, "bootstrap.servers", "linkafka01:9092"); + set_config(conf, "queue.buffering.max.messages", "1e7"); + + // Create the Producer instance. + rd_kafka_t *producer = + rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!producer) { + // TODO + // g_error("Failed to create new producer: %s", errstr); + exit(1); + } + + char *msg = "asdf\n"; + + rd_kafka_resp_err_t err = + rd_kafka_producev(producer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + // RD_KAFKA_V_KEY((void *)key, key_len), + RD_KAFKA_V_VALUE((void *)msg, 6), + // RD_KAFKA_V_OPAQUE(NULL), + RD_KAFKA_V_END); + + if (err) { + // TODO + // g_error("Failed to produce to topic %s: %s", topic, + // rd_kafka_err2str(err)); + exit(1); + } + + epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); + + rd_kafka_poll(producer, 0); + epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); + rd_kafka_flush(producer, 10 * 1000); + epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); +} + +asynStreamGeneratorDriver::~asynStreamGeneratorDriver() {} + +asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, + epicsInt32 *value) { + + int function = pasynUser->reason; + asynStatus status; + + if (function == P_Counts) { + int val; + status = getIntegerParam(P_Counts, &val); + *value = val; + return status; + } else { + return asynError; + } + return asynSuccess; +} + +void asynStreamGeneratorDriver::receiveUDP() { + asynStatus status; + int isConnected; + + char buffer[1500]; + size_t received; + int eomReason; + + int val; + + while (true) { + // epicsStdoutPrintf("polling!!"); + status = pasynManager->isConnected(pasynUDPUser, &isConnected); + if (status) { + asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, + "%s:%s: error calling pasynManager->isConnected, " + "status=%d, error=%s\n", + "StreamGenerator", "receiveUDP", status, + pasynUDPUser->errorMessage); + // driverName, functionName, status, 
+ // pasynUserIPPort_->errorMessage); + } + asynPrint(pasynUserSelf, ASYN_TRACEIO_DRIVER, + "%s:%s: isConnected = %d\n", // + "StreamGenerator", "receiveUDP", isConnected); + + status = pasynOctetSyncIO->read(pasynUDPUser, buffer, 1500, + 1, // timeout + &received, &eomReason); + + // if (status) + // asynPrint( + // pasynUserSelf, ASYN_TRACE_ERROR, + // "%s:%s: error calling pasynOctetSyncIO->read, status=%d\n", + // "StreamGenerator", "receiveUDP", status); + + buffer[received] = 0; + + if (received) + asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: received %s\n", + "StreamGenerator", "receiveUDP", buffer); + + lock(); + getIntegerParam(P_Counts, &val); + val += received > 0; + setIntegerParam(P_Counts, val); + callParamCallbacks(); + unlock(); + + epicsThreadSleep(0.001); // seconds + } +} + +/* Configuration routine. Called directly, or from the iocsh function below */ + +extern "C" { + +/** EPICS iocsh callable function to call constructor for the + * asynStreamGeneratorDriver class. \param[in] portName The name of the asyn + * port driver to be created. 
*/ +asynStatus asynStreamGeneratorDriverConfigure(const char *portName, + const char *ipPortName, + const int numChannels) { + new asynStreamGeneratorDriver(portName, ipPortName, numChannels); + return asynSuccess; +} + +/* EPICS iocsh shell commands */ + +static const iocshArg initArg0 = {"portName", iocshArgString}; +static const iocshArg initArg1 = {"ipPortName", iocshArgString}; +static const iocshArg initArg2 = {"numChannels", iocshArgInt}; +static const iocshArg *const initArgs[] = {&initArg0, &initArg1, &initArg2}; +static const iocshFuncDef initFuncDef = {"asynStreamGenerator", 3, initArgs}; +static void initCallFunc(const iocshArgBuf *args) { + asynStreamGeneratorDriverConfigure(args[0].sval, args[1].sval, + args[2].ival); +} + +void asynStreamGeneratorDriverRegister(void) { + iocshRegister(&initFuncDef, initCallFunc); +} + +epicsExportRegistrar(asynStreamGeneratorDriverRegister); +} diff --git a/src/asynStreamGeneratorDriver.dbd b/src/asynStreamGeneratorDriver.dbd new file mode 100644 index 0000000..d0df127 --- /dev/null +++ b/src/asynStreamGeneratorDriver.dbd @@ -0,0 +1 @@ +registrar("asynStreamGeneratorDriverRegister") diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h new file mode 100644 index 0000000..28bcab7 --- /dev/null +++ b/src/asynStreamGeneratorDriver.h @@ -0,0 +1,26 @@ +#ifndef asynStreamGeneratorDriver_H +#define asynStreamGeneratorDriver_H + +#include "asynPortDriver.h" + +/* These are the drvInfo strings that are used to identify the parameters. 
*/ +#define P_CountsString "COUNTS" /* asynInt32, r/w */ + +class asynStreamGeneratorDriver : public asynPortDriver { + public: + asynStreamGeneratorDriver(const char *portName, const char *ipPortName, + const int numChannels); + virtual ~asynStreamGeneratorDriver(); + + virtual asynStatus readInt32(asynUser *pasynUser, epicsInt32 *value); + + void receiveUDP(); + + protected: + int P_Counts; + + private: + asynUser *pasynUDPUser; +}; + +#endif From c2ca5f699c6ee7a19ffd5465327901962f0a14d9 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 30 Oct 2025 11:51:16 +0100 Subject: [PATCH 02/35] progress with parsing and kafka --- db/channels.db | 3 +- scripts/st.cmd | 5 +- scripts/udp_gen.py | 74 ++++++++++++ src/asynStreamGeneratorDriver.cpp | 186 +++++++++++++++++++++++++----- src/asynStreamGeneratorDriver.h | 6 +- 5 files changed, 241 insertions(+), 33 deletions(-) create mode 100644 scripts/udp_gen.py diff --git a/db/channels.db b/db/channels.db index 19aed39..ef0175e 100644 --- a/db/channels.db +++ b/db/channels.db @@ -14,6 +14,7 @@ record(longin, "$(INSTR)$(NAME):M$(CHANNEL)") field(DESC, "DAQ CH$(CHANNEL)") field(EGU, "cts") field(DTYP, "asynInt32") - field(INP, "@asyn($(PORT),0,$(TIMEOUT=1))COUNTS") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) COUNTS$(CHANNEL)") field(SCAN, "I/O Intr") + field(PINI, "YES") } diff --git a/scripts/st.cmd b/scripts/st.cmd index 31dce8e..80ea698 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -8,9 +8,12 @@ require StreamGenerator, test epicsEnvSet("INSTR", "SQ:TEST:") epicsEnvSet("NAME", "SG") -drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:9073 UDP", 0, 0, 0) +drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 0) asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4) dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=0") +dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=1") 
+dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=2") +dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=3") iocInit() diff --git a/scripts/udp_gen.py b/scripts/udp_gen.py new file mode 100644 index 0000000..428e2ad --- /dev/null +++ b/scripts/udp_gen.py @@ -0,0 +1,74 @@ +import socket +import time +import random + +sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + +header = [ + 0, 0, # buffer length in 16bit words (1, 0) == 1, (0, 1) == 256 + 0, 0x80, # buffer type (probably should be 0) + 21, 0, # header length + 0, 0, # buffer number + 0, 0, # run id + 0x3, # status + 0, # id of sending module + 0, 0, # timestamp low + 0, 0, # timestamp mid + 0, 0, # timestamp high +] + [0, 0] * 12 # parameters + +data = [ + 0, + 0, + 0, + 0, + 0, + 0 +] + +start_time = time.time_ns() // 100 + +while True: + # update buffer number + header[6] = (header[6] + 1) % 0xff + header[7] = (header[7] + (header[6] == 0)) % 0xff + + # update timestamp + base_timestamp = time.time_ns() // 100 - start_time + t_low = base_timestamp & 0xffff + t_mid = (base_timestamp >> 16) & 0xffff + t_high = (base_timestamp >> 32) & 0xffff + header[12] = t_low & 0xff + header[13] = t_low >> 8 + header[14] = t_mid & 0xff + header[15] = t_mid >> 8 + header[16] = t_high & 0xff + header[17] = t_high >> 8 + + num_events = random.randint(0, 243) + + # update buffer length + buffer_length = 21 + num_events * 3 + header[0] = buffer_length & 0xff + header[1] = (buffer_length >> 8) & 0xff + + tosend = list(header) + + for i in range(num_events): + d = list(data) + + # set monitor + d[5] = (1 << 7) | random.randint(0,3) + + # update trigger timestamp + event_timestamp = (time.time_ns() // 100) - base_timestamp + d[0] = event_timestamp & 0xff + d[1] = (event_timestamp >> 8) & 0xff + d[2] = (event_timestamp >> 16) & 0x07 + + tosend += d + + sock.sendto(bytes(tosend), ('127.0.0.1', 54321)) + mv = 
memoryview(bytes(header)).cast('H') + print(f'Sent packet {mv[3]} with {num_events} events {base_timestamp}') + time.sleep(1) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 0870399..7987a94 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -1,8 +1,13 @@ #include "asynOctetSyncIO.h" +#include #include #include #include +// Just for printing +#define __STDC_FORMAT_MACROS +#include + #include "asynStreamGeneratorDriver.h" #include @@ -25,15 +30,57 @@ static void udpPollerTask(void *drvPvt) { pSGD->receiveUDP(); } +// UDP Packet Definitions +struct __attribute__((__packed__)) UDPHeader { + uint16_t BufferLength; + uint16_t BufferType; + uint16_t HeaderLength; + uint16_t BufferNumber; + uint16_t RunCmdID; + uint16_t Status : 8; + uint16_t McpdID : 8; + uint16_t TimeStamp[3]; + uint16_t Parameter0[3]; + uint16_t Parameter1[3]; + uint16_t Parameter2[3]; + uint16_t Parameter3[3]; + + inline uint64_t nanosecs() { + uint64_t nsec{((uint64_t)TimeStamp[2]) << 32 | + ((uint64_t)TimeStamp[1]) << 16 | (uint64_t)TimeStamp[0]}; + return nsec * 100; + } +}; + +struct __attribute__((__packed__)) DetectorEvent { + uint64_t TimeStamp : 19; + uint16_t XPosition : 10; + uint16_t YPosition : 10; + uint16_t Amplitude : 8; + uint16_t Id : 1; + inline uint32_t nanosecs() { return TimeStamp * 100; } +}; + +struct __attribute__((__packed__)) MonitorEvent { + uint64_t TimeStamp : 19; + uint64_t Data : 21; + uint64_t DataID : 4; + uint64_t TriggerID : 3; + uint64_t Id : 1; + inline uint32_t nanosecs() { return TimeStamp * 100; } +}; + /** Constructor for the asynStreamGeneratorDriver class. * Calls constructor for the asynPortDriver base class. * \param[in] portName The name of the asyn port driver to be created. 
*/ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, const char *ipPortName, const int numChannels) - : asynPortDriver(portName, 1, /* maxAddr */ - asynInt32Mask, /* Interface mask */ - asynInt32Mask, /* Interrupt mask */ + : asynPortDriver(portName, 1, /* maxAddr */ + // 5, + asynInt32Mask | asynInt64Mask | + asynDrvUserMask, /* Interface mask */ + asynInt32Mask | asynInt64Mask, /* Interrupt mask */ 0, /* asynFlags. This driver does not block and it is not multi-device, but has a destructor ASYN_DESTRUCTIBLE our version of the Asyn @@ -42,20 +89,30 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, 0, /* Default priority */ 0) /* Default stack size*/ { + this->num_channels = numChannels; // Parameter Setup - createParam(P_CountsString, asynParamInt32, &P_Counts); - setIntegerParam(P_Counts, 0); + char pv_name_buffer[100]; + P_Counts = new int[numChannels]; + + asynStatus status; + + for (size_t i = 0; i < numChannels; ++i) { + memset(pv_name_buffer, 0, 100); + epicsSnprintf(pv_name_buffer, 100, P_CountsString, i); + status = createParam(pv_name_buffer, asynParamInt32, P_Counts + i); + setIntegerParam(P_Counts[i], 0); + printf("%s %d %d %d\n", pv_name_buffer, P_Counts[i], i, status); + } // UDP Receive Setup pasynOctetSyncIO->connect(ipPortName, 0, &pasynUDPUser, NULL); /* Create the thread that receives UDP traffic in the background */ - asynStatus status = - (asynStatus)(epicsThreadCreate( - "udp_receive", epicsThreadPriorityMedium, - epicsThreadGetStackSize(epicsThreadStackMedium), - (EPICSTHREADFUNC)::udpPollerTask, this) == NULL); + status = (asynStatus)(epicsThreadCreate( + "udp_receive", epicsThreadPriorityMedium, + epicsThreadGetStackSize(epicsThreadStackMedium), + (EPICSTHREADFUNC)::udpPollerTask, this) == NULL); if (status) { // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, // functionName, status); @@ -107,19 +164,32 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const 
char *portName, epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); } -asynStreamGeneratorDriver::~asynStreamGeneratorDriver() {} +asynStreamGeneratorDriver::~asynStreamGeneratorDriver() { delete[] P_Counts; } asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, epicsInt32 *value) { + // asynStatus asynStreamGeneratorDriver::readInt64(asynUser *pasynUser, + // epicsInt64 *value) { + const char *paramName; int function = pasynUser->reason; asynStatus status; - if (function == P_Counts) { - int val; - status = getIntegerParam(P_Counts, &val); - *value = val; - return status; + // TODO not freed + getParamName(function, ¶mName); + + bool is_p_counts = false; + for (size_t i = 0; i < num_channels; ++i) { + is_p_counts = is_p_counts | function == P_Counts[i]; + } + + if (is_p_counts) { + status = getIntegerParam(function, value); + + asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: function %d %s %d\n", + "StreamGenerator", "readInt64", function, paramName, status); + // return status; + return asynSuccess; } else { return asynError; } @@ -134,9 +204,18 @@ void asynStreamGeneratorDriver::receiveUDP() { size_t received; int eomReason; - int val; + epicsInt32 val; + + // TODO epics doesn't seem to support uint64, you would need an array of + // uint32. It does support int64 though.. 
so we start with that + epicsInt32 *monitor_counts = new epicsInt32[this->num_channels]; while (true) { + // memset doesn't work with epicsInt32 + for (size_t i = 0; i < this->num_channels; ++i) { + monitor_counts[i] = 0; + } + // epicsStdoutPrintf("polling!!"); status = pasynManager->isConnected(pasynUDPUser, &isConnected); if (status) { @@ -153,7 +232,7 @@ void asynStreamGeneratorDriver::receiveUDP() { "StreamGenerator", "receiveUDP", isConnected); status = pasynOctetSyncIO->read(pasynUDPUser, buffer, 1500, - 1, // timeout + 0, // timeout &received, &eomReason); // if (status) @@ -162,20 +241,69 @@ void asynStreamGeneratorDriver::receiveUDP() { // "%s:%s: error calling pasynOctetSyncIO->read, status=%d\n", // "StreamGenerator", "receiveUDP", status); - buffer[received] = 0; + // buffer[received] = 0; - if (received) - asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: received %s\n", - "StreamGenerator", "receiveUDP", buffer); + if (received) { + asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: received %d\n", + "StreamGenerator", "receiveUDP", received); - lock(); - getIntegerParam(P_Counts, &val); - val += received > 0; - setIntegerParam(P_Counts, val); - callParamCallbacks(); - unlock(); + UDPHeader *header = (UDPHeader *)buffer; - epicsThreadSleep(0.001); // seconds + size_t total_events = (header->BufferLength - 21) / 3; + + // TODO lots of checks and validation missing everywhere here + if (received == total_events * 6 + 42) { + asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, + "%s:%s: received packet %d with %d events (%" PRIu64 + ")\n", + "StreamGenerator", "receiveUDP", header->BufferNumber, + total_events, header->nanosecs()); + + for (size_t i = 0; i < total_events; ++i) { + char *event = (buffer + 21 * 2 + i * 6); + + if (event[5] & 0x80) { // Monitor Event + MonitorEvent *m_event = (MonitorEvent *)event; + + // asynPrint( + // pasynUserSelf, ASYN_TRACE_ERROR, + // "%s:%s: event (%03d) on monitor %d (%" PRIu64 + // ")\n", "StreamGenerator", "receiveUDP", 
i, + // m_event->DataID, header->nanosecs() + + // (uint64_t)m_event->nanosecs()); + + monitor_counts[m_event->DataID] += 1; + } else { // Detector Event + DetectorEvent *d_event = (DetectorEvent *)event; + } + } + + for (size_t i = 0; i < num_channels; ++i) { + getIntegerParam(P_Counts[i], &val); + monitor_counts[i] += val; + } + + asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, + "%s:%s: monitor 0: (%d), monitor 1: (%d), monitor 2: " + "(%d), monitor 3: (%d)\n", + "StreamGenerator", "receiveUDP", monitor_counts[0], + monitor_counts[1], monitor_counts[2], + monitor_counts[3]); + + lock(); + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Counts[i], monitor_counts[i]); + } + callParamCallbacks(); + unlock(); + } else { + asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, + "%s:%s: invalid UDP packet\n", "StreamGenerator", + "receiveUDP"); + } + } + + epicsThreadSleep(1); // seconds } } diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index 28bcab7..780cde7 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -4,7 +4,7 @@ #include "asynPortDriver.h" /* These are the drvInfo strings that are used to identify the parameters. 
*/ -#define P_CountsString "COUNTS" /* asynInt32, r/w */ +#define P_CountsString "COUNTS%d" /* asynInt32, r/w */ class asynStreamGeneratorDriver : public asynPortDriver { public: @@ -12,15 +12,17 @@ class asynStreamGeneratorDriver : public asynPortDriver { const int numChannels); virtual ~asynStreamGeneratorDriver(); + // virtual asynStatus readInt64(asynUser *pasynUser, epicsInt64 *value); virtual asynStatus readInt32(asynUser *pasynUser, epicsInt32 *value); void receiveUDP(); protected: - int P_Counts; + int *P_Counts; private: asynUser *pasynUDPUser; + int num_channels; }; #endif From 2d5a43c09af6670b7efcb9a068628b0602c0985f Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 30 Oct 2025 11:55:10 +0100 Subject: [PATCH 03/35] adds external dependency versions --- .gitmodules | 6 ++++++ dep/flatbuffers | 1 + dep/streaming-data-types | 1 + 3 files changed, 8 insertions(+) create mode 100644 .gitmodules create mode 160000 dep/flatbuffers create mode 160000 dep/streaming-data-types diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..84a08ad --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "dep/streaming-data-types"] + path = dep/streaming-data-types + url = https://github.com/ess-dmsc/streaming-data-types.git +[submodule "dep/flatbuffers"] + path = dep/flatbuffers + url = https://github.com/google/flatbuffers.git diff --git a/dep/flatbuffers b/dep/flatbuffers new file mode 160000 index 0000000..1872409 --- /dev/null +++ b/dep/flatbuffers @@ -0,0 +1 @@ +Subproject commit 187240970746d00bbd26b0f5873ed54d2477f9f3 diff --git a/dep/streaming-data-types b/dep/streaming-data-types new file mode 160000 index 0000000..3b1830f --- /dev/null +++ b/dep/streaming-data-types @@ -0,0 +1 @@ +Subproject commit 3b1830faf268bda2175618162cb6a6ce25b0aa23 From 2d065a0db9bb49cb0bcd8c706fad07255cafd82f Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 30 Oct 2025 13:53:00 +0100 Subject: [PATCH 04/35] can send ess streaming data types flatbuffer 
messages via kafka --- .gitignore | 3 +++ Makefile | 4 +++- src/asynStreamGeneratorDriver.cpp | 25 ++++++++++++++++++------- 3 files changed, 24 insertions(+), 8 deletions(-) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a472f69 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +O.* +.*ignore +schemas/ diff --git a/Makefile b/Makefile index 2bbce1b..6cd9a42 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,6 @@ DBDS += src/asynStreamGeneratorDriver.dbd # DB files to include in the release TEMPLATES += db/channels.db -# These headers allow to depend on this library for derived drivers. HEADERS += src/asynStreamGeneratorDriver.h # Source files to build @@ -23,4 +22,7 @@ SOURCES += src/asynStreamGeneratorDriver.cpp USR_CFLAGS += -Wall -Wextra -Wunused-result -Werror -fvisibility=hidden # -Wpedantic // Does not work because EPICS macros trigger warnings +# Required to support EV42/44 +USR_CXXFLAGS += -I../dep/flatbuffers/include/ -I../schemas + LIB_SYS_LIBS += rdkafka diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 7987a94..a098ad0 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -1,4 +1,5 @@ #include "asynOctetSyncIO.h" +#include "ev42_events_generated.h" #include #include #include @@ -140,14 +141,24 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, } char *msg = "asdf\n"; + // EventMessageBuilder b; + // We could I believe reuse a buffer which might be more performant. 
+ flatbuffers::FlatBufferBuilder builder(1024); + std::vector tof = {1, 2, 3}; + std::vector did = {0, 0, 0}; + auto message = + CreateEventMessageDirect(builder, "monitor1", 0, 0, &tof, &did); - rd_kafka_resp_err_t err = - rd_kafka_producev(producer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - // RD_KAFKA_V_KEY((void *)key, key_len), - RD_KAFKA_V_VALUE((void *)msg, 6), - // RD_KAFKA_V_OPAQUE(NULL), - RD_KAFKA_V_END); + builder.Finish(message, "ev42"); + printf("buffer size: %d\n", builder.GetSize()); + + rd_kafka_resp_err_t err = rd_kafka_producev( + producer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + // RD_KAFKA_V_KEY((void *)key, key_len), + RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), builder.GetSize()), + // RD_KAFKA_V_OPAQUE(NULL), + RD_KAFKA_V_END); if (err) { // TODO From 09ba30025ad48d1f7a4e944cc34c4f5db30ab350 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 30 Oct 2025 14:02:42 +0100 Subject: [PATCH 05/35] adds information on how to build dependencies for the project --- README.md | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 0000000..ab512df --- /dev/null +++ b/README.md @@ -0,0 +1,35 @@ +# StreamGenerator + +## Dependencies + +Currently, this project requires a system install of librdkafka. On Redhat, +this means you should run: + +```bash +dnf install -y librdkafka-devel +``` + +Additionally, you must first build Google's *flatbuffers* and ESS's +**streaming-data-types** libraries, which are both included in this project as +submodules under the `dep` directory and which are both necessary to build this +project. + +First, you should enter the *flatbuffers* directory and run the following: + +```bash +cmake -G "Unix Makefiles" +make -j +``` + +After these steps, you will find the program `flatc` has been built and placed +in the directory. 
+ +Next, you should return to the top of this project's directory tree, and create +the flatbuffers from ESS's schema files. This you can do as follows: + +```bash +./dep/flatbuffers/flatc -o schemas/ --cpp --gen-mutable --gen-name-strings --scoped-enums ./dep/streaming-data-types/schemas/* +``` + +This generates header files from each of ESS's schemas and places them in a +schemas directory. From 4c1741bd4b979525a422f17d9c0217c094b02db3 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 30 Oct 2025 15:07:21 +0100 Subject: [PATCH 06/35] very inefficient, but can receive udp monitor events and count them and send them as kafka events --- src/asynStreamGeneratorDriver.cpp | 201 ++++++++++++++++++++++-------- src/asynStreamGeneratorDriver.h | 11 ++ 2 files changed, 158 insertions(+), 54 deletions(-) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index a098ad0..ecf0553 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -3,7 +3,6 @@ #include #include #include -#include // Just for printing #define __STDC_FORMAT_MACROS @@ -31,6 +30,11 @@ static void udpPollerTask(void *drvPvt) { pSGD->receiveUDP(); } +static void monitorProducerTask(void *drvPvt) { + asynStreamGeneratorDriver *pSGD = (asynStreamGeneratorDriver *)drvPvt; + pSGD->produceMonitor(); +} + // UDP Packet Definitions struct __attribute__((__packed__)) UDPHeader { uint16_t BufferLength; @@ -82,16 +86,14 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, asynInt32Mask | asynInt64Mask | asynDrvUserMask, /* Interface mask */ asynInt32Mask | asynInt64Mask, /* Interrupt mask */ - 0, /* asynFlags. This driver does not block and it is - not multi-device, but has a - destructor ASYN_DESTRUCTIBLE our version of the Asyn - is too old to support this flag */ - 1, /* Autoconnect */ - 0, /* Default priority */ - 0) /* Default stack size*/ -{ - this->num_channels = numChannels; - + 0, /* asynFlags. 
This driver does not block and it is + not multi-device, but has a + destructor ASYN_DESTRUCTIBLE our version of the Asyn + is too old to support this flag */ + 1, /* Autoconnect */ + 0, /* Default priority */ + 0), /* Default stack size*/ + num_channels(numChannels), monitorQueue(1000, false) { // Parameter Setup char pv_name_buffer[100]; P_Counts = new int[numChannels]; @@ -106,6 +108,36 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, printf("%s %d %d %d\n", pv_name_buffer, P_Counts[i], i, status); } + char errstr[512]; + + // Create client configuration + rd_kafka_conf_t *conf = rd_kafka_conf_new(); + set_config(conf, "bootstrap.servers", "linkafka01:9092"); + set_config(conf, "queue.buffering.max.messages", "1e7"); + + // Create the Producer instance. + this->monitorProducer = + rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!this->monitorProducer) { + // TODO + // g_error("Failed to create new producer: %s", errstr); + exit(1); + } + + // Setup for Thread Producing Monitor Kafka Events + status = + (asynStatus)(epicsThreadCreate( + "monitor_produce", epicsThreadPriorityMedium, + epicsThreadGetStackSize(epicsThreadStackMedium), + (EPICSTHREADFUNC)::monitorProducerTask, this) == NULL); + if (status) { + // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, + // functionName, status); + printf("%s:%s: epicsThreadCreate failure, status=%d\n", + "StreamGenerator", "init", status); + return; + } + // UDP Receive Setup pasynOctetSyncIO->connect(ipPortName, 0, &pasynUDPUser, NULL); @@ -122,61 +154,64 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, return; } - // Kafka Produce Setup - rd_kafka_conf_t *conf; - char errstr[512]; + // // Kafka Produce Setup + // rd_kafka_conf_t *conf; + // char errstr[512]; - // Create client configuration - conf = rd_kafka_conf_new(); - set_config(conf, "bootstrap.servers", "linkafka01:9092"); - set_config(conf, 
"queue.buffering.max.messages", "1e7"); + // // Create client configuration + // conf = rd_kafka_conf_new(); + // set_config(conf, "bootstrap.servers", "linkafka01:9092"); + // set_config(conf, "queue.buffering.max.messages", "1e7"); - // Create the Producer instance. - rd_kafka_t *producer = - rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); - if (!producer) { - // TODO - // g_error("Failed to create new producer: %s", errstr); - exit(1); - } + // // Create the Producer instance. + // rd_kafka_t *producer = + // rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + // if (!producer) { + // // TODO + // // g_error("Failed to create new producer: %s", errstr); + // exit(1); + // } - char *msg = "asdf\n"; - // EventMessageBuilder b; - // We could I believe reuse a buffer which might be more performant. - flatbuffers::FlatBufferBuilder builder(1024); - std::vector tof = {1, 2, 3}; - std::vector did = {0, 0, 0}; - auto message = - CreateEventMessageDirect(builder, "monitor1", 0, 0, &tof, &did); + // char *msg = "asdf\n"; + // // EventMessageBuilder b; + // // We could I believe reuse a buffer which might be more performant. 
+ // flatbuffers::FlatBufferBuilder builder(1024); + // // clear with build.Clear(); + // std::vector tof = {1, 2, 3}; + // std::vector did = {0, 0, 0}; + // auto message = + // CreateEventMessageDirect(builder, "monitor1", 0, 0, &tof, &did); - builder.Finish(message, "ev42"); - printf("buffer size: %d\n", builder.GetSize()); + // builder.Finish(message, "ev42"); + // printf("buffer size: %d\n", builder.GetSize()); - rd_kafka_resp_err_t err = rd_kafka_producev( - producer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - // RD_KAFKA_V_KEY((void *)key, key_len), - RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), builder.GetSize()), - // RD_KAFKA_V_OPAQUE(NULL), - RD_KAFKA_V_END); + // rd_kafka_resp_err_t err = rd_kafka_producev( + // producer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), + // RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + // // RD_KAFKA_V_KEY((void *)key, key_len), + // RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), + // builder.GetSize()), + // // RD_KAFKA_V_OPAQUE(NULL), + // RD_KAFKA_V_END); - if (err) { - // TODO - // g_error("Failed to produce to topic %s: %s", topic, - // rd_kafka_err2str(err)); - exit(1); - } + // if (err) { + // // TODO + // // g_error("Failed to produce to topic %s: %s", topic, + // // rd_kafka_err2str(err)); + // exit(1); + // } - epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); + // epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); - rd_kafka_poll(producer, 0); - epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); - rd_kafka_flush(producer, 10 * 1000); - epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); + // rd_kafka_poll(producer, 0); + // epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); + // rd_kafka_flush(producer, 10 * 1000); + // epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); } asynStreamGeneratorDriver::~asynStreamGeneratorDriver() { delete[] P_Counts; 
} +// TODO pretty sure I don't actually need to overwrite this asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, epicsInt32 *value) { // asynStatus asynStreamGeneratorDriver::readInt64(asynUser *pasynUser, @@ -207,6 +242,56 @@ asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, return asynSuccess; } +void asynStreamGeneratorDriver::produceMonitor() { + + flatbuffers::FlatBufferBuilder builder(1024); + + while (true) { + + if (!this->monitorQueue.isEmpty()) { + + builder.Clear(); + auto nme = this->monitorQueue.pop(); + + std::vector tof = {nme->TimeStamp}; + std::vector did = {nme->DataID}; + + auto message = + CreateEventMessageDirect(builder, "monitor", 0, 0, &tof, &did); + + builder.Finish(message, "ev42"); + // printf("buffer size: %d\n", builder.GetSize()); + + rd_kafka_resp_err_t err = rd_kafka_producev( + monitorProducer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + // RD_KAFKA_V_KEY((void *)key, key_len), + RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), + builder.GetSize()), + // RD_KAFKA_V_OPAQUE(NULL), + RD_KAFKA_V_END); + + if (err) { + // TODO + // g_error("Failed to produce to topic %s: %s", topic, + // rd_kafka_err2str(err)); + } + + // epicsStdoutPrintf("Kafka Queue Size %d\n", + // rd_kafka_outq_len(monitorProducer)); + + rd_kafka_poll(monitorProducer, 0); + + printf("Monitor Events Queued %d\n", this->monitorQueue.getHighWaterMark()); + this->monitorQueue.resetHighWaterMark(); + + delete nme; + } + + epicsThreadSleep(0.001); // seconds + } +} + void asynStreamGeneratorDriver::receiveUDP() { asynStatus status; int isConnected; @@ -284,6 +369,14 @@ void asynStreamGeneratorDriver::receiveUDP() { // (uint64_t)m_event->nanosecs()); monitor_counts[m_event->DataID] += 1; + + // TODO needs to be freed + auto nme = new NormalisedMonitorEvent(); + nme->TimeStamp = + header->nanosecs() + (uint64_t)m_event->nanosecs(); + nme->DataID = m_event->DataID; + this->monitorQueue.push(nme); + 
} else { // Detector Event DetectorEvent *d_event = (DetectorEvent *)event; } diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index 780cde7..c1f00cd 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -2,6 +2,13 @@ #define asynStreamGeneratorDriver_H #include "asynPortDriver.h" +#include +#include + +struct __attribute__((__packed__)) NormalisedMonitorEvent { + uint64_t TimeStamp; + uint32_t DataID : 4; +}; /* These are the drvInfo strings that are used to identify the parameters. */ #define P_CountsString "COUNTS%d" /* asynInt32, r/w */ @@ -16,6 +23,7 @@ class asynStreamGeneratorDriver : public asynPortDriver { virtual asynStatus readInt32(asynUser *pasynUser, epicsInt32 *value); void receiveUDP(); + void produceMonitor(); protected: int *P_Counts; @@ -23,6 +31,9 @@ class asynStreamGeneratorDriver : public asynPortDriver { private: asynUser *pasynUDPUser; int num_channels; + + epicsRingPointer monitorQueue; + rd_kafka_t *monitorProducer; }; #endif From 750436732c62e8941f0b380d61ce9fd273a17d43 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 30 Oct 2025 16:48:33 +0100 Subject: [PATCH 07/35] can receive both monitor and detector udp events and send them to different kafka topics --- scripts/st.cmd | 1 + scripts/udp_gen.py | 51 +++- src/asynStreamGeneratorDriver.cpp | 397 +++++++++++++++++++----------- src/asynStreamGeneratorDriver.h | 13 +- 4 files changed, 306 insertions(+), 156 deletions(-) diff --git a/scripts/st.cmd b/scripts/st.cmd index 80ea698..5d8db12 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -15,5 +15,6 @@ dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=1") dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=2") dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), 
PORT=ASYN_SG, CHANNEL=3") +dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=4") iocInit() diff --git a/scripts/udp_gen.py b/scripts/udp_gen.py index 428e2ad..fc51e17 100644 --- a/scripts/udp_gen.py +++ b/scripts/udp_gen.py @@ -54,19 +54,50 @@ while True: tosend = list(header) - for i in range(num_events): - d = list(data) + # I believe, that in our case we never mix monitor and detector events as + # the monitors should have id 0 and the detector events 1-9 so I have + # excluded that posibility here. That would, however, if true mean we could + # reduce also the number of checks on the parsing side of things... - # set monitor - d[5] = (1 << 7) | random.randint(0,3) + is_monitor = random.randint(0, 9) - # update trigger timestamp - event_timestamp = (time.time_ns() // 100) - base_timestamp - d[0] = event_timestamp & 0xff - d[1] = (event_timestamp >> 8) & 0xff - d[2] = (event_timestamp >> 16) & 0x07 + if is_monitor > 3: - tosend += d + for i in range(num_events): + d = list(data) + + monitor = random.randint(0,3) + + d[5] = (1 << 7) | monitor + + # update trigger timestamp + event_timestamp = (time.time_ns() // 100) - base_timestamp + d[0] = event_timestamp & 0xff + d[1] = (event_timestamp >> 8) & 0xff + d[2] = (event_timestamp >> 16) & 0x07 + + tosend += d + + else: + + for i in range(num_events): + d = list(data) + + amplitude = random.randint(0, 255) + x_pos = random.randint(0, 1023) + y_pos = random.randint(0, 1023) + event_timestamp = (time.time_ns() // 100) - base_timestamp + + d[5] = (0 << 7) | (amplitude >> 1) + d[4] = ((amplitude & 0x01) << 7) | (y_pos >> 3) + d[3] = ((y_pos << 5) & 0xE0) | (x_pos >> 5) + d[2] = ((x_pos << 3) & 0xF8) + + d[0] = event_timestamp & 0xff + d[1] = (event_timestamp >> 8) & 0xff + d[2] |= (event_timestamp >> 16) & 0x07 + + tosend += d sock.sendto(bytes(tosend), ('127.0.0.1', 54321)) mv = memoryview(bytes(header)).cast('H') diff --git a/src/asynStreamGeneratorDriver.cpp 
b/src/asynStreamGeneratorDriver.cpp index ecf0553..e8838cf 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -35,6 +35,11 @@ static void monitorProducerTask(void *drvPvt) { pSGD->produceMonitor(); } +static void detectorProducerTask(void *drvPvt) { + asynStreamGeneratorDriver *pSGD = (asynStreamGeneratorDriver *)drvPvt; + pSGD->produceDetector(); +} + // UDP Packet Definitions struct __attribute__((__packed__)) UDPHeader { uint16_t BufferLength; @@ -93,14 +98,15 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, 1, /* Autoconnect */ 0, /* Default priority */ 0), /* Default stack size*/ - num_channels(numChannels), monitorQueue(1000, false) { + num_channels(numChannels + 1), monitorQueue(1000, false), + detectorQueue(1000, false) { // Parameter Setup char pv_name_buffer[100]; - P_Counts = new int[numChannels]; + P_Counts = new int[this->num_channels]; asynStatus status; - for (size_t i = 0; i < numChannels; ++i) { + for (size_t i = 0; i < this->num_channels; ++i) { memset(pv_name_buffer, 0, 100); epicsSnprintf(pv_name_buffer, 100, P_CountsString, i); status = createParam(pv_name_buffer, asynParamInt32, P_Counts + i); @@ -115,7 +121,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, set_config(conf, "bootstrap.servers", "linkafka01:9092"); set_config(conf, "queue.buffering.max.messages", "1e7"); - // Create the Producer instance. + // Create the Monitor Producer instance. this->monitorProducer = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); if (!this->monitorProducer) { @@ -124,6 +130,19 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, exit(1); } + conf = rd_kafka_conf_new(); + set_config(conf, "bootstrap.servers", "linkafka01:9092"); + set_config(conf, "queue.buffering.max.messages", "1e7"); + + // Create the Detector Producer instance. 
+ this->detectorProducer = + rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + if (!this->detectorProducer) { + // TODO + // g_error("Failed to create new producer: %s", errstr); + exit(1); + } + // Setup for Thread Producing Monitor Kafka Events status = (asynStatus)(epicsThreadCreate( @@ -138,6 +157,20 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, return; } + // Setup for Thread Producing Detector Kafka Events + status = (asynStatus)(epicsThreadCreate( + "monitor_produce", epicsThreadPriorityMedium, + epicsThreadGetStackSize(epicsThreadStackMedium), + (EPICSTHREADFUNC)::detectorProducerTask, + this) == NULL); + if (status) { + // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, + // functionName, status); + printf("%s:%s: epicsThreadCreate failure, status=%d\n", + "StreamGenerator", "init", status); + return; + } + // UDP Receive Setup pasynOctetSyncIO->connect(ipPortName, 0, &pasynUDPUser, NULL); @@ -154,143 +187,51 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, return; } - // // Kafka Produce Setup - // rd_kafka_conf_t *conf; - // char errstr[512]; - - // // Create client configuration - // conf = rd_kafka_conf_new(); - // set_config(conf, "bootstrap.servers", "linkafka01:9092"); - // set_config(conf, "queue.buffering.max.messages", "1e7"); - - // // Create the Producer instance. - // rd_kafka_t *producer = - // rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); - // if (!producer) { - // // TODO - // // g_error("Failed to create new producer: %s", errstr); - // exit(1); - // } - - // char *msg = "asdf\n"; - // // EventMessageBuilder b; - // // We could I believe reuse a buffer which might be more performant. 
- // flatbuffers::FlatBufferBuilder builder(1024); - // // clear with build.Clear(); - // std::vector tof = {1, 2, 3}; - // std::vector did = {0, 0, 0}; - // auto message = - // CreateEventMessageDirect(builder, "monitor1", 0, 0, &tof, &did); - - // builder.Finish(message, "ev42"); - // printf("buffer size: %d\n", builder.GetSize()); - - // rd_kafka_resp_err_t err = rd_kafka_producev( - // producer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), - // RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - // // RD_KAFKA_V_KEY((void *)key, key_len), - // RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), - // builder.GetSize()), - // // RD_KAFKA_V_OPAQUE(NULL), - // RD_KAFKA_V_END); - - // if (err) { - // // TODO - // // g_error("Failed to produce to topic %s: %s", topic, - // // rd_kafka_err2str(err)); - // exit(1); - // } - - // epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); - + // TODO add exit should perhaps ensure the queue is flushed // rd_kafka_poll(producer, 0); // epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); // rd_kafka_flush(producer, 10 * 1000); // epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); } -asynStreamGeneratorDriver::~asynStreamGeneratorDriver() { delete[] P_Counts; } - -// TODO pretty sure I don't actually need to overwrite this -asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, - epicsInt32 *value) { - // asynStatus asynStreamGeneratorDriver::readInt64(asynUser *pasynUser, - // epicsInt64 *value) { - - const char *paramName; - int function = pasynUser->reason; - asynStatus status; - - // TODO not freed - getParamName(function, ¶mName); - - bool is_p_counts = false; - for (size_t i = 0; i < num_channels; ++i) { - is_p_counts = is_p_counts | function == P_Counts[i]; - } - - if (is_p_counts) { - status = getIntegerParam(function, value); - - asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: function %d %s %d\n", - "StreamGenerator", "readInt64", function, paramName, status); 
- // return status; - return asynSuccess; - } else { - return asynError; - } - return asynSuccess; +asynStreamGeneratorDriver::~asynStreamGeneratorDriver() { + // should make sure queues are empty and freed + // and that the kafka producers are flushed and freed + delete[] P_Counts; } -void asynStreamGeneratorDriver::produceMonitor() { - - flatbuffers::FlatBufferBuilder builder(1024); - - while (true) { - - if (!this->monitorQueue.isEmpty()) { - - builder.Clear(); - auto nme = this->monitorQueue.pop(); - - std::vector tof = {nme->TimeStamp}; - std::vector did = {nme->DataID}; - - auto message = - CreateEventMessageDirect(builder, "monitor", 0, 0, &tof, &did); - - builder.Finish(message, "ev42"); - // printf("buffer size: %d\n", builder.GetSize()); - - rd_kafka_resp_err_t err = rd_kafka_producev( - monitorProducer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - // RD_KAFKA_V_KEY((void *)key, key_len), - RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), - builder.GetSize()), - // RD_KAFKA_V_OPAQUE(NULL), - RD_KAFKA_V_END); - - if (err) { - // TODO - // g_error("Failed to produce to topic %s: %s", topic, - // rd_kafka_err2str(err)); - } - - // epicsStdoutPrintf("Kafka Queue Size %d\n", - // rd_kafka_outq_len(monitorProducer)); - - rd_kafka_poll(monitorProducer, 0); - - printf("Monitor Events Queued %d\n", this->monitorQueue.getHighWaterMark()); - this->monitorQueue.resetHighWaterMark(); - - delete nme; - } - - epicsThreadSleep(0.001); // seconds - } -} +// // TODO pretty sure I don't actually need to overwrite this +// asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, +// epicsInt32 *value) { +// // asynStatus asynStreamGeneratorDriver::readInt64(asynUser *pasynUser, +// // epicsInt64 *value) { +// +// const char *paramName; +// int function = pasynUser->reason; +// asynStatus status; +// +// // TODO not freed +// getParamName(function, ¶mName); +// +// bool is_p_counts = false; +// for (size_t i = 0; i < 
num_channels; ++i) { +// is_p_counts = is_p_counts | function == P_Counts[i]; +// } +// +// if (is_p_counts) { +// status = getIntegerParam(function, value); +// +// asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: function %d %s +// %d\n", +// "StreamGenerator", "readInt64", function, paramName, +// status); +// // return status; +// return asynSuccess; +// } else { +// return asynError; +// } +// return asynSuccess; +// } void asynStreamGeneratorDriver::receiveUDP() { asynStatus status; @@ -302,14 +243,17 @@ void asynStreamGeneratorDriver::receiveUDP() { epicsInt32 val; + const uint32_t x_pixels = 128; + const uint32_t y_pixels = 128; + // TODO epics doesn't seem to support uint64, you would need an array of // uint32. It does support int64 though.. so we start with that - epicsInt32 *monitor_counts = new epicsInt32[this->num_channels]; + epicsInt32 *counts = new epicsInt32[this->num_channels]; while (true) { // memset doesn't work with epicsInt32 for (size_t i = 0; i < this->num_channels; ++i) { - monitor_counts[i] = 0; + counts[i] = 0; } // epicsStdoutPrintf("polling!!"); @@ -368,9 +312,9 @@ void asynStreamGeneratorDriver::receiveUDP() { // m_event->DataID, header->nanosecs() + // (uint64_t)m_event->nanosecs()); - monitor_counts[m_event->DataID] += 1; + counts[m_event->DataID + 1] += 1; - // TODO needs to be freed + // needs to be freed!!! auto nme = new NormalisedMonitorEvent(); nme->TimeStamp = header->nanosecs() + (uint64_t)m_event->nanosecs(); @@ -379,24 +323,34 @@ void asynStreamGeneratorDriver::receiveUDP() { } else { // Detector Event DetectorEvent *d_event = (DetectorEvent *)event; + counts[0] += 1; + + // needs to be freed!!! 
+ auto nde = new NormalisedDetectorEvent(); + nde->TimeStamp = + header->nanosecs() + (uint64_t)d_event->nanosecs(); + nde->PixID = + (header->McpdID - 1) * x_pixels * y_pixels + + x_pixels * (uint32_t)d_event->XPosition + + (uint32_t)d_event->YPosition; + this->detectorQueue.push(nde); } } for (size_t i = 0; i < num_channels; ++i) { getIntegerParam(P_Counts[i], &val); - monitor_counts[i] += val; + counts[i] += val; } asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, - "%s:%s: monitor 0: (%d), monitor 1: (%d), monitor 2: " - "(%d), monitor 3: (%d)\n", - "StreamGenerator", "receiveUDP", monitor_counts[0], - monitor_counts[1], monitor_counts[2], - monitor_counts[3]); + "%s:%s: det: (%d), mon0: (%d), mon1: (%d), mon2: " + "(%d), mon3: (%d)\n", + "StreamGenerator", "receiveUDP", counts[0], counts[1], + counts[2], counts[3], counts[4]); lock(); for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], monitor_counts[i]); + setIntegerParam(P_Counts[i], counts[i]); } callParamCallbacks(); unlock(); @@ -411,6 +365,161 @@ void asynStreamGeneratorDriver::receiveUDP() { } } +void asynStreamGeneratorDriver::produceMonitor() { + + flatbuffers::FlatBufferBuilder builder(1024); + + std::vector tof; + tof.reserve(9000); + + std::vector did; + did.reserve(9000); + + int total = 0; + epicsTimeStamp last_sent = epicsTime::getCurrent(); + + uint64_t message_id = 0; + + while (true) { + + if (!this->monitorQueue.isEmpty()) { + + ++total; + auto nme = this->monitorQueue.pop(); + tof.push_back(nme->TimeStamp); + did.push_back(nme->DataID); + delete nme; + + } else { + epicsThreadSleep(0.001); // seconds + } + + epicsTimeStamp now = epicsTime::getCurrent(); + + // At least every 0.2 seconds + if (total >= 8192 || + epicsTimeDiffInNS(&now, &last_sent) > 200'000'000ll) { + last_sent = epicsTime::getCurrent(); + + if (total) { + total = 0; + + builder.Clear(); + + auto message = CreateEventMessageDirect( + builder, "monitor", message_id++, 0, &tof, &did); + + 
builder.Finish(message, "ev42"); + // printf("buffer size: %d\n", builder.GetSize()); + + rd_kafka_resp_err_t err = rd_kafka_producev( + monitorProducer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + // RD_KAFKA_V_KEY((void *)key, key_len), + RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), + builder.GetSize()), + // RD_KAFKA_V_OPAQUE(NULL), + RD_KAFKA_V_END); + + if (err) { + // TODO + // g_error("Failed to produce to topic %s: %s", topic, + // rd_kafka_err2str(err)); + } + + // epicsStdoutPrintf("Kafka Queue Size %d\n", + // rd_kafka_outq_len(monitorProducer)); + + rd_kafka_poll(monitorProducer, 0); + + printf("Monitor Events Queued before sending %d\n", + this->monitorQueue.getHighWaterMark()); + this->monitorQueue.resetHighWaterMark(); + + tof.clear(); + did.clear(); + } + } + } +} + +void asynStreamGeneratorDriver::produceDetector() { + + flatbuffers::FlatBufferBuilder builder(1024); + + std::vector tof; + tof.reserve(9000); + + std::vector did; + did.reserve(9000); + + int total = 0; + epicsTimeStamp last_sent = epicsTime::getCurrent(); + + uint64_t message_id = 0; + + while (true) { + + if (!this->detectorQueue.isEmpty()) { + + ++total; + auto nde = this->detectorQueue.pop(); + tof.push_back(nde->TimeStamp); + did.push_back(nde->PixID); + delete nde; + } else { + epicsThreadSleep(0.001); // seconds + } + + epicsTimeStamp now = epicsTime::getCurrent(); + + // At least every 0.2 seconds + if (total >= 8192 || + epicsTimeDiffInNS(&now, &last_sent) > 200'000'000ll) { + last_sent = epicsTime::getCurrent(); + + if (total) { + total = 0; + + builder.Clear(); + + auto message = CreateEventMessageDirect( + builder, "detector", message_id++, 0, &tof, &did); + + builder.Finish(message, "ev42"); + // printf("buffer size: %d\n", builder.GetSize()); + + rd_kafka_resp_err_t err = rd_kafka_producev( + detectorProducer, RD_KAFKA_V_TOPIC("NEWEFU_TEST2"), + RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), + // RD_KAFKA_V_KEY((void *)key, key_len), 
+ RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), + builder.GetSize()), + // RD_KAFKA_V_OPAQUE(NULL), + RD_KAFKA_V_END); + + if (err) { + // TODO + // g_error("Failed to produce to topic %s: %s", topic, + // rd_kafka_err2str(err)); + } + + // epicsStdoutPrintf("Kafka Queue Size %d\n", + // rd_kafka_outq_len(monitorProducer)); + + rd_kafka_poll(detectorProducer, 0); + + printf("Detector Events Queued before sending %d\n", + this->detectorQueue.getHighWaterMark()); + this->detectorQueue.resetHighWaterMark(); + + tof.clear(); + did.clear(); + } + } + } +} + /* Configuration routine. Called directly, or from the iocsh function below */ extern "C" { diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index c1f00cd..6ee1c6b 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -7,7 +7,12 @@ struct __attribute__((__packed__)) NormalisedMonitorEvent { uint64_t TimeStamp; - uint32_t DataID : 4; + uint8_t DataID : 4; +}; + +struct __attribute__((__packed__)) NormalisedDetectorEvent { + uint64_t TimeStamp; + uint32_t PixID; }; /* These are the drvInfo strings that are used to identify the parameters. 
*/ @@ -20,10 +25,11 @@ class asynStreamGeneratorDriver : public asynPortDriver { virtual ~asynStreamGeneratorDriver(); // virtual asynStatus readInt64(asynUser *pasynUser, epicsInt64 *value); - virtual asynStatus readInt32(asynUser *pasynUser, epicsInt32 *value); + // virtual asynStatus readInt32(asynUser *pasynUser, epicsInt32 *value); void receiveUDP(); void produceMonitor(); + void produceDetector(); protected: int *P_Counts; @@ -34,6 +40,9 @@ class asynStreamGeneratorDriver : public asynPortDriver { epicsRingPointer monitorQueue; rd_kafka_t *monitorProducer; + + epicsRingPointer detectorQueue; + rd_kafka_t *detectorProducer; }; #endif From d7bf3977fc8ed34ca23444c5df36b0d076b34f75 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Fri, 31 Oct 2025 10:16:50 +0100 Subject: [PATCH 08/35] reorganises and cleans up some parts of the code --- Makefile | 2 +- scripts/udp_gen.py | 5 +- src/asynStreamGeneratorDriver.cpp | 275 ++++++++++++------------------ src/asynStreamGeneratorDriver.h | 64 ++++++- 4 files changed, 170 insertions(+), 176 deletions(-) diff --git a/Makefile b/Makefile index 6cd9a42..f17d207 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ DBDS += src/asynStreamGeneratorDriver.dbd # DB files to include in the release TEMPLATES += db/channels.db -HEADERS += src/asynStreamGeneratorDriver.h +# HEADERS += src/asynStreamGeneratorDriver.h # Source files to build SOURCES += src/asynStreamGeneratorDriver.cpp diff --git a/scripts/udp_gen.py b/scripts/udp_gen.py index fc51e17..fcd19fd 100644 --- a/scripts/udp_gen.py +++ b/scripts/udp_gen.py @@ -45,7 +45,8 @@ while True: header[16] = t_high & 0xff header[17] = t_high >> 8 - num_events = random.randint(0, 243) + # num_events = random.randint(0, 243) + num_events = 243 # update buffer length buffer_length = 21 + num_events * 3 @@ -102,4 +103,4 @@ while True: sock.sendto(bytes(tosend), ('127.0.0.1', 54321)) mv = memoryview(bytes(header)).cast('H') print(f'Sent packet {mv[3]} with {num_events} events 
{base_timestamp}') - time.sleep(1) + # time.sleep(0.0005) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index e8838cf..c933a75 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -11,20 +11,46 @@ #include "asynStreamGeneratorDriver.h" #include -/* Wrapper to set config values and error out if needed. +/******************************************************************************* + * Kafka Methods */ -static void set_config(rd_kafka_conf_t *conf, char *key, char *value) { + +static void set_kafka_config_key(rd_kafka_conf_t *conf, char *key, + char *value) { char errstr[512]; rd_kafka_conf_res_t res; res = rd_kafka_conf_set(conf, key, value, errstr, sizeof(errstr)); if (res != RD_KAFKA_CONF_OK) { - // TODO - // g_error("Unable to set config: %s", errstr); + epicsStdoutPrintf("Failed to set config value %s : %s\n", key, value); exit(1); } } +static rd_kafka_t *create_kafka_producer() { + + char errstr[512]; + rd_kafka_t *producer; + + // Prepare configuration object + rd_kafka_conf_t *conf = rd_kafka_conf_new(); + set_kafka_config_key(conf, "bootstrap.servers", "linkafka01:9092"); + set_kafka_config_key(conf, "queue.buffering.max.messages", "1e7"); + + // Create the Producer + producer = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); + + if (!producer) { + epicsStdoutPrintf("Failed to create Kafka Producer: %s\n", errstr); + exit(1); + } + + return producer; +} + +/******************************************************************************* + * Static Methods Passed to Epics Threads that should run in the background + */ static void udpPollerTask(void *drvPvt) { asynStreamGeneratorDriver *pSGD = (asynStreamGeneratorDriver *)drvPvt; pSGD->receiveUDP(); @@ -40,54 +66,13 @@ static void detectorProducerTask(void *drvPvt) { pSGD->produceDetector(); } -// UDP Packet Definitions -struct __attribute__((__packed__)) UDPHeader { - uint16_t BufferLength; - uint16_t BufferType; - 
uint16_t HeaderLength; - uint16_t BufferNumber; - uint16_t RunCmdID; - uint16_t Status : 8; - uint16_t McpdID : 8; - uint16_t TimeStamp[3]; - uint16_t Parameter0[3]; - uint16_t Parameter1[3]; - uint16_t Parameter2[3]; - uint16_t Parameter3[3]; - - inline uint64_t nanosecs() { - uint64_t nsec{((uint64_t)TimeStamp[2]) << 32 | - ((uint64_t)TimeStamp[1]) << 16 | (uint64_t)TimeStamp[0]}; - return nsec * 100; - } -}; - -struct __attribute__((__packed__)) DetectorEvent { - uint64_t TimeStamp : 19; - uint16_t XPosition : 10; - uint16_t YPosition : 10; - uint16_t Amplitude : 8; - uint16_t Id : 1; - inline uint32_t nanosecs() { return TimeStamp * 100; } -}; - -struct __attribute__((__packed__)) MonitorEvent { - uint64_t TimeStamp : 19; - uint64_t Data : 21; - uint64_t DataID : 4; - uint64_t TriggerID : 3; - uint64_t Id : 1; - inline uint32_t nanosecs() { return TimeStamp * 100; } -}; - -/** Constructor for the asynStreamGeneratorDriver class. - * Calls constructor for the asynPortDriver base class. - * \param[in] portName The name of the asyn port driver to be created. 
*/ +/******************************************************************************* + * Stream Generator Methods + */ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, const char *ipPortName, const int numChannels) : asynPortDriver(portName, 1, /* maxAddr */ - // 5, asynInt32Mask | asynInt64Mask | asynDrvUserMask, /* Interface mask */ asynInt32Mask | asynInt64Mask, /* Interrupt mask */ @@ -100,48 +85,23 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, 0), /* Default stack size*/ num_channels(numChannels + 1), monitorQueue(1000, false), detectorQueue(1000, false) { + const char *functionName = "asynStreamGeneratorDriver"; // Parameter Setup char pv_name_buffer[100]; P_Counts = new int[this->num_channels]; asynStatus status; + // Create PVs templated on Channel Number for (size_t i = 0; i < this->num_channels; ++i) { memset(pv_name_buffer, 0, 100); epicsSnprintf(pv_name_buffer, 100, P_CountsString, i); status = createParam(pv_name_buffer, asynParamInt32, P_Counts + i); setIntegerParam(P_Counts[i], 0); - printf("%s %d %d %d\n", pv_name_buffer, P_Counts[i], i, status); } - char errstr[512]; - - // Create client configuration - rd_kafka_conf_t *conf = rd_kafka_conf_new(); - set_config(conf, "bootstrap.servers", "linkafka01:9092"); - set_config(conf, "queue.buffering.max.messages", "1e7"); - - // Create the Monitor Producer instance. - this->monitorProducer = - rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); - if (!this->monitorProducer) { - // TODO - // g_error("Failed to create new producer: %s", errstr); - exit(1); - } - - conf = rd_kafka_conf_new(); - set_config(conf, "bootstrap.servers", "linkafka01:9092"); - set_config(conf, "queue.buffering.max.messages", "1e7"); - - // Create the Detector Producer instance. 
- this->detectorProducer = - rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); - if (!this->detectorProducer) { - // TODO - // g_error("Failed to create new producer: %s", errstr); - exit(1); - } + this->monitorProducer = create_kafka_producer(); + this->detectorProducer = create_kafka_producer(); // Setup for Thread Producing Monitor Kafka Events status = @@ -150,11 +110,9 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, epicsThreadGetStackSize(epicsThreadStackMedium), (EPICSTHREADFUNC)::monitorProducerTask, this) == NULL); if (status) { - // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, - // functionName, status); - printf("%s:%s: epicsThreadCreate failure, status=%d\n", - "StreamGenerator", "init", status); - return; + printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, + functionName, status); + exit(1); } // Setup for Thread Producing Detector Kafka Events @@ -164,15 +122,19 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, (EPICSTHREADFUNC)::detectorProducerTask, this) == NULL); if (status) { - // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, - // functionName, status); - printf("%s:%s: epicsThreadCreate failure, status=%d\n", - "StreamGenerator", "init", status); - return; + printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, + functionName, status); + exit(1); } // UDP Receive Setup - pasynOctetSyncIO->connect(ipPortName, 0, &pasynUDPUser, NULL); + status = pasynOctetSyncIO->connect(ipPortName, 0, &pasynUDPUser, NULL); + + if (status) { + printf("%s:%s: Couldn't open connection %s, status=%d\n", driverName, + functionName, ipPortName, status); + exit(1); + } /* Create the thread that receives UDP traffic in the background */ status = (asynStatus)(epicsThreadCreate( @@ -180,12 +142,16 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, epicsThreadGetStackSize(epicsThreadStackMedium), 
(EPICSTHREADFUNC)::udpPollerTask, this) == NULL); if (status) { - // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, - // functionName, status); - printf("%s:%s: epicsThreadCreate failure, status=%d\n", - "StreamGenerator", "init", status); - return; + printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, + functionName, status); + exit(1); } +} + +asynStreamGeneratorDriver::~asynStreamGeneratorDriver() { + // should make sure queues are empty and freed + // and that the kafka producers are flushed and freed + delete[] P_Counts; // TODO add exit should perhaps ensure the queue is flushed // rd_kafka_poll(producer, 0); @@ -194,50 +160,12 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, // epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); } -asynStreamGeneratorDriver::~asynStreamGeneratorDriver() { - // should make sure queues are empty and freed - // and that the kafka producers are flushed and freed - delete[] P_Counts; -} - -// // TODO pretty sure I don't actually need to overwrite this -// asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, -// epicsInt32 *value) { -// // asynStatus asynStreamGeneratorDriver::readInt64(asynUser *pasynUser, -// // epicsInt64 *value) { -// -// const char *paramName; -// int function = pasynUser->reason; -// asynStatus status; -// -// // TODO not freed -// getParamName(function, ¶mName); -// -// bool is_p_counts = false; -// for (size_t i = 0; i < num_channels; ++i) { -// is_p_counts = is_p_counts | function == P_Counts[i]; -// } -// -// if (is_p_counts) { -// status = getIntegerParam(function, value); -// -// asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: function %d %s -// %d\n", -// "StreamGenerator", "readInt64", function, paramName, -// status); -// // return status; -// return asynSuccess; -// } else { -// return asynError; -// } -// return asynSuccess; -// } - void asynStreamGeneratorDriver::receiveUDP() { asynStatus 
status; int isConnected; - char buffer[1500]; + const size_t buffer_size = 1500; + char buffer[buffer_size]; size_t received; int eomReason; @@ -256,22 +184,21 @@ void asynStreamGeneratorDriver::receiveUDP() { counts[i] = 0; } - // epicsStdoutPrintf("polling!!"); status = pasynManager->isConnected(pasynUDPUser, &isConnected); if (status) { asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: error calling pasynManager->isConnected, " "status=%d, error=%s\n", - "StreamGenerator", "receiveUDP", status, + driverName, "receiveUDP", status, pasynUDPUser->errorMessage); // driverName, functionName, status, // pasynUserIPPort_->errorMessage); } asynPrint(pasynUserSelf, ASYN_TRACEIO_DRIVER, "%s:%s: isConnected = %d\n", // - "StreamGenerator", "receiveUDP", isConnected); + driverName, "receiveUDP", isConnected); - status = pasynOctetSyncIO->read(pasynUDPUser, buffer, 1500, + status = pasynOctetSyncIO->read(pasynUDPUser, buffer, buffer_size, 0, // timeout &received, &eomReason); @@ -279,13 +206,19 @@ void asynStreamGeneratorDriver::receiveUDP() { // asynPrint( // pasynUserSelf, ASYN_TRACE_ERROR, // "%s:%s: error calling pasynOctetSyncIO->read, status=%d\n", - // "StreamGenerator", "receiveUDP", status); + // driverName, "receiveUDP", status); // buffer[received] = 0; if (received) { - asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: received %d\n", - "StreamGenerator", "receiveUDP", received); + // asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: received %f %d + // received\n", + // driverName, "receiveUDP", (double) received / + // 1500., received); + + // asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: received + // %d\n", + // driverName, "receiveUDP", received); UDPHeader *header = (UDPHeader *)buffer; @@ -293,11 +226,13 @@ void asynStreamGeneratorDriver::receiveUDP() { // TODO lots of checks and validation missing everywhere here if (received == total_events * 6 + 42) { - asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, - "%s:%s: received packet %d with %d events (%" 
PRIu64 - ")\n", - "StreamGenerator", "receiveUDP", header->BufferNumber, - total_events, header->nanosecs()); + // asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, + // "%s:%s: received packet %d with %d events (%" + // PRIu64 + // ")\n", + // driverName, "receiveUDP", + // header->BufferNumber, total_events, + // header->nanosecs()); for (size_t i = 0; i < total_events; ++i) { char *event = (buffer + 21 * 2 + i * 6); @@ -308,7 +243,7 @@ void asynStreamGeneratorDriver::receiveUDP() { // asynPrint( // pasynUserSelf, ASYN_TRACE_ERROR, // "%s:%s: event (%03d) on monitor %d (%" PRIu64 - // ")\n", "StreamGenerator", "receiveUDP", i, + // ")\n", driverName, "receiveUDP", i, // m_event->DataID, header->nanosecs() + // (uint64_t)m_event->nanosecs()); @@ -342,11 +277,11 @@ void asynStreamGeneratorDriver::receiveUDP() { counts[i] += val; } - asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, - "%s:%s: det: (%d), mon0: (%d), mon1: (%d), mon2: " - "(%d), mon3: (%d)\n", - "StreamGenerator", "receiveUDP", counts[0], counts[1], - counts[2], counts[3], counts[4]); + // asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, + // "%s:%s: det: (%d), mon0: (%d), mon1: (%d), mon2: " + // "(%d), mon3: (%d)\n", + // driverName, "receiveUDP", counts[0], + // counts[1], counts[2], counts[3], counts[4]); lock(); for (size_t i = 0; i < num_channels; ++i) { @@ -356,12 +291,12 @@ void asynStreamGeneratorDriver::receiveUDP() { unlock(); } else { asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, - "%s:%s: invalid UDP packet\n", "StreamGenerator", + "%s:%s: invalid UDP packet\n", driverName, "receiveUDP"); } } - epicsThreadSleep(1); // seconds + // epicsThreadSleep(1); // seconds } } @@ -394,6 +329,8 @@ void asynStreamGeneratorDriver::produceMonitor() { epicsThreadSleep(0.001); // seconds } + // TODO can probably just replace the current + // instead of always getting new object epicsTimeStamp now = epicsTime::getCurrent(); // At least every 0.2 seconds @@ -407,7 +344,10 @@ void asynStreamGeneratorDriver::produceMonitor() { 
builder.Clear(); auto message = CreateEventMessageDirect( - builder, "monitor", message_id++, 0, &tof, &did); + builder, "monitor", message_id++, + ((uint64_t)now.secPastEpoch) * 1'000'000'000ull + + ((uint64_t)now.nsec), + &tof, &did); builder.Finish(message, "ev42"); // printf("buffer size: %d\n", builder.GetSize()); @@ -432,9 +372,9 @@ void asynStreamGeneratorDriver::produceMonitor() { rd_kafka_poll(monitorProducer, 0); - printf("Monitor Events Queued before sending %d\n", - this->monitorQueue.getHighWaterMark()); - this->monitorQueue.resetHighWaterMark(); + // printf("Monitor Events Queued before sending %d\n", + // this->monitorQueue.getHighWaterMark()); + // this->monitorQueue.resetHighWaterMark(); tof.clear(); did.clear(); @@ -484,7 +424,10 @@ void asynStreamGeneratorDriver::produceDetector() { builder.Clear(); auto message = CreateEventMessageDirect( - builder, "detector", message_id++, 0, &tof, &did); + builder, "detector", message_id++, + ((uint64_t)now.secPastEpoch) * 1'000'000'000ull + + ((uint64_t)now.nsec), + &tof, &did); builder.Finish(message, "ev42"); // printf("buffer size: %d\n", builder.GetSize()); @@ -509,9 +452,9 @@ void asynStreamGeneratorDriver::produceDetector() { rd_kafka_poll(detectorProducer, 0); - printf("Detector Events Queued before sending %d\n", - this->detectorQueue.getHighWaterMark()); - this->detectorQueue.resetHighWaterMark(); + // printf("Detector Events Queued before sending %d\n", + // this->detectorQueue.getHighWaterMark()); + // this->detectorQueue.resetHighWaterMark(); tof.clear(); did.clear(); @@ -520,13 +463,11 @@ void asynStreamGeneratorDriver::produceDetector() { } } -/* Configuration routine. Called directly, or from the iocsh function below */ - +/******************************************************************************* + * Methods exposed to IOC Shell + */ extern "C" { -/** EPICS iocsh callable function to call constructor for the - * asynStreamGeneratorDriver class. 
\param[in] portName The name of the asyn - * port driver to be created. */ asynStatus asynStreamGeneratorDriverConfigure(const char *portName, const char *ipPortName, const int numChannels) { @@ -534,8 +475,6 @@ asynStatus asynStreamGeneratorDriverConfigure(const char *portName, return asynSuccess; } -/* EPICS iocsh shell commands */ - static const iocshArg initArg0 = {"portName", iocshArgString}; static const iocshArg initArg1 = {"ipPortName", iocshArgString}; static const iocshArg initArg2 = {"numChannels", iocshArgInt}; diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index 6ee1c6b..5aaf664 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -5,6 +5,51 @@ #include #include +/******************************************************************************* + * UDP Packet Definitions + */ +struct __attribute__((__packed__)) UDPHeader { + uint16_t BufferLength; + uint16_t BufferType; + uint16_t HeaderLength; + uint16_t BufferNumber; + uint16_t RunCmdID; + uint16_t Status : 8; + uint16_t McpdID : 8; + uint16_t TimeStamp[3]; + uint16_t Parameter0[3]; + uint16_t Parameter1[3]; + uint16_t Parameter2[3]; + uint16_t Parameter3[3]; + + inline uint64_t nanosecs() { + uint64_t nsec{((uint64_t)TimeStamp[2]) << 32 | + ((uint64_t)TimeStamp[1]) << 16 | (uint64_t)TimeStamp[0]}; + return nsec * 100; + } +}; + +struct __attribute__((__packed__)) DetectorEvent { + uint64_t TimeStamp : 19; + uint16_t XPosition : 10; + uint16_t YPosition : 10; + uint16_t Amplitude : 8; + uint16_t Id : 1; + inline uint32_t nanosecs() { return TimeStamp * 100; } +}; + +struct __attribute__((__packed__)) MonitorEvent { + uint64_t TimeStamp : 19; + uint64_t Data : 21; + uint64_t DataID : 4; + uint64_t TriggerID : 3; + uint64_t Id : 1; + inline uint32_t nanosecs() { return TimeStamp * 100; } +}; + +/******************************************************************************* + * Simplified Event Struct Definition + */ struct 
__attribute__((__packed__)) NormalisedMonitorEvent { uint64_t TimeStamp; uint8_t DataID : 4; @@ -15,27 +60,34 @@ struct __attribute__((__packed__)) NormalisedDetectorEvent { uint32_t PixID; }; -/* These are the drvInfo strings that are used to identify the parameters. */ -#define P_CountsString "COUNTS%d" /* asynInt32, r/w */ +/******************************************************************************* + * Parameters for use in DB records + * + * i.e.e drvInfo strings that are used to identify the parameters + */ +#define P_CountsString "COUNTS%d" + +/******************************************************************************* + * Stream Generator Coordinating Class + */ class asynStreamGeneratorDriver : public asynPortDriver { public: asynStreamGeneratorDriver(const char *portName, const char *ipPortName, const int numChannels); virtual ~asynStreamGeneratorDriver(); - // virtual asynStatus readInt64(asynUser *pasynUser, epicsInt64 *value); - // virtual asynStatus readInt32(asynUser *pasynUser, epicsInt32 *value); - void receiveUDP(); void produceMonitor(); void produceDetector(); protected: + // Parameter Identifying IDs int *P_Counts; private: asynUser *pasynUDPUser; + int num_channels; epicsRingPointer monitorQueue; @@ -43,6 +95,8 @@ class asynStreamGeneratorDriver : public asynPortDriver { epicsRingPointer detectorQueue; rd_kafka_t *detectorProducer; + + constexpr static char *driverName = "StreamGenerator"; }; #endif From b9e5f40c2130358728fe7183b1c9d9377ca654f2 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Fri, 31 Oct 2025 10:18:57 +0100 Subject: [PATCH 09/35] removes old python variant --- requirements.txt | 4 - udp_rate.py | 342 ----------------------------------------------- 2 files changed, 346 deletions(-) delete mode 100644 requirements.txt delete mode 100644 udp_rate.py diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 9dfa0e4..0000000 --- a/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -confluent-kafka==2.12.1 
-ess-streaming-data-types==0.27.0 -flatbuffers==25.9.23 -numpy==1.26.3 diff --git a/udp_rate.py b/udp_rate.py deleted file mode 100644 index 9e6471f..0000000 --- a/udp_rate.py +++ /dev/null @@ -1,342 +0,0 @@ -import queue -import socket -import time -import threading -from uuid import uuid4 -import math - -from confluent_kafka import Producer -import streaming_data_types - -# receiving directly (can also specify correlation unit ip) -UDP_IP = "" -UDP_PORT = 54321 - -# If redirecting traffic via -# socat -U - udp4-recv:54321 | tee >( socat -u - udp4-datagram:127.0.0.1:54322 ) | socat -u - udp4-datagram:127.0.0.1:54323 -# UDP_IP = "127.0.0.1" -# UDP_PORT = 54323 - -WINDOWSECONDS = 5 -WINDOWSIZE = 20000 * WINDOWSECONDS -MONITORS = 4 # We have max 4 monitors - -time_offset = None # Estimate of clock offset - -time_window = { - i: queue.Queue(maxsize=WINDOWSIZE) - for i in range(MONITORS) -} - -# event_time_window = queue.Queue(maxsize=50000 * WINDOWSECONDS) -EVENT_WINDOWSIZE = 50000 -EVENT_WINDOW_PTR = 0 -event_time_window = [0 for i in range(EVENT_WINDOWSIZE)] - -event_average_rate = 0 -event_last_timestamp = None - -MISSED_PACKETS = -9 # All modules appear to miss the first time due to initialisation as 0 - -# missed_packets_time_window = queue.Queue(maxsize=100) - -def print_monitor_rates(): - while True: - for i in range(MONITORS): - msg = f"Monitor {i+1}: {time_window[i].qsize() / WINDOWSECONDS} cts/s" - try: - earliest = time_window[i].queue[0] - newest = max(time_window[i].queue) - t = time.time() - msg += f', buffer range: {round((newest - earliest) * 1e-7, 3)} s, oldest: {round(time.time() - ((time_offset + earliest) * 1e-7), 3)} s, newest: {round(time.time() - ((time_offset + newest) * 1e-7), 3)} s' - except: - pass - - print(msg) - - # try: - # print(f'Events: {1 / event_average_rate} cts/s') - # except: - # pass - - try: - print(f'Events: {round(1 / (sum(event_time_window) / EVENT_WINDOWSIZE * 1e-7), 2)} cts/s') - except: - pass - - print(f'Missed Packets: 
{MISSED_PACKETS}') - - # Detector Events - # msg = f"Events : {event_time_window.qsize() / WINDOWSECONDS} cts/s" - # try: - # earliest = event_time_window.queue[0] - # newest = max(event_time_window.queue) - # t = time.time() - # msg += f', buffer range: {round((newest - earliest) * 1e-7, 3)} s, oldest: {round(time.time() - ((time_offset + earliest) * 1e-7), 3)} s, newest: {round(time.time() - ((time_offset + newest) * 1e-7), 3)} s' - # except: - # pass - - # print(msg) - - time.sleep(1) - -threading.Thread(target=print_monitor_rates, daemon=True).start() - -def clean_monitor_rates(): - latest = 0 - while True: - for d_id in range(MONITORS): - t_w = time_window[d_id] - if not t_w.empty(): - # TODO probably should switch to a priority queue - # as the messages might not be in order - # TODO could also just replace with a low-pass filter - # would be a lot more efficient - # TODO the way this is done, we need trigger events - # in order for the signal to decay back to 0. - # If no events come, the rate remains stuck - latest = max(latest, max(t_w.queue)) - # latest = time_window[1].queue[-1] - try: - while t_w.queue[0] < (latest - WINDOWSECONDS * 1e7): - t_w.get_nowait() - except IndexError: - pass - time.sleep(0.01) - -threading.Thread(target=clean_monitor_rates, daemon=True).start() - - -# def clean_event_rates(): -# latest = 0 -# while True: -# t_w = event_time_window -# if not t_w.empty(): -# # TODO probably should switch to a priority queue -# # as the messages might not be in order -# # TODO could also just replace with a low-pass filter -# # would be a lot more efficient -# # TODO the way this is done, we need trigger events -# # in order for the signal to decay back to 0. 
-# # If no events come, the rate remains stuck -# #latest = max(latest, max(t_w.queue)) -# try: -# latest = time_window[1].queue[-1] -# while t_w.queue[0] < (latest - WINDOWSECONDS * 1e7): -# t_w.get_nowait() -# except IndexError: -# pass -# time.sleep(0.005) -# -# threading.Thread(target=clean_event_rates, daemon=True).start() - - - - -# Event Kafka Producer - -event_queue = queue.Queue() - -def event_producer(): - producer_config = { - 'bootstrap.servers': "linkafka01:9092", - 'queue.buffering.max.messages': 1e7, - } - prod = Producer(producer_config) - - st = time.time() - - msg_id = 0 - - b_size = 10000 - b_ptr = 0 - pixel_buffer = [0 for _ in range(b_size)] - time_buffer = [0 for _ in range(b_size)] - poll_cnt = 0 - - while True: - (p_id, timestamp) = event_queue.get() - - pixel_buffer[b_ptr] = p_id - time_buffer[b_ptr] = timestamp - b_ptr += 1 - - nt = time.time() - if b_ptr == b_size or nt - st > 0.001: - st = nt - - if b_ptr > 0: - message = streaming_data_types.serialise_ev42( - message_id = msg_id, - pulse_time = time_buffer[0] * 100, # int(time.time() * 1_000_000_000), - time_of_flight = time_buffer[0:b_ptr], - detector_id = pixel_buffer[0:b_ptr], - source_name = '', - ) - - msg_id = (msg_id + 1) % 100000000 - b_ptr = 0 - - prod.produce( - topic = "DMC_detector", - value = message, - partition = 0, - ) - - # if poll_cnt % 1000 == 0: - prod.poll(0) - poll_cnt = (poll_cnt + 1) % 1000 - -threading.Thread(target=event_producer, daemon=True).start() - -# Monitor Kafka Producer - -monitor_queue = queue.Queue() - -def monitor_producer(): - producer_config = { - 'bootstrap.servers': "linkafka01:9092", - 'queue.buffering.max.messages': 1e7, - } - prod = Producer(producer_config) - - monitor_buffer = [0 for i in range(MONITORS)] - monitor_time = [0 for i in range(MONITORS)] - - st = time.time() - - poll_cnt = 0 - - while True: - (d_id, timestamp) = monitor_queue.get() - - monitor_buffer[d_id] += 1 - monitor_time[d_id] = timestamp - - nt = time.time() - if nt - st 
> 0.05: - st = nt - - for i in range(MONITORS): - if monitor_buffer[d_id]: - message = streaming_data_types.serialise_f142( - source_name = f"monitor{d_id+1}", - value = monitor_buffer[d_id], - # ns resolution (supposed to be past epoch, not what the detector returns though) - timestamp_unix_ns = monitor_time[d_id] * 100 # send time of last monitor - ) - - prod.produce( - topic = "DMC_neutron_monitor", - value = message, - partition = 0, - ) - - monitor_buffer[d_id] = 0 - - if poll_cnt % 1000 == 0: - prod.poll(0) - poll_cnt = (poll_cnt + 1) % 1000 - -threading.Thread(target=monitor_producer, daemon=True).start() - - -sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) -sock.bind((UDP_IP, UDP_PORT)) - -val = 0 -start_time = time.time() - -module_counts = [0 for i in range(10)] - -EVENTS = 0 - -while True: - data, addr = sock.recvfrom(2056) # Buffer size is 1024 bytes - raw_header = data[:42] - raw_data = data[42:] - - (buffer_length, buffer_type, header_length, - buffer_number, run_id, mcpd_status, - t_low, t_mid, t_high, *_) = memoryview(raw_header).cast('H') - mcpd_id = ( mcpd_status >> 8 ) & 0xff - mcpd_status = ( mcpd_status ) & 0x3 - running_msg = "running" if (mcpd_status & 0x1) else "stopped" - sync_msg = "in sync" if (mcpd_status & 0x2) else "sync error" - timestamp = ( t_high << 32 ) | ( t_mid << 16 ) | t_low # 100 ns resolution - #print(f'Packet {int(timestamp * 1e-7)}s => buffer: {buffer_number}, length: {int(buffer_length*2/6)} events, status: {mcpd_status} {mcpd_id} {running_msg} with {sync_msg}') - # print(f'Packet => buffer: {mcpd_id}-{buffer_number}, length: {int((buffer_length-21)/3)} events, status: {mcpd_status}') - - if time_offset is None: - time_offset = time.time() * 1e7 - timestamp - - if buffer_number - module_counts[mcpd_id] != 1: - MISSED_PACKETS += 1 - # if missed_packets_time_window.full(): - # missed_packets_time_window.get_nowait() - # missed_packets_time_window.put(timestamp) - - module_counts[mcpd_id] = buffer_number - - for i in 
range(0, len(raw_data), 6): - event = memoryview(raw_data)[i:i+6] - event_type = event[5] >> 7 - # print(event_type) - - if event_type: # Trigger Event - t_id = ( event[5] >> 4 ) & 0x7 - d_id = event[5] & 0xf - event_timestamp = timestamp + ( ( event[2] << 16 ) & 0x7 ) | ( event[1] << 8 ) | event[0] - # print(f'Trigger event {event_timestamp * 1e-7}s => TrigID: {t_id}, DataID: {d_id}') - - t_w = time_window[d_id] - t_w.put_nowait(event_timestamp) - - monitor_queue.put_nowait((d_id, event_timestamp)) - - else: # Neutron Event - x_pixels = 128 - y_pixels = 128 - amplitude = ( event[5] << 1 ) | ( event[4] >> 7 ) - - # The DMC StreamHistogrammer setup currently expects each module to - # be 128 * 128 pixels but the resolution in the packages is - # actually 10bit. We remove the lowest 3 bits. - x = (( (event[3] & 0x1f) << 5 | (event[2] & 0xf8) >> 3 ) & 0x3ff) >> 3 - y = (( (event[4] & 0x7f) << 3 | (event[3] & 0xe0) >> 5 ) & 0x3ff) >> 3 - event_timestamp = timestamp + ( ( event[2] << 16 ) & 0x7 ) | ( event[1] << 8 ) | event[0] - # print(f'Neutron event {event_timestamp * 1e-7}s: {amplitude}, x: {x}, y: {y}') - - - if event_last_timestamp is None: - event_last_timestamp = event_timestamp - - # Seems like at higher frequencies these come very much out of order - # so this is very approximate - event_time_window[EVENT_WINDOW_PTR] = event_timestamp - event_last_timestamp - EVENT_WINDOW_PTR = (EVENT_WINDOW_PTR + 1) % EVENT_WINDOWSIZE - event_last_timestamp = event_timestamp - - # I suppose this doesn't work mostly due to the timestamps ordering... 
- # event_timestamp_seconds = event_timestamp * 1e-7 - # if event_last_timestamp is None: - # event_last_timestamp = event_timestamp_seconds - - # f_cutoff = 1e6 # Hz - # tau = 1 / ( 2 * math.pi * f_cutoff) - # dt = event_timestamp_seconds - event_last_timestamp - # if dt > 0: - # w = math.exp(-dt / tau) - # event_average_rate = w * dt + event_average_rate * (1 - w) - # event_last_timestamp = event_timestamp_seconds - - # EVENTS += 1 - # a = (mcpd_id - 1) * x_pixels * y_pixels + x_pixels * x + y - # print((EVENTS, x, y, a, a < 128 * 128 * 9, mcpd_id)) - # if not a < 128 * 128 * 9: - # print((event[3], event[3] << 5, event[2], event[2] >> 3)) - - event_queue.put_nowait(( - (mcpd_id - 1) * x_pixels * y_pixels + x_pixels * x + y, - event_timestamp - )) From 1e853487aaccf7895d7091ed86d9b950a3f2c58c Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Fri, 31 Oct 2025 13:23:55 +0100 Subject: [PATCH 10/35] adds a POC preset based count --- Makefile | 2 +- db/channels.db | 96 +++++++++++++- db/daq_common.db | 212 ++++++++++++++++++++++++++++++ scripts/st.cmd | 5 + src/asynStreamGeneratorDriver.cpp | 182 ++++++++++++++++--------- src/asynStreamGeneratorDriver.h | 25 ++++ 6 files changed, 460 insertions(+), 62 deletions(-) create mode 100644 db/daq_common.db diff --git a/Makefile b/Makefile index f17d207..5ae267d 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ REQUIRED+=asyn DBDS += src/asynStreamGeneratorDriver.dbd # DB files to include in the release -TEMPLATES += db/channels.db +TEMPLATES += db/channels.db db/daq_common.db # HEADERS += src/asynStreamGeneratorDriver.h diff --git a/db/channels.db b/db/channels.db index ef0175e..7073cef 100644 --- a/db/channels.db +++ b/db/channels.db @@ -3,9 +3,93 @@ # Macros # INSTR - Prefix # NAME - the device name, e.g. 
EL737 -# PORT - Stream Generator Port +# PORT - StreamGenerator Port # CHANNEL - the number associated with the measurment channel +################################################################################ +# Status Variables + +# # Trigger a change in status as clearing +# record(bo, "$(INSTR)$(NAME):T$(CHANNEL)") +# { +# field(DESC, "Trigger Clearing Status") +# field(VAL, 1) +# field(OUT, "$(INSTR)$(NAME):S$(CHANNEL) PP") +# } +# +# # Trigger a change in status as value returned to 0 +# record(seq, "$(INSTR)$(NAME):O$(CHANNEL)") +# { +# field(DESC, "Trigger Returned to 0 Status") +# field(LNK0, "$(INSTR)$(NAME):S$(CHANNEL) PP") +# field(DO0, 0) +# field(SELM, "Specified") +# field(SELL, "$(INSTR)$(NAME):M$(CHANNEL).VAL") +# } +# +# # Current Status of Channel, i.e. is it ready to count? +# record(bi, "$(INSTR)$(NAME):S$(CHANNEL)") +# { +# field(DESC, "Channel Status") +# field(VAL, 0) +# field(ZNAM, "OK") +# field(ONAM, "CLEARING") +# } + +################################################################################ +# Count Commands + +# # Unfortunately, clearing the channels is somewhat complicated as a result of +# # the addition of more channels over time and minimal changes to the underlying interface +# # +# # Urs Greuter provided the following explanation: +# # +# # bei den Befehlen CC r und HC r ist der Parameter r als bit-Maske zu verstehen: +# # +# # Bit0: Zähler Channel 1 +# # Bit2: Zähler Channel 2 +# # Bit3: Zähler Channel 3 +# # Bit4: Zähler Channel 4 +# # Bit5: Zähler Channel Timer +# # Bit6: Zähler Channel 5 +# # Bit7: Zähler Channel 6 +# # Bit8: Zähler Channel 7 +# # Bit9: Zähler Channel 8 +# # +# # Beispiele: +# # CC 1 setzt den Zähler des Channels 1 zurück +# # CC 4 setzt den Zähler des Channels 3 zurück +# # CC 5 setzt gleichzeitig die Zähler der Channels 1 und 3 zurück +# # CC 16 ist gleichbedeutend wie CT (Timer zurücksetzen) +# # CC 511 setzt gleichzeitig die Zähler aller Kanäle (auch des Timers) zurück. 
+# +# record(calc, "$(INSTR)$(NAME):BM$(CHANNEL)") +# { +# field(DESC, "Bit Mask for Channel") +# field(INPA, $(CHANNEL)) +# field(CALC, "A > 4 ? 2 ^ A : 2 ^ (A-1)") +# field(PINI, "YES") +# } +# +# record(longout, "$(INSTR)$(NAME):C$(CHANNEL)") +# { +# field(DESC, "Clear the current channel count") +# field(DTYP, "stream") +# field(OMSL, "closed_loop") +# field(DOL, "$(INSTR)$(NAME):BM$(CHANNEL) NPP") +# field(OUT, "@... clearChannel($(INSTR)$(NAME):) $(PORT)") +# field(FLNK, "$(INSTR)$(NAME):T$(CHANNEL)") +# } +# +# record(ao,"$(INSTR)$(NAME):THRESH$(CHANNEL)") +# { +# field(DESC, "Sets min rate for counting to proceed") +# field(OMSL, "supervisory") +# field(OROC, "0") +# field(OUT, "@... setMinRate($(INSTR)$(NAME):, $(CHANNEL)) $(PORT)") +# field(DTYP, "stream") +# } + ################################################################################ # Read all monitors values @@ -15,6 +99,16 @@ record(longin, "$(INSTR)$(NAME):M$(CHANNEL)") field(EGU, "cts") field(DTYP, "asynInt32") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) COUNTS$(CHANNEL)") + # This is probably too fast. We could trigger things the same as sinqDAQ to ensure the db is update in the same order field(SCAN, "I/O Intr") field(PINI, "YES") } + +# record(ai, "$(INSTR)$(NAME):R$(CHANNEL)") +# { +# field(DESC, "Rate of DAQ CH$(CHANNEL)") +# field(INP, "@... readRate($(INSTR)$(NAME):, $(CHANNEL)) $(PORT)") +# field(DTYP, "stream") +# field(EGU, "cts/sec") +# field(SCAN, "1 second") +# } diff --git a/db/daq_common.db b/db/daq_common.db new file mode 100644 index 0000000..a5e5501 --- /dev/null +++ b/db/daq_common.db @@ -0,0 +1,212 @@ +# EPICS Database for streamdevice specific to measurement channels +# +# Macros +# INSTR - Prefix +# NAME - the device name, e.g. 
EL737 +# PORT - StreamGenerator Port + +record(longout, "$(INSTR)$(NAME):FULL-RESET") +{ + field(DESC, "Reset the DAQ") + field(DTYP, "asynInt32") + field(OUT, "@asyn($(PORT),0,$(TIMEOUT=1)) RESET") +} + +################################################################################ +# Status Variables + +# record(stringin, "$(INSTR)$(NAME):MsgTxt") +# { +# field(DESC, "Unexpected received response") +# field(DTYP, "devDAQStringError") +# field(FLNK, "$(INSTR)$(NAME):INVALID-CONFIG") +# } + +record(mbbi, "$(INSTR)$(NAME):STATUS") +{ + field(DESC, "DAQ Status") + field(DTYP, "asynInt32") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) STATUS") + field(ZRVL, "0") + field(ZRST, "Idle") + field(ONVL, "1") + field(ONST, "Counting") + field(TWVL, "2") + field(TWST, "Low rate") + field(THVL, "3") + field(THST, "Paused") + # 4 should never happen, if it does it means the DAQ reports undocumented statusbits + field(FRVL, "4") + field(FRST, "INVALID") + # This is probably too fast. We could trigger things the same as sinqDAQ to ensure the db is update in the same order + field(SCAN, "I/O Intr") + field(PINI, "YES") +} + +record(longin, "$(INSTR)$(NAME):CHANNELS") +{ + field(DESC, "Total Supported Channels") + field(VAL, $(CHANNELS)) + field(DISP, 1) +} + +# # Trigger a change in status as clearing +# record(bo, "$(INSTR)$(NAME):ETT") +# { +# field(DESC, "Trigger Clearing Status") +# field(VAL, 1) +# field(OUT, "$(INSTR)$(NAME):ETS PP") +# } +# +# # Trigger a change in status as value returned to 0 +# record(seq, "$(INSTR)$(NAME):ETO") +# { +# field(DESC, "Trigger Returned to 0 Status") +# field(LNK0, "$(INSTR)$(NAME):ETS PP") +# field(DO0, 0) +# field(SELM, "Specified") +# field(SELL, "$(INSTR)$(NAME):ELAPSED-TIME.VAL") +# } +# +# # Current Status of Channel, i.e. is it ready to count? 
+# record(bi, "$(INSTR)$(NAME):ETS") +# { +# field(DESC, "Channel Status") +# field(VAL, 0) +# field(ZNAM, "OK") +# field(ONAM, "CLEARING") +# } + +################################################################################ +# Count Commands + +record(ao,"$(INSTR)$(NAME):PRESET-COUNT") +{ + field(DESC, "Count until preset reached") + field(DTYP, "asynInt32") + field(OUT, "@asyn($(PORT),0,$(TIMEOUT=1)) P_CNT") + field(VAL, 0) + field(PREC, 2) +} + +# record(ao,"$(INSTR)$(NAME):PRESET-TIME") +# { +# field(DESC, "Count for specified time") +# field(DTYP, "stream") +# field(OUT, "@... startWithTimePreset$(CHANNELS)($(INSTR)$(NAME):) $(PORT)") +# field(VAL, 0) +# field(PREC, 2) +# field(EGU, "seconds") +# field(FLNK, "$(INSTR)$(NAME):RAW-STATUS") +# } +# +# record(bo,"$(INSTR)$(NAME):PAUSE") +# { +# field(DESC, "Pause the current count") +# field(DTYP, "stream") +# field(OUT, "@... pauseCount($(INSTR)$(NAME):) $(PORT)") +# field(VAL, "0") +# field(FLNK, "$(INSTR)$(NAME):RAW-STATUS") +# } +# +# record(bo,"$(INSTR)$(NAME):CONTINUE") +# { +# field(DESC, "Continue with a count that was paused") +# field(DTYP, "stream") +# field(OUT, "@... continueCount($(INSTR)$(NAME):) $(PORT)") +# field(VAL, "0") +# field(FLNK, "$(INSTR)$(NAME):RAW-STATUS") +# } +# +# record(longout, "$(INSTR)$(NAME):STOP") +# { +# field(DESC, "Stop the current counting operation") +# field(DTYP, "stream") +# field(OUT, "@... 
stopCount($(INSTR)$(NAME):) $(PORT)") +# field(FLNK, "$(INSTR)$(NAME):RAW-STATUS") +# } + +record(longout, "$(INSTR)$(NAME):MONITOR-CHANNEL") +{ + field(DESC, "PRESET-COUNT Monitors this channel") + field(DTYP, "asynInt32") + field(OUT, "@asyn($(PORT),0,$(TIMEOUT=1)) MONITOR") + field(DRVL, "1") # Smallest Monitor Channel + field(DRVH, "$(CHANNELS)") # Largest Monitor Channel +} + +record(longin, "$(INSTR)$(NAME):MONITOR-CHANNEL_RBV") +{ + field(DESC, "PRESET-COUNT Monitors this channel") + field(DTYP, "asynInt32") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) MONITOR") + field(SCAN, "I/O Intr") + field(PINI, "YES") +} + +# record(calc, "$(INSTR)$(NAME):RATE_MAP") +# { +# field(DESC, "Want a consistent lowrate pv") +# field(INPA, "$(INSTR)$(NAME):RAW-STATUS.B2 NPP") +# field(CALC, "(A=1)?1:0") +# } + +# +# record(ao,"$(INSTR)$(NAME):THRESHOLD") +# { +# field(DESC, "Minimum rate for counting to proceed") +# field(VAL, "1") # Default Rate +# # Could perhaps still be improved. +# # It seems to only accept whole counts? +# field(DRVL, "1") # Minimum Rate +# field(DRVH, "100000") # Maximum Rate +# field(OMSL, "supervisory") +# field(OROC, "0") +# field(OUT, "$(INSTR)$(NAME):THRESHOLD-F PP") +# } +# +# record(ai,"$(INSTR)$(NAME):THRESHOLD_RBV") +# { +# field(DESC, "Minimum rate for counting to proceed") +# field(INP, "@... readMinRate($(INSTR)$(NAME):) $(PORT)") +# field(DTYP, "stream") +# field(SCAN, "1 second") +# field(EGU, "cts/sec") +# } +# +# record(longout,"$(INSTR)$(NAME):THRESHOLD-MONITOR") +# { +# field(DESC, "Channel monitored for minimum rate") +# field(VAL, "1") # Monitor +# field(DRVL, "0") # Smallest Threshold Channel (0 is off) +# field(DRVH, "$(CHANNELS)") # Largest Threshold Channel +# field(OUT, "@... setRateMonitor($(INSTR)$(NAME):) $(PORT)") +# field(DTYP, "stream") +# } +# +# record(longin,"$(INSTR)$(NAME):THRESHOLD-MONITOR_RBV") +# { +# field(DESC, "Channel monitored for minimum rate") +# field(INP, "@... 
readRateMonitor($(INSTR)$(NAME):) $(PORT)") +# field(DTYP, "stream") +# field(SCAN, "1 second") +# field(EGU, "CH") +# } +# +# record(longout, "$(INSTR)$(NAME):CT") +# { +# field(DESC, "Clear the timer") +# field(DTYP, "stream") +# field(OUT, "@... clearTimer($(INSTR)$(NAME):) $(PORT)") +# field(FLNK, "$(INSTR)$(NAME):ETT") +# } + +################################################################################ +# Read all monitors values + +# record(ai,"$(INSTR)$(NAME):ELAPSED-TIME") +# { +# field(DESC, "DAQ Measured Time") +# field(EGU, "sec") +# field(FLNK, "$(INSTR)$(NAME):ETO") +# } diff --git a/scripts/st.cmd b/scripts/st.cmd index 5d8db12..d8fe540 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -11,7 +11,12 @@ epicsEnvSet("NAME", "SG") drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 0) asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4) +dbLoadRecords("$(StreamGenerator_DB)daq_common.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNELS=5") + +# Detector Count Channel dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=0") + +# Monitor Channels dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=1") dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=2") dbLoadRecords("$(StreamGenerator_DB)channels.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNEL=3") diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index c933a75..96a52b1 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -90,16 +90,44 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, char pv_name_buffer[100]; P_Counts = new int[this->num_channels]; - asynStatus status; + asynStatus status = asynSuccess; + + status = (asynStatus)(status | createParam(P_StatusString, asynParamInt32, + &P_Status)); + status = 
(asynStatus)(status | setIntegerParam(P_Status, STATUS_IDLE)); + + status = (asynStatus)(status | + createParam(P_ResetString, asynParamInt32, &P_Reset)); + status = (asynStatus)(status | setIntegerParam(P_Reset, 0)); + + status = (asynStatus)(status | createParam(P_CountPresetString, + asynParamInt32, &P_CountPreset)); + status = (asynStatus)(status | setIntegerParam(P_CountPreset, 0)); + + status = + (asynStatus)(status | createParam(P_MonitorChannelString, + asynParamInt32, &P_MonitorChannel)); + status = (asynStatus)(status | setIntegerParam(P_MonitorChannel, 0)); // Create PVs templated on Channel Number for (size_t i = 0; i < this->num_channels; ++i) { memset(pv_name_buffer, 0, 100); epicsSnprintf(pv_name_buffer, 100, P_CountsString, i); - status = createParam(pv_name_buffer, asynParamInt32, P_Counts + i); - setIntegerParam(P_Counts[i], 0); + status = + (asynStatus)(status | createParam(pv_name_buffer, asynParamInt32, + P_Counts + i)); + status = (asynStatus)(status | setIntegerParam(P_Counts[i], 0)); } + if (status) { + printf("%s:%s: failed to create or setup parameters, status=%d\n", + driverName, functionName, status); + exit(1); + } + + // Create Events + this->pausedEventId = epicsEventCreate(epicsEventEmpty); + this->monitorProducer = create_kafka_producer(); this->detectorProducer = create_kafka_producer(); @@ -160,6 +188,53 @@ asynStreamGeneratorDriver::~asynStreamGeneratorDriver() { // epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); } +asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, + epicsInt32 value) { + int function = pasynUser->reason; + asynStatus status = asynSuccess; + const char *paramName; + const char *functionName = "writeInt32"; + getParamName(function, ¶mName); + + // if (status) { + // epicsSnprintf(pasynUser->errorMessage, pasynUser->errorMessageSize, + // "%s:%s: status=%d, function=%d, name=%s, value=%d", + // driverName, functionName, status, function, paramName, + // value); + // 
return status; + // } + + if (function == P_CountPreset) { + setIntegerParam(function, value); + setIntegerParam(P_Status, STATUS_COUNTING); + status = (asynStatus)callParamCallbacks(); + epicsEventSignal(this->pausedEventId); + } else if (function == P_Reset) { + // TODO should probably set back everything to defaults + setIntegerParam(P_Status, STATUS_IDLE); + status = (asynStatus)callParamCallbacks(); + } else if (function == P_MonitorChannel) { + epicsInt32 currentStatus; + getIntegerParam(this->P_Status, ¤tStatus); + if (!currentStatus) { + setIntegerParam(function, value); + status = (asynStatus)callParamCallbacks(); + } + } else { + setIntegerParam(function, value); + status = (asynStatus)callParamCallbacks(); + } + + if (status) + epicsSnprintf(pasynUser->errorMessage, pasynUser->errorMessageSize, + "%s:%s: status=%d, function=%d, name=%s, value=%d", + driverName, functionName, status, function, paramName, + value); + return status; +} + +// TODO probably I will have to split this function up, so that the system +// can process the UDP messages in parallel void asynStreamGeneratorDriver::receiveUDP() { asynStatus status; int isConnected; @@ -170,56 +245,53 @@ void asynStreamGeneratorDriver::receiveUDP() { int eomReason; epicsInt32 val; + epicsInt32 currentStatus; + epicsInt32 countPreset = 0; + epicsInt32 presetChannel = 1; - const uint32_t x_pixels = 128; - const uint32_t y_pixels = 128; + const char *functionName = "receiveUDP"; // TODO epics doesn't seem to support uint64, you would need an array of // uint32. It does support int64 though.. 
so we start with that epicsInt32 *counts = new epicsInt32[this->num_channels]; while (true) { - // memset doesn't work with epicsInt32 - for (size_t i = 0; i < this->num_channels; ++i) { - counts[i] = 0; + + status = getIntegerParam(this->P_Status, ¤tStatus); + if (!currentStatus || status) { + + epicsEventWait(this->pausedEventId); + + getIntegerParam(this->P_CountPreset, &countPreset); + getIntegerParam(this->P_MonitorChannel, &presetChannel); + + // memset doesn't work with epicsInt32 + for (size_t i = 0; i < this->num_channels; ++i) { + counts[i] = 0; + } + + lock(); + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Counts[i], counts[i]); + } + callParamCallbacks(); + unlock(); + + // Clear the input buffer, in case of stray messages + pasynOctetSyncIO->flush(pasynUDPUser); } status = pasynManager->isConnected(pasynUDPUser, &isConnected); - if (status) { + if (!isConnected) asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, - "%s:%s: error calling pasynManager->isConnected, " - "status=%d, error=%s\n", - driverName, "receiveUDP", status, - pasynUDPUser->errorMessage); - // driverName, functionName, status, - // pasynUserIPPort_->errorMessage); - } - asynPrint(pasynUserSelf, ASYN_TRACEIO_DRIVER, - "%s:%s: isConnected = %d\n", // - driverName, "receiveUDP", isConnected); + "%s:%s: isConnected = %d\n", driverName, functionName, + isConnected); status = pasynOctetSyncIO->read(pasynUDPUser, buffer, buffer_size, 0, // timeout &received, &eomReason); - // if (status) - // asynPrint( - // pasynUserSelf, ASYN_TRACE_ERROR, - // "%s:%s: error calling pasynOctetSyncIO->read, status=%d\n", - // driverName, "receiveUDP", status); - - // buffer[received] = 0; - if (received) { - // asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: received %f %d - // received\n", - // driverName, "receiveUDP", (double) received / - // 1500., received); - - // asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: received - // %d\n", - // driverName, "receiveUDP", received); - UDPHeader 
*header = (UDPHeader *)buffer; size_t total_events = (header->BufferLength - 21) / 3; @@ -230,23 +302,19 @@ void asynStreamGeneratorDriver::receiveUDP() { // "%s:%s: received packet %d with %d events (%" // PRIu64 // ")\n", - // driverName, "receiveUDP", + // driverName, functionName, // header->BufferNumber, total_events, // header->nanosecs()); for (size_t i = 0; i < total_events; ++i) { char *event = (buffer + 21 * 2 + i * 6); + if (countPreset && counts[presetChannel] >= countPreset) + break; + if (event[5] & 0x80) { // Monitor Event MonitorEvent *m_event = (MonitorEvent *)event; - // asynPrint( - // pasynUserSelf, ASYN_TRACE_ERROR, - // "%s:%s: event (%03d) on monitor %d (%" PRIu64 - // ")\n", driverName, "receiveUDP", i, - // m_event->DataID, header->nanosecs() + - // (uint64_t)m_event->nanosecs()); - counts[m_event->DataID + 1] += 1; // needs to be freed!!! @@ -264,25 +332,11 @@ void asynStreamGeneratorDriver::receiveUDP() { auto nde = new NormalisedDetectorEvent(); nde->TimeStamp = header->nanosecs() + (uint64_t)d_event->nanosecs(); - nde->PixID = - (header->McpdID - 1) * x_pixels * y_pixels + - x_pixels * (uint32_t)d_event->XPosition + - (uint32_t)d_event->YPosition; + nde->PixID = d_event->pixelId(header->McpdID); this->detectorQueue.push(nde); } } - for (size_t i = 0; i < num_channels; ++i) { - getIntegerParam(P_Counts[i], &val); - counts[i] += val; - } - - // asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, - // "%s:%s: det: (%d), mon0: (%d), mon1: (%d), mon2: " - // "(%d), mon3: (%d)\n", - // driverName, "receiveUDP", counts[0], - // counts[1], counts[2], counts[3], counts[4]); - lock(); for (size_t i = 0; i < num_channels; ++i) { setIntegerParam(P_Counts[i], counts[i]); @@ -292,7 +346,15 @@ void asynStreamGeneratorDriver::receiveUDP() { } else { asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: invalid UDP packet\n", driverName, - "receiveUDP"); + functionName); + } + + if (countPreset && counts[presetChannel] >= countPreset) { + lock(); + 
setIntegerParam(P_Status, STATUS_IDLE); + setIntegerParam(P_CountPreset, 0); + callParamCallbacks(); + unlock(); } } diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index 5aaf664..f3d2ac4 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -36,6 +36,12 @@ struct __attribute__((__packed__)) DetectorEvent { uint16_t Amplitude : 8; uint16_t Id : 1; inline uint32_t nanosecs() { return TimeStamp * 100; } + inline uint64_t pixelId(uint32_t mpcdId) { + const uint32_t x_pixels = 128; + const uint32_t y_pixels = 128; + return (mpcdId - 1) * x_pixels * y_pixels + + x_pixels * (uint32_t)this->XPosition + (uint32_t)this->YPosition; + } }; struct __attribute__((__packed__)) MonitorEvent { @@ -60,12 +66,24 @@ struct __attribute__((__packed__)) NormalisedDetectorEvent { uint32_t PixID; }; +/******************************************************************************* + * Status values that should match the definition in db/daq_common.db + */ +#define STATUS_IDLE 0 +#define STATUS_COUNTING 1 +#define STATUS_LOWRATE 2 +#define STATUS_PAUSED 3 + /******************************************************************************* * Parameters for use in DB records * * i.e.e drvInfo strings that are used to identify the parameters */ +#define P_StatusString "STATUS" +#define P_ResetString "RESET" +#define P_CountPresetString "P_CNT" +#define P_MonitorChannelString "MONITOR" #define P_CountsString "COUNTS%d" /******************************************************************************* @@ -77,16 +95,23 @@ class asynStreamGeneratorDriver : public asynPortDriver { const int numChannels); virtual ~asynStreamGeneratorDriver(); + virtual asynStatus writeInt32(asynUser *pasynUser, epicsInt32 value); + void receiveUDP(); void produceMonitor(); void produceDetector(); protected: // Parameter Identifying IDs + int P_Status; + int P_Reset; + int P_CountPreset; + int P_MonitorChannel; int *P_Counts; private: asynUser *pasynUDPUser; 
+ epicsEventId pausedEventId; int num_channels; From 7bacc716ccbd4384a53f0e2f7a4a8c362d1a01bb Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Fri, 31 Oct 2025 19:10:59 +0100 Subject: [PATCH 11/35] adds elapsed time and time based preset --- db/channels.db | 17 ++-- db/daq_common.db | 40 ++++----- src/asynStreamGeneratorDriver.cpp | 130 ++++++++++++++++++++++++------ src/asynStreamGeneratorDriver.h | 7 ++ 4 files changed, 141 insertions(+), 53 deletions(-) diff --git a/db/channels.db b/db/channels.db index 7073cef..c2cde1f 100644 --- a/db/channels.db +++ b/db/channels.db @@ -104,11 +104,12 @@ record(longin, "$(INSTR)$(NAME):M$(CHANNEL)") field(PINI, "YES") } -# record(ai, "$(INSTR)$(NAME):R$(CHANNEL)") -# { -# field(DESC, "Rate of DAQ CH$(CHANNEL)") -# field(INP, "@... readRate($(INSTR)$(NAME):, $(CHANNEL)) $(PORT)") -# field(DTYP, "stream") -# field(EGU, "cts/sec") -# field(SCAN, "1 second") -# } +record(ai, "$(INSTR)$(NAME):R$(CHANNEL)") +{ + field(DESC, "Rate of DAQ CH$(CHANNEL)") + field(EGU, "cts/sec") + field(DTYP, "asynInt32") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) RATE$(CHANNEL)") + field(SCAN, "I/O Intr") + field(PINI, "YES") +} diff --git a/db/daq_common.db b/db/daq_common.db index a5e5501..7c91111 100644 --- a/db/daq_common.db +++ b/db/daq_common.db @@ -89,17 +89,16 @@ record(ao,"$(INSTR)$(NAME):PRESET-COUNT") field(PREC, 2) } -# record(ao,"$(INSTR)$(NAME):PRESET-TIME") -# { -# field(DESC, "Count for specified time") -# field(DTYP, "stream") -# field(OUT, "@... 
startWithTimePreset$(CHANNELS)($(INSTR)$(NAME):) $(PORT)") -# field(VAL, 0) -# field(PREC, 2) -# field(EGU, "seconds") -# field(FLNK, "$(INSTR)$(NAME):RAW-STATUS") -# } -# +record(ao,"$(INSTR)$(NAME):PRESET-TIME") +{ + field(DESC, "Count for specified time") + field(EGU, "seconds") + field(DTYP, "asynInt32") + field(OUT, "@asyn($(PORT),0,$(TIMEOUT=1)) P_TIME") + field(VAL, 0) + field(PREC, 2) +} + # record(bo,"$(INSTR)$(NAME):PAUSE") # { # field(DESC, "Pause the current count") @@ -168,10 +167,10 @@ record(longin, "$(INSTR)$(NAME):MONITOR-CHANNEL_RBV") # record(ai,"$(INSTR)$(NAME):THRESHOLD_RBV") # { # field(DESC, "Minimum rate for counting to proceed") +# field(EGU, "cts/sec") # field(INP, "@... readMinRate($(INSTR)$(NAME):) $(PORT)") # field(DTYP, "stream") # field(SCAN, "1 second") -# field(EGU, "cts/sec") # } # # record(longout,"$(INSTR)$(NAME):THRESHOLD-MONITOR") @@ -187,10 +186,10 @@ record(longin, "$(INSTR)$(NAME):MONITOR-CHANNEL_RBV") # record(longin,"$(INSTR)$(NAME):THRESHOLD-MONITOR_RBV") # { # field(DESC, "Channel monitored for minimum rate") +# field(EGU, "CH") # field(INP, "@... 
readRateMonitor($(INSTR)$(NAME):) $(PORT)") # field(DTYP, "stream") # field(SCAN, "1 second") -# field(EGU, "CH") # } # # record(longout, "$(INSTR)$(NAME):CT") @@ -204,9 +203,12 @@ record(longin, "$(INSTR)$(NAME):MONITOR-CHANNEL_RBV") ################################################################################ # Read all monitors values -# record(ai,"$(INSTR)$(NAME):ELAPSED-TIME") -# { -# field(DESC, "DAQ Measured Time") -# field(EGU, "sec") -# field(FLNK, "$(INSTR)$(NAME):ETO") -# } +record(ai,"$(INSTR)$(NAME):ELAPSED-TIME") +{ + field(DESC, "DAQ Measured Time") + field(EGU, "sec") + field(DTYP, "asynInt32") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) TIME") + field(SCAN, "I/O Intr") + field(PINI, "YES") +} diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 96a52b1..0414a0e 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -3,6 +3,7 @@ #include #include #include +#include // Just for printing #define __STDC_FORMAT_MACROS @@ -86,10 +87,8 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, num_channels(numChannels + 1), monitorQueue(1000, false), detectorQueue(1000, false) { const char *functionName = "asynStreamGeneratorDriver"; - // Parameter Setup - char pv_name_buffer[100]; - P_Counts = new int[this->num_channels]; + // Parameter Setup asynStatus status = asynSuccess; status = (asynStatus)(status | createParam(P_StatusString, asynParamInt32, @@ -104,12 +103,23 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, asynParamInt32, &P_CountPreset)); status = (asynStatus)(status | setIntegerParam(P_CountPreset, 0)); + status = (asynStatus)(status | createParam(P_TimePresetString, + asynParamInt32, &P_TimePreset)); + status = (asynStatus)(status | setIntegerParam(P_TimePreset, 0)); + + status = (asynStatus)(status | createParam(P_ElapsedTimeString, + asynParamInt32, &P_ElapsedTime)); + status = (asynStatus)(status | 
setIntegerParam(P_ElapsedTime, 0)); + status = (asynStatus)(status | createParam(P_MonitorChannelString, asynParamInt32, &P_MonitorChannel)); status = (asynStatus)(status | setIntegerParam(P_MonitorChannel, 0)); - // Create PVs templated on Channel Number + // Create Parameters templated on Channel Number + char pv_name_buffer[100]; + P_Counts = new int[this->num_channels]; + P_Rates = new int[this->num_channels]; for (size_t i = 0; i < this->num_channels; ++i) { memset(pv_name_buffer, 0, 100); epicsSnprintf(pv_name_buffer, 100, P_CountsString, i); @@ -117,6 +127,13 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, (asynStatus)(status | createParam(pv_name_buffer, asynParamInt32, P_Counts + i)); status = (asynStatus)(status | setIntegerParam(P_Counts[i], 0)); + + memset(pv_name_buffer, 0, 100); + epicsSnprintf(pv_name_buffer, 100, P_RateString, i); + status = + (asynStatus)(status | createParam(pv_name_buffer, asynParamInt32, + P_Rates + i)); + status = (asynStatus)(status | setIntegerParam(P_Rates[i], 0)); } if (status) { @@ -128,8 +145,8 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, // Create Events this->pausedEventId = epicsEventCreate(epicsEventEmpty); - this->monitorProducer = create_kafka_producer(); - this->detectorProducer = create_kafka_producer(); + // this->monitorProducer = create_kafka_producer(); + // this->detectorProducer = create_kafka_producer(); // Setup for Thread Producing Monitor Kafka Events status = @@ -180,6 +197,7 @@ asynStreamGeneratorDriver::~asynStreamGeneratorDriver() { // should make sure queues are empty and freed // and that the kafka producers are flushed and freed delete[] P_Counts; + delete[] P_Rates; // TODO add exit should perhaps ensure the queue is flushed // rd_kafka_poll(producer, 0); @@ -205,6 +223,13 @@ asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, // } if (function == P_CountPreset) { + // TODO should block setting a preset when already 
set + setIntegerParam(function, value); + setIntegerParam(P_Status, STATUS_COUNTING); + status = (asynStatus)callParamCallbacks(); + epicsEventSignal(this->pausedEventId); + } else if (function == P_TimePreset) { + // TODO should block setting a preset when already set setIntegerParam(function, value); setIntegerParam(P_Status, STATUS_COUNTING); status = (asynStatus)callParamCallbacks(); @@ -247,7 +272,8 @@ void asynStreamGeneratorDriver::receiveUDP() { epicsInt32 val; epicsInt32 currentStatus; epicsInt32 countPreset = 0; - epicsInt32 presetChannel = 1; + epicsInt32 timePreset = 0; + epicsInt32 presetChannel = 0; const char *functionName = "receiveUDP"; @@ -255,6 +281,10 @@ void asynStreamGeneratorDriver::receiveUDP() { // uint32. It does support int64 though.. so we start with that epicsInt32 *counts = new epicsInt32[this->num_channels]; + uint64_t start_time = std::numeric_limits::max(); + uint64_t current_time = 0; + epicsInt32 elapsedTime = 0; + while (true) { status = getIntegerParam(this->P_Status, ¤tStatus); @@ -263,6 +293,7 @@ void asynStreamGeneratorDriver::receiveUDP() { epicsEventWait(this->pausedEventId); getIntegerParam(this->P_CountPreset, &countPreset); + getIntegerParam(this->P_TimePreset, &timePreset); getIntegerParam(this->P_MonitorChannel, &presetChannel); // memset doesn't work with epicsInt32 @@ -270,10 +301,15 @@ void asynStreamGeneratorDriver::receiveUDP() { counts[i] = 0; } + start_time = std::numeric_limits::max(); + current_time = 0; + elapsedTime = 0; + lock(); for (size_t i = 0; i < num_channels; ++i) { setIntegerParam(P_Counts[i], counts[i]); } + setIntegerParam(P_ElapsedTime, 0); callParamCallbacks(); unlock(); @@ -296,6 +332,12 @@ void asynStreamGeneratorDriver::receiveUDP() { size_t total_events = (header->BufferLength - 21) / 3; + start_time = + std::min(start_time, (uint64_t)(header->nanosecs() / 1e9)); + // This is maybe safer, in case the time wraps back around? 
+ // if (start_time == std::numeric_limits::max()) + // start_time = header->nanosecs() /1e9; + // TODO lots of checks and validation missing everywhere here if (received == total_events * 6 + 42) { // asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, @@ -324,6 +366,12 @@ void asynStreamGeneratorDriver::receiveUDP() { nme->DataID = m_event->DataID; this->monitorQueue.push(nme); + current_time = std::max( + current_time, + (uint64_t)((header->nanosecs() + + (uint64_t)m_event->nanosecs()) / + 1e9)); + } else { // Detector Event DetectorEvent *d_event = (DetectorEvent *)event; counts[0] += 1; @@ -334,6 +382,12 @@ void asynStreamGeneratorDriver::receiveUDP() { header->nanosecs() + (uint64_t)d_event->nanosecs(); nde->PixID = d_event->pixelId(header->McpdID); this->detectorQueue.push(nde); + + current_time = std::max( + current_time, + (uint64_t)((header->nanosecs() + + (uint64_t)d_event->nanosecs()) / + 1e9)); } } @@ -341,6 +395,8 @@ void asynStreamGeneratorDriver::receiveUDP() { for (size_t i = 0; i < num_channels; ++i) { setIntegerParam(P_Counts[i], counts[i]); } + elapsedTime = current_time - start_time; + setIntegerParam(P_ElapsedTime, elapsedTime); callParamCallbacks(); unlock(); } else { @@ -349,10 +405,11 @@ void asynStreamGeneratorDriver::receiveUDP() { functionName); } - if (countPreset && counts[presetChannel] >= countPreset) { + if ((countPreset && counts[presetChannel] >= countPreset) || (timePreset && elapsedTime >= timePreset)) { lock(); setIntegerParam(P_Status, STATUS_IDLE); setIntegerParam(P_CountPreset, 0); + setIntegerParam(P_TimePreset, 0); callParamCallbacks(); unlock(); } @@ -412,7 +469,6 @@ void asynStreamGeneratorDriver::produceMonitor() { &tof, &did); builder.Finish(message, "ev42"); - // printf("buffer size: %d\n", builder.GetSize()); rd_kafka_resp_err_t err = rd_kafka_producev( monitorProducer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), @@ -429,15 +485,8 @@ void asynStreamGeneratorDriver::produceMonitor() { // rd_kafka_err2str(err)); } - // 
epicsStdoutPrintf("Kafka Queue Size %d\n", - // rd_kafka_outq_len(monitorProducer)); - rd_kafka_poll(monitorProducer, 0); - // printf("Monitor Events Queued before sending %d\n", - // this->monitorQueue.getHighWaterMark()); - // this->monitorQueue.resetHighWaterMark(); - tof.clear(); did.clear(); } @@ -447,19 +496,37 @@ void asynStreamGeneratorDriver::produceMonitor() { void asynStreamGeneratorDriver::produceDetector() { + static const size_t bufferSize = 9000; flatbuffers::FlatBufferBuilder builder(1024); std::vector tof; - tof.reserve(9000); + tof.reserve(bufferSize); std::vector did; - did.reserve(9000); + did.reserve(bufferSize); int total = 0; epicsTimeStamp last_sent = epicsTime::getCurrent(); uint64_t message_id = 0; + struct { + bool operator()(const uint64_t l, const uint64_t r) const { + return l > r; + } + } smallestToLargest; + + // This should never be used. It is just instantiated to reserve a buffer + // of specific size. + std::vector queueBuffer; + queueBuffer.reserve(bufferSize); + + std::priority_queue, + decltype(smallestToLargest)> + timeQueue(smallestToLargest, std::move(queueBuffer)); + + uint64_t newest = 0; + while (true) { if (!this->detectorQueue.isEmpty()) { @@ -468,11 +535,30 @@ void asynStreamGeneratorDriver::produceDetector() { auto nde = this->detectorQueue.pop(); tof.push_back(nde->TimeStamp); did.push_back(nde->PixID); + + newest = std::max(newest, nde->TimeStamp); + timeQueue.push(nde->TimeStamp); + delete nde; } else { epicsThreadSleep(0.001); // seconds } + while (!timeQueue.empty() && + (timeQueue.size() >= 8192 || + (newest - timeQueue.top()) > 5'000'000'000ull)) + timeQueue.pop(); + epicsInt32 rate = 0; + if (timeQueue.size() > 1) { + rate = ((double)timeQueue.size() / + ((double)(newest - timeQueue.top()) * 1e-9)); + } + + lock(); + setIntegerParam(P_Rates[0], rate); + callParamCallbacks(); + unlock(); + epicsTimeStamp now = epicsTime::getCurrent(); // At least every 0.2 seconds @@ -492,7 +578,6 @@ void 
asynStreamGeneratorDriver::produceDetector() { &tof, &did); builder.Finish(message, "ev42"); - // printf("buffer size: %d\n", builder.GetSize()); rd_kafka_resp_err_t err = rd_kafka_producev( detectorProducer, RD_KAFKA_V_TOPIC("NEWEFU_TEST2"), @@ -509,15 +594,8 @@ void asynStreamGeneratorDriver::produceDetector() { // rd_kafka_err2str(err)); } - // epicsStdoutPrintf("Kafka Queue Size %d\n", - // rd_kafka_outq_len(monitorProducer)); - rd_kafka_poll(detectorProducer, 0); - // printf("Detector Events Queued before sending %d\n", - // this->detectorQueue.getHighWaterMark()); - // this->detectorQueue.resetHighWaterMark(); - tof.clear(); did.clear(); } diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index f3d2ac4..f770cf1 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -83,8 +83,12 @@ struct __attribute__((__packed__)) NormalisedDetectorEvent { #define P_StatusString "STATUS" #define P_ResetString "RESET" #define P_CountPresetString "P_CNT" +#define P_TimePresetString "P_TIME" +#define P_ElapsedTimeString "TIME" #define P_MonitorChannelString "MONITOR" + #define P_CountsString "COUNTS%d" +#define P_RateString "RATE%d" /******************************************************************************* * Stream Generator Coordinating Class @@ -106,8 +110,11 @@ class asynStreamGeneratorDriver : public asynPortDriver { int P_Status; int P_Reset; int P_CountPreset; + int P_TimePreset; + int P_ElapsedTime; int P_MonitorChannel; int *P_Counts; + int *P_Rates; private: asynUser *pasynUDPUser; From a336ca74c98ef76e07dca26be695d61e53a83852 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Mon, 3 Nov 2025 09:26:50 +0100 Subject: [PATCH 12/35] adds remaining missing PVs --- db/channels.db | 109 ++++++------------- db/daq_common.db | 168 ++++++++++++++---------------- src/asynStreamGeneratorDriver.cpp | 32 +++++- src/asynStreamGeneratorDriver.h | 10 ++ 4 files changed, 152 insertions(+), 167 deletions(-) diff --git 
a/db/channels.db b/db/channels.db index c2cde1f..bb7a3fe 100644 --- a/db/channels.db +++ b/db/channels.db @@ -9,86 +9,43 @@ ################################################################################ # Status Variables -# # Trigger a change in status as clearing -# record(bo, "$(INSTR)$(NAME):T$(CHANNEL)") -# { -# field(DESC, "Trigger Clearing Status") -# field(VAL, 1) -# field(OUT, "$(INSTR)$(NAME):S$(CHANNEL) PP") -# } -# -# # Trigger a change in status as value returned to 0 -# record(seq, "$(INSTR)$(NAME):O$(CHANNEL)") -# { -# field(DESC, "Trigger Returned to 0 Status") -# field(LNK0, "$(INSTR)$(NAME):S$(CHANNEL) PP") -# field(DO0, 0) -# field(SELM, "Specified") -# field(SELL, "$(INSTR)$(NAME):M$(CHANNEL).VAL") -# } -# -# # Current Status of Channel, i.e. is it ready to count? -# record(bi, "$(INSTR)$(NAME):S$(CHANNEL)") -# { -# field(DESC, "Channel Status") -# field(VAL, 0) -# field(ZNAM, "OK") -# field(ONAM, "CLEARING") -# } +# Trigger a change in status as clearing +record(bo, "$(INSTR)$(NAME):T$(CHANNEL)") +{ + field(DESC, "Trigger Clearing Status") + field(VAL, 1) + field(OUT, "$(INSTR)$(NAME):S$(CHANNEL) PP") +} + +# Trigger a change in status as value returned to 0 +record(seq, "$(INSTR)$(NAME):O$(CHANNEL)") +{ + field(DESC, "Trigger Returned to 0 Status") + field(LNK0, "$(INSTR)$(NAME):S$(CHANNEL) PP") + field(DO0, 0) + field(SELM, "Specified") + field(SELL, "$(INSTR)$(NAME):M$(CHANNEL).VAL") +} + +# Current Status of Channel, i.e. is it ready to count? 
+record(bi, "$(INSTR)$(NAME):S$(CHANNEL)") +{ + field(DESC, "Channel Status") + field(VAL, 0) + field(ZNAM, "OK") + field(ONAM, "CLEARING") +} ################################################################################ # Count Commands -# # Unfortunately, clearing the channels is somewhat complicated as a result of -# # the addition of more channels over time and minimal changes to the underlying interface -# # -# # Urs Greuter provided the following explanation: -# # -# # bei den Befehlen CC r und HC r ist der Parameter r als bit-Maske zu verstehen: -# # -# # Bit0: Zähler Channel 1 -# # Bit2: Zähler Channel 2 -# # Bit3: Zähler Channel 3 -# # Bit4: Zähler Channel 4 -# # Bit5: Zähler Channel Timer -# # Bit6: Zähler Channel 5 -# # Bit7: Zähler Channel 6 -# # Bit8: Zähler Channel 7 -# # Bit9: Zähler Channel 8 -# # -# # Beispiele: -# # CC 1 setzt den Zähler des Channels 1 zurück -# # CC 4 setzt den Zähler des Channels 3 zurück -# # CC 5 setzt gleichzeitig die Zähler der Channels 1 und 3 zurück -# # CC 16 ist gleichbedeutend wie CT (Timer zurücksetzen) -# # CC 511 setzt gleichzeitig die Zähler aller Kanäle (auch des Timers) zurück. -# -# record(calc, "$(INSTR)$(NAME):BM$(CHANNEL)") -# { -# field(DESC, "Bit Mask for Channel") -# field(INPA, $(CHANNEL)) -# field(CALC, "A > 4 ? 2 ^ A : 2 ^ (A-1)") -# field(PINI, "YES") -# } -# -# record(longout, "$(INSTR)$(NAME):C$(CHANNEL)") -# { -# field(DESC, "Clear the current channel count") -# field(DTYP, "stream") -# field(OMSL, "closed_loop") -# field(DOL, "$(INSTR)$(NAME):BM$(CHANNEL) NPP") -# field(OUT, "@... clearChannel($(INSTR)$(NAME):) $(PORT)") -# field(FLNK, "$(INSTR)$(NAME):T$(CHANNEL)") -# } -# -# record(ao,"$(INSTR)$(NAME):THRESH$(CHANNEL)") -# { -# field(DESC, "Sets min rate for counting to proceed") -# field(OMSL, "supervisory") -# field(OROC, "0") -# field(OUT, "@... 
setMinRate($(INSTR)$(NAME):, $(CHANNEL)) $(PORT)") -# field(DTYP, "stream") -# } +record(longout, "$(INSTR)$(NAME):C$(CHANNEL)") +{ + field(DESC, "Clear the current channel count") + field(DTYP, "asynInt32") + field(OUT, "@asyn($(PORT),0,$(TIMEOUT=1)) C_$(CHANNEL)") + field(FLNK, "$(INSTR)$(NAME):T$(CHANNEL)") +} ################################################################################ # Read all monitors values diff --git a/db/daq_common.db b/db/daq_common.db index 7c91111..96a1cd3 100644 --- a/db/daq_common.db +++ b/db/daq_common.db @@ -50,32 +50,32 @@ record(longin, "$(INSTR)$(NAME):CHANNELS") field(DISP, 1) } -# # Trigger a change in status as clearing -# record(bo, "$(INSTR)$(NAME):ETT") -# { -# field(DESC, "Trigger Clearing Status") -# field(VAL, 1) -# field(OUT, "$(INSTR)$(NAME):ETS PP") -# } -# -# # Trigger a change in status as value returned to 0 -# record(seq, "$(INSTR)$(NAME):ETO") -# { -# field(DESC, "Trigger Returned to 0 Status") -# field(LNK0, "$(INSTR)$(NAME):ETS PP") -# field(DO0, 0) -# field(SELM, "Specified") -# field(SELL, "$(INSTR)$(NAME):ELAPSED-TIME.VAL") -# } -# -# # Current Status of Channel, i.e. is it ready to count? -# record(bi, "$(INSTR)$(NAME):ETS") -# { -# field(DESC, "Channel Status") -# field(VAL, 0) -# field(ZNAM, "OK") -# field(ONAM, "CLEARING") -# } +# Trigger a change in status as clearing +record(bo, "$(INSTR)$(NAME):ETT") +{ + field(DESC, "Trigger Clearing Status") + field(VAL, 1) + field(OUT, "$(INSTR)$(NAME):ETS PP") +} + +# Trigger a change in status as value returned to 0 +record(seq, "$(INSTR)$(NAME):ETO") +{ + field(DESC, "Trigger Returned to 0 Status") + field(LNK0, "$(INSTR)$(NAME):ETS PP") + field(DO0, 0) + field(SELM, "Specified") + field(SELL, "$(INSTR)$(NAME):ELAPSED-TIME.VAL") +} + +# Current Status of Channel, i.e. is it ready to count? 
+record(bi, "$(INSTR)$(NAME):ETS") +{ + field(DESC, "Channel Status") + field(VAL, 0) + field(ZNAM, "OK") + field(ONAM, "CLEARING") +} ################################################################################ # Count Commands @@ -116,14 +116,13 @@ record(ao,"$(INSTR)$(NAME):PRESET-TIME") # field(VAL, "0") # field(FLNK, "$(INSTR)$(NAME):RAW-STATUS") # } -# -# record(longout, "$(INSTR)$(NAME):STOP") -# { -# field(DESC, "Stop the current counting operation") -# field(DTYP, "stream") -# field(OUT, "@... stopCount($(INSTR)$(NAME):) $(PORT)") -# field(FLNK, "$(INSTR)$(NAME):RAW-STATUS") -# } + +record(longout, "$(INSTR)$(NAME):STOP") +{ + field(DESC, "Stop the current counting operation") + field(DTYP, "asynInt32") + field(OUT, "@asyn($(PORT),0,$(TIMEOUT=1)) STOP") +} record(longout, "$(INSTR)$(NAME):MONITOR-CHANNEL") { @@ -143,62 +142,53 @@ record(longin, "$(INSTR)$(NAME):MONITOR-CHANNEL_RBV") field(PINI, "YES") } -# record(calc, "$(INSTR)$(NAME):RATE_MAP") -# { -# field(DESC, "Want a consistent lowrate pv") -# field(INPA, "$(INSTR)$(NAME):RAW-STATUS.B2 NPP") -# field(CALC, "(A=1)?1:0") -# } +record(ao,"$(INSTR)$(NAME):THRESHOLD") +{ + field(DESC, "Minimum rate for counting to proceed") + field(DTYP, "asynInt32") + field(OUT, "@asyn($(PORT),0,$(TIMEOUT=1)) THRESH") + field(VAL, "1") # Default Rate + field(DRVL, "1") # Minimum Rate + field(DRVH, "100000") # Maximum Rate +} -# -# record(ao,"$(INSTR)$(NAME):THRESHOLD") -# { -# field(DESC, "Minimum rate for counting to proceed") -# field(VAL, "1") # Default Rate -# # Could perhaps still be improved. -# # It seems to only accept whole counts? -# field(DRVL, "1") # Minimum Rate -# field(DRVH, "100000") # Maximum Rate -# field(OMSL, "supervisory") -# field(OROC, "0") -# field(OUT, "$(INSTR)$(NAME):THRESHOLD-F PP") -# } -# -# record(ai,"$(INSTR)$(NAME):THRESHOLD_RBV") -# { -# field(DESC, "Minimum rate for counting to proceed") -# field(EGU, "cts/sec") -# field(INP, "@... 
readMinRate($(INSTR)$(NAME):) $(PORT)") -# field(DTYP, "stream") -# field(SCAN, "1 second") -# } -# -# record(longout,"$(INSTR)$(NAME):THRESHOLD-MONITOR") -# { -# field(DESC, "Channel monitored for minimum rate") -# field(VAL, "1") # Monitor -# field(DRVL, "0") # Smallest Threshold Channel (0 is off) -# field(DRVH, "$(CHANNELS)") # Largest Threshold Channel -# field(OUT, "@... setRateMonitor($(INSTR)$(NAME):) $(PORT)") -# field(DTYP, "stream") -# } -# -# record(longin,"$(INSTR)$(NAME):THRESHOLD-MONITOR_RBV") -# { -# field(DESC, "Channel monitored for minimum rate") -# field(EGU, "CH") -# field(INP, "@... readRateMonitor($(INSTR)$(NAME):) $(PORT)") -# field(DTYP, "stream") -# field(SCAN, "1 second") -# } -# -# record(longout, "$(INSTR)$(NAME):CT") -# { -# field(DESC, "Clear the timer") -# field(DTYP, "stream") -# field(OUT, "@... clearTimer($(INSTR)$(NAME):) $(PORT)") -# field(FLNK, "$(INSTR)$(NAME):ETT") -# } +record(ai,"$(INSTR)$(NAME):THRESHOLD_RBV") +{ + field(DESC, "Minimum rate for counting to proceed") + field(EGU, "cts/sec") + field(DTYP, "asynInt32") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) THRESH") + field(SCAN, "I/O Intr") + field(PINI, "YES") +} + +record(longout,"$(INSTR)$(NAME):THRESHOLD-MONITOR") +{ + field(DESC, "Channel monitored for minimum rate") + field(DTYP, "asynInt32") + field(OUT, "@asyn($(PORT),0,$(TIMEOUT=1)) THRESH_CH") + field(VAL, "1") # Monitor + field(DRVL, "0") # Smallest Threshold Channel (0 is off) + field(DRVH, "$(CHANNELS)") # Largest Threshold Channel +} + +record(longin,"$(INSTR)$(NAME):THRESHOLD-MONITOR_RBV") +{ + field(DESC, "Channel monitored for minimum rate") + field(EGU, "CH") + field(DTYP, "asynInt32") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) THRESH_CH") + field(SCAN, "I/O Intr") + field(PINI, "YES") +} + +record(longout, "$(INSTR)$(NAME):CT") +{ + field(DESC, "Clear the timer") + field(DTYP, "asynInt32") + field(OUT, "@asyn($(PORT),0,$(TIMEOUT=1)) C_TIME") + field(FLNK, "$(INSTR)$(NAME):ETT") +} 
################################################################################ # Read all monitors values diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 0414a0e..b9f28a2 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -99,6 +99,10 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, createParam(P_ResetString, asynParamInt32, &P_Reset)); status = (asynStatus)(status | setIntegerParam(P_Reset, 0)); + status = (asynStatus)(status | + createParam(P_StopString, asynParamInt32, &P_Stop)); + status = (asynStatus)(status | setIntegerParam(P_Stop, 0)); + status = (asynStatus)(status | createParam(P_CountPresetString, asynParamInt32, &P_CountPreset)); status = (asynStatus)(status | setIntegerParam(P_CountPreset, 0)); @@ -111,15 +115,32 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, asynParamInt32, &P_ElapsedTime)); status = (asynStatus)(status | setIntegerParam(P_ElapsedTime, 0)); + status = (asynStatus)(status | createParam(P_ClearElapsedTimeString, + asynParamInt32, &P_ClearElapsedTime)); + status = (asynStatus)(status | setIntegerParam(P_ClearElapsedTime, 0)); + status = (asynStatus)(status | createParam(P_MonitorChannelString, asynParamInt32, &P_MonitorChannel)); status = (asynStatus)(status | setIntegerParam(P_MonitorChannel, 0)); + status = + (asynStatus)(status | createParam(P_ThresholdString, + asynParamInt32, &P_Threshold)); + status = (asynStatus)(status | setIntegerParam(P_Threshold, 1)); + + status = + (asynStatus)(status | createParam(P_ThresholdChannelString, + asynParamInt32, &P_ThresholdChannel)); + status = (asynStatus)(status | setIntegerParam(P_ThresholdChannel, 1)); + + + // Create Parameters templated on Channel Number char pv_name_buffer[100]; P_Counts = new int[this->num_channels]; P_Rates = new int[this->num_channels]; + P_ClearCounts = new int[this->num_channels]; for (size_t i = 0; i < this->num_channels; ++i) 
{ memset(pv_name_buffer, 0, 100); epicsSnprintf(pv_name_buffer, 100, P_CountsString, i); @@ -134,6 +155,13 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, (asynStatus)(status | createParam(pv_name_buffer, asynParamInt32, P_Rates + i)); status = (asynStatus)(status | setIntegerParam(P_Rates[i], 0)); + + memset(pv_name_buffer, 0, 100); + epicsSnprintf(pv_name_buffer, 100, P_ClearCountsString, i); + status = + (asynStatus)(status | createParam(pv_name_buffer, asynParamInt32, + P_ClearCounts + i)); + status = (asynStatus)(status | setIntegerParam(P_ClearCounts[i], 0)); } if (status) { @@ -145,8 +173,8 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, // Create Events this->pausedEventId = epicsEventCreate(epicsEventEmpty); - // this->monitorProducer = create_kafka_producer(); - // this->detectorProducer = create_kafka_producer(); + this->monitorProducer = create_kafka_producer(); + this->detectorProducer = create_kafka_producer(); // Setup for Thread Producing Monitor Kafka Events status = diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index f770cf1..7904a24 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -82,13 +82,18 @@ struct __attribute__((__packed__)) NormalisedDetectorEvent { #define P_StatusString "STATUS" #define P_ResetString "RESET" +#define P_StopString "STOP" #define P_CountPresetString "P_CNT" #define P_TimePresetString "P_TIME" #define P_ElapsedTimeString "TIME" +#define P_ClearElapsedTimeString "C_TIME" #define P_MonitorChannelString "MONITOR" +#define P_ThresholdString "THRESH" +#define P_ThresholdChannelString "THRESH_CH" #define P_CountsString "COUNTS%d" #define P_RateString "RATE%d" +#define P_ClearCountsString "C_%d" /******************************************************************************* * Stream Generator Coordinating Class @@ -109,12 +114,17 @@ class asynStreamGeneratorDriver : public asynPortDriver { // 
Parameter Identifying IDs int P_Status; int P_Reset; + int P_Stop; int P_CountPreset; int P_TimePreset; int P_ElapsedTime; + int P_ClearElapsedTime; int P_MonitorChannel; + int P_Threshold; + int P_ThresholdChannel; int *P_Counts; int *P_Rates; + int *P_ClearCounts; private: asynUser *pasynUDPUser; From e65725609ccef7f973d2e482fa51f9e6eaa84b09 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Mon, 3 Nov 2025 13:29:01 +0100 Subject: [PATCH 13/35] moves more options to ioc function --- scripts/st.cmd | 2 +- src/asynStreamGeneratorDriver.cpp | 131 +++++++++++++----------------- src/asynStreamGeneratorDriver.h | 9 +- 3 files changed, 63 insertions(+), 79 deletions(-) diff --git a/scripts/st.cmd b/scripts/st.cmd index d8fe540..463962b 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -9,7 +9,7 @@ epicsEnvSet("INSTR", "SQ:TEST:") epicsEnvSet("NAME", "SG") drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 0) -asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4) +asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 1000, 8192) dbLoadRecords("$(StreamGenerator_DB)daq_common.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNELS=5") diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index b9f28a2..c2309c1 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -67,12 +67,23 @@ static void detectorProducerTask(void *drvPvt) { pSGD->produceDetector(); } +/******************************************************************************* + * Stream Generator Helper Methods + */ + +asynStatus asynStreamGeneratorDriver::createInt32Param( + // TODO should show error if there is one + asynStatus status, char *name, int *variable, epicsInt32 initialValue) { + return (asynStatus)(status | createParam(name, asynParamInt32, variable) | + setIntegerParam(*variable, initialValue)); +} + /******************************************************************************* * Stream Generator Methods */ 
-asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, - const char *ipPortName, - const int numChannels) +asynStreamGeneratorDriver::asynStreamGeneratorDriver( + const char *portName, const char *ipPortName, const int numChannels, + const int kafkaQueueSize, const int kafkaMaxPacketSize) : asynPortDriver(portName, 1, /* maxAddr */ asynInt32Mask | asynInt64Mask | asynDrvUserMask, /* Interface mask */ @@ -84,57 +95,27 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, 1, /* Autoconnect */ 0, /* Default priority */ 0), /* Default stack size*/ - num_channels(numChannels + 1), monitorQueue(1000, false), - detectorQueue(1000, false) { + num_channels(numChannels + 1), monitorQueue(kafkaQueueSize, false), + detectorQueue(kafkaQueueSize, false), + kafkaMaxPacketSize(kafkaMaxPacketSize) { const char *functionName = "asynStreamGeneratorDriver"; // Parameter Setup asynStatus status = asynSuccess; - status = (asynStatus)(status | createParam(P_StatusString, asynParamInt32, - &P_Status)); - status = (asynStatus)(status | setIntegerParam(P_Status, STATUS_IDLE)); - - status = (asynStatus)(status | - createParam(P_ResetString, asynParamInt32, &P_Reset)); - status = (asynStatus)(status | setIntegerParam(P_Reset, 0)); - - status = (asynStatus)(status | - createParam(P_StopString, asynParamInt32, &P_Stop)); - status = (asynStatus)(status | setIntegerParam(P_Stop, 0)); - - status = (asynStatus)(status | createParam(P_CountPresetString, - asynParamInt32, &P_CountPreset)); - status = (asynStatus)(status | setIntegerParam(P_CountPreset, 0)); - - status = (asynStatus)(status | createParam(P_TimePresetString, - asynParamInt32, &P_TimePreset)); - status = (asynStatus)(status | setIntegerParam(P_TimePreset, 0)); - - status = (asynStatus)(status | createParam(P_ElapsedTimeString, - asynParamInt32, &P_ElapsedTime)); - status = (asynStatus)(status | setIntegerParam(P_ElapsedTime, 0)); - - status = (asynStatus)(status | 
createParam(P_ClearElapsedTimeString, - asynParamInt32, &P_ClearElapsedTime)); - status = (asynStatus)(status | setIntegerParam(P_ClearElapsedTime, 0)); - + status = createInt32Param(status, P_StatusString, &P_Status, STATUS_IDLE); + status = createInt32Param(status, P_ResetString, &P_Reset); + status = createInt32Param(status, P_StopString, &P_Stop); + status = createInt32Param(status, P_CountPresetString, &P_CountPreset); + status = createInt32Param(status, P_TimePresetString, &P_TimePreset); + status = createInt32Param(status, P_ElapsedTimeString, &P_ElapsedTime); status = - (asynStatus)(status | createParam(P_MonitorChannelString, - asynParamInt32, &P_MonitorChannel)); - status = (asynStatus)(status | setIntegerParam(P_MonitorChannel, 0)); - + createInt32Param(status, P_ClearElapsedTimeString, &P_ClearElapsedTime); status = - (asynStatus)(status | createParam(P_ThresholdString, - asynParamInt32, &P_Threshold)); - status = (asynStatus)(status | setIntegerParam(P_Threshold, 1)); - - status = - (asynStatus)(status | createParam(P_ThresholdChannelString, - asynParamInt32, &P_ThresholdChannel)); - status = (asynStatus)(status | setIntegerParam(P_ThresholdChannel, 1)); - - + createInt32Param(status, P_MonitorChannelString, &P_MonitorChannel); + status = createInt32Param(status, P_ThresholdString, &P_Threshold, 1); + status = createInt32Param(status, P_ThresholdChannelString, + &P_ThresholdChannel, 1); // Create Parameters templated on Channel Number char pv_name_buffer[100]; @@ -144,24 +125,15 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(const char *portName, for (size_t i = 0; i < this->num_channels; ++i) { memset(pv_name_buffer, 0, 100); epicsSnprintf(pv_name_buffer, 100, P_CountsString, i); - status = - (asynStatus)(status | createParam(pv_name_buffer, asynParamInt32, - P_Counts + i)); - status = (asynStatus)(status | setIntegerParam(P_Counts[i], 0)); + status = createInt32Param(status, pv_name_buffer, P_Counts + i); memset(pv_name_buffer, 0, 100); 
epicsSnprintf(pv_name_buffer, 100, P_RateString, i); - status = - (asynStatus)(status | createParam(pv_name_buffer, asynParamInt32, - P_Rates + i)); - status = (asynStatus)(status | setIntegerParam(P_Rates[i], 0)); + status = createInt32Param(status, pv_name_buffer, P_Rates + i); memset(pv_name_buffer, 0, 100); epicsSnprintf(pv_name_buffer, 100, P_ClearCountsString, i); - status = - (asynStatus)(status | createParam(pv_name_buffer, asynParamInt32, - P_ClearCounts + i)); - status = (asynStatus)(status | setIntegerParam(P_ClearCounts[i], 0)); + status = createInt32Param(status, pv_name_buffer, P_ClearCounts + i); } if (status) { @@ -251,13 +223,13 @@ asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, // } if (function == P_CountPreset) { - // TODO should block setting a preset when already set + // TODO should block setting a preset when already set setIntegerParam(function, value); setIntegerParam(P_Status, STATUS_COUNTING); status = (asynStatus)callParamCallbacks(); epicsEventSignal(this->pausedEventId); } else if (function == P_TimePreset) { - // TODO should block setting a preset when already set + // TODO should block setting a preset when already set setIntegerParam(function, value); setIntegerParam(P_Status, STATUS_COUNTING); status = (asynStatus)callParamCallbacks(); @@ -331,7 +303,7 @@ void asynStreamGeneratorDriver::receiveUDP() { start_time = std::numeric_limits::max(); current_time = 0; - elapsedTime = 0; + elapsedTime = 0; lock(); for (size_t i = 0; i < num_channels; ++i) { @@ -423,7 +395,7 @@ void asynStreamGeneratorDriver::receiveUDP() { for (size_t i = 0; i < num_channels; ++i) { setIntegerParam(P_Counts[i], counts[i]); } - elapsedTime = current_time - start_time; + elapsedTime = current_time - start_time; setIntegerParam(P_ElapsedTime, elapsedTime); callParamCallbacks(); unlock(); @@ -433,7 +405,8 @@ void asynStreamGeneratorDriver::receiveUDP() { functionName); } - if ((countPreset && counts[presetChannel] >= countPreset) || 
(timePreset && elapsedTime >= timePreset)) { + if ((countPreset && counts[presetChannel] >= countPreset) || + (timePreset && elapsedTime >= timePreset)) { lock(); setIntegerParam(P_Status, STATUS_IDLE); setIntegerParam(P_CountPreset, 0); @@ -452,10 +425,10 @@ void asynStreamGeneratorDriver::produceMonitor() { flatbuffers::FlatBufferBuilder builder(1024); std::vector tof; - tof.reserve(9000); + tof.reserve(this->kafkaMaxPacketSize + 16); std::vector did; - did.reserve(9000); + did.reserve(this->kafkaMaxPacketSize + 16); int total = 0; epicsTimeStamp last_sent = epicsTime::getCurrent(); @@ -481,7 +454,7 @@ void asynStreamGeneratorDriver::produceMonitor() { epicsTimeStamp now = epicsTime::getCurrent(); // At least every 0.2 seconds - if (total >= 8192 || + if (total >= this->kafkaMaxPacketSize || epicsTimeDiffInNS(&now, &last_sent) > 200'000'000ll) { last_sent = epicsTime::getCurrent(); @@ -524,7 +497,7 @@ void asynStreamGeneratorDriver::produceMonitor() { void asynStreamGeneratorDriver::produceDetector() { - static const size_t bufferSize = 9000; + static const size_t bufferSize = this->kafkaMaxPacketSize + 16; flatbuffers::FlatBufferBuilder builder(1024); std::vector tof; @@ -573,7 +546,7 @@ void asynStreamGeneratorDriver::produceDetector() { } while (!timeQueue.empty() && - (timeQueue.size() >= 8192 || + (timeQueue.size() >= this->kafkaMaxPacketSize || (newest - timeQueue.top()) > 5'000'000'000ull)) timeQueue.pop(); epicsInt32 rate = 0; @@ -590,7 +563,7 @@ void asynStreamGeneratorDriver::produceDetector() { epicsTimeStamp now = epicsTime::getCurrent(); // At least every 0.2 seconds - if (total >= 8192 || + if (total >= this->kafkaMaxPacketSize || epicsTimeDiffInNS(&now, &last_sent) > 200'000'000ll) { last_sent = epicsTime::getCurrent(); @@ -638,19 +611,25 @@ extern "C" { asynStatus asynStreamGeneratorDriverConfigure(const char *portName, const char *ipPortName, - const int numChannels) { - new asynStreamGeneratorDriver(portName, ipPortName, numChannels); + const 
int numChannels, + const int kafkaQueueSize, + const int kafkaMaxPacketSize) { + new asynStreamGeneratorDriver(portName, ipPortName, numChannels, + kafkaQueueSize, kafkaMaxPacketSize); return asynSuccess; } static const iocshArg initArg0 = {"portName", iocshArgString}; static const iocshArg initArg1 = {"ipPortName", iocshArgString}; static const iocshArg initArg2 = {"numChannels", iocshArgInt}; -static const iocshArg *const initArgs[] = {&initArg0, &initArg1, &initArg2}; -static const iocshFuncDef initFuncDef = {"asynStreamGenerator", 3, initArgs}; +static const iocshArg initArg3 = {"kafkaQueueSize", iocshArgInt}; +static const iocshArg initArg4 = {"kafkaMaxPacketSize", iocshArgInt}; +static const iocshArg *const initArgs[] = {&initArg0, &initArg1, &initArg2, + &initArg3, &initArg4}; +static const iocshFuncDef initFuncDef = {"asynStreamGenerator", 5, initArgs}; static void initCallFunc(const iocshArgBuf *args) { - asynStreamGeneratorDriverConfigure(args[0].sval, args[1].sval, - args[2].ival); + asynStreamGeneratorDriverConfigure(args[0].sval, args[1].sval, args[2].ival, + args[3].ival, args[4].ival); } void asynStreamGeneratorDriverRegister(void) { diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index 7904a24..d3b6002 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -101,7 +101,8 @@ struct __attribute__((__packed__)) NormalisedDetectorEvent { class asynStreamGeneratorDriver : public asynPortDriver { public: asynStreamGeneratorDriver(const char *portName, const char *ipPortName, - const int numChannels); + const int numChannels, const int kafkaQueueSize, + const int kafkaMaxPacketSize); virtual ~asynStreamGeneratorDriver(); virtual asynStatus writeInt32(asynUser *pasynUser, epicsInt32 value); @@ -130,7 +131,8 @@ class asynStreamGeneratorDriver : public asynPortDriver { asynUser *pasynUDPUser; epicsEventId pausedEventId; - int num_channels; + const int num_channels; + const int kafkaMaxPacketSize; 
epicsRingPointer monitorQueue; rd_kafka_t *monitorProducer; @@ -139,6 +141,9 @@ class asynStreamGeneratorDriver : public asynPortDriver { rd_kafka_t *detectorProducer; constexpr static char *driverName = "StreamGenerator"; + + asynStatus createInt32Param(asynStatus status, char *name, int *variable, + epicsInt32 initialValue = 0); }; #endif From 81bd3bef7f1b1a6500322069d820673532fd1515 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Mon, 3 Nov 2025 17:31:16 +0100 Subject: [PATCH 14/35] working on correcting the ordering of the messages --- scripts/st.cmd | 4 +- scripts/udp_gen.py | 13 +- src/asynStreamGeneratorDriver.cpp | 278 +++++++++++++++--------------- src/asynStreamGeneratorDriver.h | 15 +- 4 files changed, 160 insertions(+), 150 deletions(-) diff --git a/scripts/st.cmd b/scripts/st.cmd index 463962b..3282bb4 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -8,8 +8,8 @@ require StreamGenerator, test epicsEnvSet("INSTR", "SQ:TEST:") epicsEnvSet("NAME", "SG") -drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 0) -asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 1000, 8192) +drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 1) +asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, 1000, 8192) dbLoadRecords("$(StreamGenerator_DB)daq_common.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNELS=5") diff --git a/scripts/udp_gen.py b/scripts/udp_gen.py index fcd19fd..297e31b 100644 --- a/scripts/udp_gen.py +++ b/scripts/udp_gen.py @@ -45,16 +45,15 @@ while True: header[16] = t_high & 0xff header[17] = t_high >> 8 - # num_events = random.randint(0, 243) - num_events = 243 + num_events = random.randint(0, 243) + # num_events = 243 + # num_events = 1 # update buffer length buffer_length = 21 + num_events * 3 header[0] = buffer_length & 0xff header[1] = (buffer_length >> 8) & 0xff - tosend = list(header) - # I believe, that in our case we never mix monitor and detector events as # the monitors should have id 0 
and the detector events 1-9 so I have # excluded that posibility here. That would, however, if true mean we could @@ -62,6 +61,10 @@ while True: is_monitor = random.randint(0, 9) + header[11] = 0 if is_monitor > 3 else random.randint(1,9) + + tosend = list(header) + if is_monitor > 3: for i in range(num_events): @@ -103,4 +106,4 @@ while True: sock.sendto(bytes(tosend), ('127.0.0.1', 54321)) mv = memoryview(bytes(header)).cast('H') print(f'Sent packet {mv[3]} with {num_events} events {base_timestamp}') - # time.sleep(0.0005) + # time.sleep(0.5) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index c2309c1..2864e57 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -57,6 +57,11 @@ static void udpPollerTask(void *drvPvt) { pSGD->receiveUDP(); } +static void daqTask(void *drvPvt) { + asynStreamGeneratorDriver *pSGD = (asynStreamGeneratorDriver *)drvPvt; + pSGD->processEvents(); +} + static void monitorProducerTask(void *drvPvt) { asynStreamGeneratorDriver *pSGD = (asynStreamGeneratorDriver *)drvPvt; pSGD->produceMonitor(); @@ -83,7 +88,8 @@ asynStatus asynStreamGeneratorDriver::createInt32Param( */ asynStreamGeneratorDriver::asynStreamGeneratorDriver( const char *portName, const char *ipPortName, const int numChannels, - const int kafkaQueueSize, const int kafkaMaxPacketSize) + const int udpQueueSize, const int kafkaQueueSize, + const int kafkaMaxPacketSize) : asynPortDriver(portName, 1, /* maxAddr */ asynInt32Mask | asynInt64Mask | asynDrvUserMask, /* Interface mask */ @@ -95,8 +101,8 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( 1, /* Autoconnect */ 0, /* Default priority */ 0), /* Default stack size*/ - num_channels(numChannels + 1), monitorQueue(kafkaQueueSize, false), - detectorQueue(kafkaQueueSize, false), + num_channels(numChannels + 1), udpQueue(udpQueueSize, false), + monitorQueue(kafkaQueueSize, false), detectorQueue(kafkaQueueSize, false), 
kafkaMaxPacketSize(kafkaMaxPacketSize) { const char *functionName = "asynStreamGeneratorDriver"; @@ -122,7 +128,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( P_Counts = new int[this->num_channels]; P_Rates = new int[this->num_channels]; P_ClearCounts = new int[this->num_channels]; - for (size_t i = 0; i < this->num_channels; ++i) { + for (std::size_t i = 0; i < this->num_channels; ++i) { memset(pv_name_buffer, 0, 100); epicsSnprintf(pv_name_buffer, 100, P_CountsString, i); status = createInt32Param(status, pv_name_buffer, P_Counts + i); @@ -145,27 +151,42 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( // Create Events this->pausedEventId = epicsEventCreate(epicsEventEmpty); - this->monitorProducer = create_kafka_producer(); - this->detectorProducer = create_kafka_producer(); + // TODO re-enable the kafka stuff + // this->monitorProducer = create_kafka_producer(); + // this->detectorProducer = create_kafka_producer(); - // Setup for Thread Producing Monitor Kafka Events - status = - (asynStatus)(epicsThreadCreate( - "monitor_produce", epicsThreadPriorityMedium, - epicsThreadGetStackSize(epicsThreadStackMedium), - (EPICSTHREADFUNC)::monitorProducerTask, this) == NULL); - if (status) { - printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, - functionName, status); - exit(1); - } + // // Setup for Thread Producing Monitor Kafka Events + // status = + // (asynStatus)(epicsThreadCreate( + // "monitor_produce", epicsThreadPriorityMedium, + // epicsThreadGetStackSize(epicsThreadStackMedium), + // (EPICSTHREADFUNC)::monitorProducerTask, this) == + // NULL); + // if (status) { + // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, + // functionName, status); + // exit(1); + // } - // Setup for Thread Producing Detector Kafka Events + // // Setup for Thread Producing Detector Kafka Events + // status = (asynStatus)(epicsThreadCreate( + // "monitor_produce", epicsThreadPriorityMedium, + // 
epicsThreadGetStackSize(epicsThreadStackMedium), + // (EPICSTHREADFUNC)::detectorProducerTask, + // this) == NULL); + // if (status) { + // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, + // functionName, status); + // exit(1); + // } + // TODO re-enable the kafka stuff + + /* Create the thread that orders the events and acts as our sinqDaq stand-in + */ status = (asynStatus)(epicsThreadCreate( - "monitor_produce", epicsThreadPriorityMedium, + "sinqDAQ", epicsThreadPriorityMedium, epicsThreadGetStackSize(epicsThreadStackMedium), - (EPICSTHREADFUNC)::detectorProducerTask, - this) == NULL); + (EPICSTHREADFUNC)::daqTask, this) == NULL); if (status) { printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, functionName, status); @@ -258,165 +279,134 @@ asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, return status; } -// TODO probably I will have to split this function up, so that the system -// can process the UDP messages in parallel void asynStreamGeneratorDriver::receiveUDP() { - asynStatus status; - int isConnected; - - const size_t buffer_size = 1500; - char buffer[buffer_size]; - size_t received; - int eomReason; - - epicsInt32 val; - epicsInt32 currentStatus; - epicsInt32 countPreset = 0; - epicsInt32 timePreset = 0; - epicsInt32 presetChannel = 0; const char *functionName = "receiveUDP"; + asynStatus status = asynSuccess; + int isConnected = 1; + std::size_t received; + int eomReason; - // TODO epics doesn't seem to support uint64, you would need an array of - // uint32. It does support int64 though.. so we start with that - epicsInt32 *counts = new epicsInt32[this->num_channels]; - - uint64_t start_time = std::numeric_limits::max(); - uint64_t current_time = 0; - epicsInt32 elapsedTime = 0; + // The correlation unit sends messages with a maximum size of 1500 bytes. + // These messages don't have any obvious start or end to synchronise + // against...
+ const std::size_t bufferSize = 1500; + char buffer[bufferSize + 1]; // so that \0 can fit while (true) { - status = getIntegerParam(this->P_Status, ¤tStatus); - if (!currentStatus || status) { - - epicsEventWait(this->pausedEventId); - - getIntegerParam(this->P_CountPreset, &countPreset); - getIntegerParam(this->P_TimePreset, &timePreset); - getIntegerParam(this->P_MonitorChannel, &presetChannel); - - // memset doesn't work with epicsInt32 - for (size_t i = 0; i < this->num_channels; ++i) { - counts[i] = 0; - } - - start_time = std::numeric_limits::max(); - current_time = 0; - elapsedTime = 0; - - lock(); - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], counts[i]); - } - setIntegerParam(P_ElapsedTime, 0); - callParamCallbacks(); - unlock(); - - // Clear the input buffer, in case of stray messages - pasynOctetSyncIO->flush(pasynUDPUser); - } - status = pasynManager->isConnected(pasynUDPUser, &isConnected); + if (!isConnected) asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: isConnected = %d\n", driverName, functionName, isConnected); - status = pasynOctetSyncIO->read(pasynUDPUser, buffer, buffer_size, + status = pasynOctetSyncIO->read(pasynUDPUser, buffer, bufferSize, 0, // timeout &received, &eomReason); if (received) { + UDPHeader *header = (UDPHeader *)buffer; - size_t total_events = (header->BufferLength - 21) / 3; + std::size_t total_events = (header->BufferLength - 21) / 3; - start_time = - std::min(start_time, (uint64_t)(header->nanosecs() / 1e9)); - // This is maybe safer, in case the time wraps back around? 
- // if (start_time == std::numeric_limits::max()) - // start_time = header->nanosecs() /1e9; - - // TODO lots of checks and validation missing everywhere here if (received == total_events * 6 + 42) { - // asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, - // "%s:%s: received packet %d with %d events (%" - // PRIu64 - // ")\n", - // driverName, functionName, - // header->BufferNumber, total_events, - // header->nanosecs()); - for (size_t i = 0; i < total_events; ++i) { + for (std::size_t i = 0; i < total_events; ++i) { char *event = (buffer + 21 * 2 + i * 6); - if (countPreset && counts[presetChannel] >= countPreset) - break; + NormalisedEvent *ne; if (event[5] & 0x80) { // Monitor Event MonitorEvent *m_event = (MonitorEvent *)event; - counts[m_event->DataID + 1] += 1; - // needs to be freed!!! - auto nme = new NormalisedMonitorEvent(); - nme->TimeStamp = - header->nanosecs() + (uint64_t)m_event->nanosecs(); - nme->DataID = m_event->DataID; - this->monitorQueue.push(nme); - - current_time = std::max( - current_time, - (uint64_t)((header->nanosecs() + - (uint64_t)m_event->nanosecs()) / - 1e9)); + ne = new NormalisedEvent( + header->nanosecs() + (uint64_t)m_event->nanosecs(), + 0, m_event->DataID); } else { // Detector Event DetectorEvent *d_event = (DetectorEvent *)event; - counts[0] += 1; // needs to be freed!!! 
- auto nde = new NormalisedDetectorEvent(); - nde->TimeStamp = - header->nanosecs() + (uint64_t)d_event->nanosecs(); - nde->PixID = d_event->pixelId(header->McpdID); - this->detectorQueue.push(nde); - - current_time = std::max( - current_time, - (uint64_t)((header->nanosecs() + - (uint64_t)d_event->nanosecs()) / - 1e9)); + ne = new NormalisedEvent( + header->nanosecs() + (uint64_t)d_event->nanosecs(), + header->McpdID, d_event->pixelId(header->McpdID)); } + + this->udpQueue.push(ne); } - lock(); - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], counts[i]); - } - elapsedTime = current_time - start_time; - setIntegerParam(P_ElapsedTime, elapsedTime); - callParamCallbacks(); - unlock(); } else { asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, "%s:%s: invalid UDP packet\n", driverName, functionName); } + } + } +} - if ((countPreset && counts[presetChannel] >= countPreset) || - (timePreset && elapsedTime >= timePreset)) { - lock(); - setIntegerParam(P_Status, STATUS_IDLE); - setIntegerParam(P_CountPreset, 0); - setIntegerParam(P_TimePreset, 0); - callParamCallbacks(); - unlock(); - } +void asynStreamGeneratorDriver::processEvents() { + + const char *functionName = "processEvents"; + + const size_t queueBufferSize = 10 * this->udpQueue.getSize(); + + struct { + bool operator()(const NormalisedEvent *l, + const NormalisedEvent *r) const { + return l->timestamp > r->timestamp; + } + } smallestToLargest; + + // This should never be used. It is just instantiated to reserve a buffer + // of specific size. + std::vector queueBuffer; + queueBuffer.reserve(queueBufferSize); + + std::priority_queue, + decltype(smallestToLargest)> + timeQueue(smallestToLargest, std::move(queueBuffer)); + + NormalisedEvent *ne; + + uint64_t newest = 0; + + // TODO epics doesn't seem to support uint64, you would need an array of + // uint32. It does support int64 though.. 
so we start with that + epicsInt32 *counts = new epicsInt32[this->num_channels]; + + while (true) { + + if ((ne = this->udpQueue.pop()) != nullptr) { + // TODO overflow in the correlation unit? + newest = std::max(newest, ne->timestamp); + timeQueue.push(ne); } - // epicsThreadSleep(1); // seconds + // idea is to try and guarantee at least 1 packet per id or the min + // frequency for each id without actually checking all ids + if (timeQueue.size() >= 1500 * 10 || + (timeQueue.size() > 0 && + newest - timeQueue.top()->timestamp >= 200'000'000ull)) { + ne = timeQueue.top(); + timeQueue.pop(); + + counts[ne->source == 0 ? ne->pixelId + 1 : 0] += 1; + + delete ne; + + lock(); + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Counts[i], counts[i]); + } + // elapsedTime = current_time - start_time; + // setIntegerParam(P_ElapsedTime, elapsedTime); + callParamCallbacks(); + unlock(); + } } } @@ -497,7 +487,7 @@ void asynStreamGeneratorDriver::produceMonitor() { void asynStreamGeneratorDriver::produceDetector() { - static const size_t bufferSize = this->kafkaMaxPacketSize + 16; + static const std::size_t bufferSize = this->kafkaMaxPacketSize + 16; flatbuffers::FlatBufferBuilder builder(1024); std::vector tof; @@ -612,24 +602,28 @@ extern "C" { asynStatus asynStreamGeneratorDriverConfigure(const char *portName, const char *ipPortName, const int numChannels, + const int udpQueueSize, const int kafkaQueueSize, const int kafkaMaxPacketSize) { new asynStreamGeneratorDriver(portName, ipPortName, numChannels, - kafkaQueueSize, kafkaMaxPacketSize); + udpQueueSize, kafkaQueueSize, + kafkaMaxPacketSize); return asynSuccess; } static const iocshArg initArg0 = {"portName", iocshArgString}; static const iocshArg initArg1 = {"ipPortName", iocshArgString}; static const iocshArg initArg2 = {"numChannels", iocshArgInt}; -static const iocshArg initArg3 = {"kafkaQueueSize", iocshArgInt}; -static const iocshArg initArg4 = {"kafkaMaxPacketSize", iocshArgInt}; +static const 
iocshArg initArg3 = {"udpQueueSize", iocshArgInt}; +static const iocshArg initArg4 = {"kafkaQueueSize", iocshArgInt}; +static const iocshArg initArg5 = {"kafkaMaxPacketSize", iocshArgInt}; static const iocshArg *const initArgs[] = {&initArg0, &initArg1, &initArg2, - &initArg3, &initArg4}; -static const iocshFuncDef initFuncDef = {"asynStreamGenerator", 5, initArgs}; + &initArg3, &initArg4, &initArg5}; +static const iocshFuncDef initFuncDef = {"asynStreamGenerator", 6, initArgs}; static void initCallFunc(const iocshArgBuf *args) { asynStreamGeneratorDriverConfigure(args[0].sval, args[1].sval, args[2].ival, - args[3].ival, args[4].ival); + args[3].ival, args[4].ival, + args[5].ival); } void asynStreamGeneratorDriverRegister(void) { diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index d3b6002..6f8da61 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -66,6 +66,15 @@ struct __attribute__((__packed__)) NormalisedDetectorEvent { uint32_t PixID; }; +struct __attribute__((__packed__)) NormalisedEvent { + uint64_t timestamp; + uint8_t source; + uint32_t pixelId; + + inline NormalisedEvent(uint64_t timestamp, uint8_t source, uint32_t pixelId) + : timestamp(timestamp), source(source), pixelId(pixelId){}; +}; + /******************************************************************************* * Status values that should match the definition in db/daq_common.db */ @@ -101,13 +110,15 @@ struct __attribute__((__packed__)) NormalisedDetectorEvent { class asynStreamGeneratorDriver : public asynPortDriver { public: asynStreamGeneratorDriver(const char *portName, const char *ipPortName, - const int numChannels, const int kafkaQueueSize, + const int numChannels, const int udpQueueSize, + const int kafkaQueueSize, const int kafkaMaxPacketSize); virtual ~asynStreamGeneratorDriver(); virtual asynStatus writeInt32(asynUser *pasynUser, epicsInt32 value); void receiveUDP(); + void processEvents(); void produceMonitor(); void 
produceDetector(); @@ -134,6 +145,8 @@ class asynStreamGeneratorDriver : public asynPortDriver { const int num_channels; const int kafkaMaxPacketSize; + epicsRingPointer udpQueue; + epicsRingPointer monitorQueue; rd_kafka_t *monitorProducer; From 60aa1652c3a01d3ccf92f7225b08559fee60b5f9 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Tue, 4 Nov 2025 10:24:25 +0100 Subject: [PATCH 15/35] again at the point that I can do preset based counts, but now with the priority queue built in so that the events are sorted --- scripts/st.cmd | 2 +- scripts/udp_gen.py | 2 +- src/asynStreamGeneratorDriver.cpp | 199 +++++++++++++++++++++--------- src/asynStreamGeneratorDriver.h | 1 + 4 files changed, 145 insertions(+), 59 deletions(-) diff --git a/scripts/st.cmd b/scripts/st.cmd index 3282bb4..b5e6cc0 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -9,7 +9,7 @@ epicsEnvSet("INSTR", "SQ:TEST:") epicsEnvSet("NAME", "SG") drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 1) -asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, 1000, 8192) +asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, 0, 1000, 8192) dbLoadRecords("$(StreamGenerator_DB)daq_common.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNELS=5") diff --git a/scripts/udp_gen.py b/scripts/udp_gen.py index 297e31b..bb62832 100644 --- a/scripts/udp_gen.py +++ b/scripts/udp_gen.py @@ -106,4 +106,4 @@ while True: sock.sendto(bytes(tosend), ('127.0.0.1', 54321)) mv = memoryview(bytes(header)).cast('H') print(f'Sent packet {mv[3]} with {num_events} events {base_timestamp}') - # time.sleep(0.5) + # time.sleep(1) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 2864e57..508540a 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -88,8 +88,8 @@ asynStatus asynStreamGeneratorDriver::createInt32Param( */ asynStreamGeneratorDriver::asynStreamGeneratorDriver( const char *portName, const char *ipPortName, const int 
numChannels, - const int udpQueueSize, const int kafkaQueueSize, - const int kafkaMaxPacketSize) + const int udpQueueSize, const bool enableKafkaStream, + const int kafkaQueueSize, const int kafkaMaxPacketSize) : asynPortDriver(portName, 1, /* maxAddr */ asynInt32Mask | asynInt64Mask | asynDrvUserMask, /* Interface mask */ @@ -151,35 +151,36 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( // Create Events this->pausedEventId = epicsEventCreate(epicsEventEmpty); - // TODO re-enable the kafka stuff - // this->monitorProducer = create_kafka_producer(); - // this->detectorProducer = create_kafka_producer(); + if (enableKafkaStream) { + this->monitorProducer = create_kafka_producer(); + this->detectorProducer = create_kafka_producer(); - // // Setup for Thread Producing Monitor Kafka Events - // status = - // (asynStatus)(epicsThreadCreate( - // "monitor_produce", epicsThreadPriorityMedium, - // epicsThreadGetStackSize(epicsThreadStackMedium), - // (EPICSTHREADFUNC)::monitorProducerTask, this) == - // NULL); - // if (status) { - // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, - // functionName, status); - // exit(1); - // } + // Setup for Thread Producing Monitor Kafka Events + status = + (asynStatus)(epicsThreadCreate( + "monitor_produce", epicsThreadPriorityMedium, + epicsThreadGetStackSize(epicsThreadStackMedium), + (EPICSTHREADFUNC)::monitorProducerTask, + this) == NULL); + if (status) { + printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, + functionName, status); + exit(1); + } - // // Setup for Thread Producing Detector Kafka Events - // status = (asynStatus)(epicsThreadCreate( - // "monitor_produce", epicsThreadPriorityMedium, - // epicsThreadGetStackSize(epicsThreadStackMedium), - // (EPICSTHREADFUNC)::detectorProducerTask, - // this) == NULL); - // if (status) { - // printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, - // functionName, status); - // exit(1); - // } - // TODO re-enable the kafka 
stuff + // Setup for Thread Producing Detector Kafka Events + status = + (asynStatus)(epicsThreadCreate( + "monitor_produce", epicsThreadPriorityMedium, + epicsThreadGetStackSize(epicsThreadStackMedium), + (EPICSTHREADFUNC)::detectorProducerTask, + this) == NULL); + if (status) { + printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, + functionName, status); + exit(1); + } + } /* Create the thread that orders the events and acts as our sinqDaq stand-in */ @@ -281,6 +282,9 @@ asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, void asynStreamGeneratorDriver::receiveUDP() { + // TODO fix time overflows + // TODO check for lost packets + const char *functionName = "receiveUDP"; asynStatus status = asynSuccess; int isConnected = 1; @@ -370,19 +374,29 @@ void asynStreamGeneratorDriver::processEvents() { decltype(smallestToLargest)> timeQueue(smallestToLargest, std::move(queueBuffer)); - NormalisedEvent *ne; - - uint64_t newest = 0; - // TODO epics doesn't seem to support uint64, you would need an array of // uint32. It does support int64 though.. so we start with that epicsInt32 *counts = new epicsInt32[this->num_channels]; + asynStatus status = asynSuccess; + NormalisedEvent *ne; + uint64_t newestTimestamp = 0; + uint64_t startTimestamp = std::numeric_limits::max(); + uint64_t currTimestamp; + epicsInt32 elapsedSeconds = 0; + epicsInt32 prevStatus = STATUS_IDLE; + epicsInt32 currStatus = STATUS_IDLE; + epicsInt32 countPreset = 0; + epicsInt32 timePreset = 0; + epicsInt32 presetChannel = 0; + while (true) { if ((ne = this->udpQueue.pop()) != nullptr) { - // TODO overflow in the correlation unit? 
- newest = std::max(newest, ne->timestamp); + // we should reastart this ioc at least every few years, as at ns + // resolution with a uint64_t we will have an overflow after around + // 4 years + newestTimestamp = std::max(newestTimestamp, ne->timestamp); timeQueue.push(ne); } @@ -390,22 +404,93 @@ void asynStreamGeneratorDriver::processEvents() { // frequency for each id without actually checking all ids if (timeQueue.size() >= 1500 * 10 || (timeQueue.size() > 0 && - newest - timeQueue.top()->timestamp >= 200'000'000ull)) { + newestTimestamp - timeQueue.top()->timestamp >= 200'000'000ull)) { ne = timeQueue.top(); timeQueue.pop(); - counts[ne->source == 0 ? ne->pixelId + 1 : 0] += 1; + status = getIntegerParam(this->P_Status, &currStatus); + + if (currStatus == STATUS_COUNTING && prevStatus == STATUS_IDLE) { + // Starting a new count + + // get current count configuration + getIntegerParam(this->P_CountPreset, &countPreset); + getIntegerParam(this->P_TimePreset, &timePreset); + getIntegerParam(this->P_MonitorChannel, &presetChannel); + + // reset status variables + startTimestamp = std::numeric_limits::max(); + for (size_t i = 0; i < this->num_channels; ++i) { + counts[i] = 0; + } + + // reset pvs + lock(); + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Counts[i], counts[i]); + } + setIntegerParam(P_ElapsedTime, 0); + callParamCallbacks(); + unlock(); + + // TODO might consider throwing out current buffer as it is + // from before count started? then again, 0.2 ms or whatever is + // set above is quite a small preceeding amount of time, so + // maybe it doesn't matter + } + + prevStatus = currStatus; + + if (currStatus == STATUS_COUNTING) { + startTimestamp = std::min(startTimestamp, ne->timestamp); + counts[ne->source == 0 ? ne->pixelId + 1 : 0] += 1; + currTimestamp = ne->timestamp; + elapsedSeconds = + 0 ? 
currTimestamp <= startTimestamp + : ((double)(currTimestamp - startTimestamp)) / 1e9; + } delete ne; - lock(); - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], counts[i]); + // is our count finished? + if ((countPreset && counts[presetChannel] >= countPreset) || + (timePreset && elapsedSeconds >= timePreset)) { + + // add any remaining events with the same timestamp + // we could theoretically have a small overrun if the + // timestamps are identical on the monitor channel + while (!timeQueue.empty() && + timeQueue.top()->timestamp == currTimestamp) { + ne = timeQueue.top(); + timeQueue.pop(); + counts[ne->source == 0 ? ne->pixelId + 1 : 0] += 1; + delete ne; + } + + countPreset = 0; + timePreset = 0; + + lock(); + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Counts[i], counts[i]); + } + setIntegerParam(P_ElapsedTime, elapsedSeconds); + setIntegerParam(P_CountPreset, countPreset); + setIntegerParam(P_TimePreset, timePreset); + callParamCallbacks(); + setIntegerParam(P_Status, STATUS_IDLE); + callParamCallbacks(); + unlock(); + + } else if (currStatus == STATUS_COUNTING) { + lock(); + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Counts[i], counts[i]); + } + setIntegerParam(P_ElapsedTime, elapsedSeconds); + callParamCallbacks(); + unlock(); + } - // elapsedTime = current_time - start_time; - setIntegerParam(P_ElapsedTime, elapsedTime); - callParamCallbacks(); - unlock(); } } } @@ -599,15 +684,13 @@ void asynStreamGeneratorDriver::produceDetector() { */ extern "C" { -asynStatus asynStreamGeneratorDriverConfigure(const char *portName, - const char *ipPortName, - const int numChannels, - const int udpQueueSize, - const int kafkaQueueSize, - const int kafkaMaxPacketSize) { +asynStatus asynStreamGeneratorDriverConfigure( + const char *portName, const char *ipPortName, const int numChannels, + const int udpQueueSize, const bool enableKafkaStream, + const int kafkaQueueSize, const int kafkaMaxPacketSize) {
new asynStreamGeneratorDriver(portName, ipPortName, numChannels, - udpQueueSize, kafkaQueueSize, - kafkaMaxPacketSize); + udpQueueSize, enableKafkaStream, + kafkaQueueSize, kafkaMaxPacketSize); return asynSuccess; } @@ -615,15 +698,17 @@ static const iocshArg initArg0 = {"portName", iocshArgString}; static const iocshArg initArg1 = {"ipPortName", iocshArgString}; static const iocshArg initArg2 = {"numChannels", iocshArgInt}; static const iocshArg initArg3 = {"udpQueueSize", iocshArgInt}; -static const iocshArg initArg4 = {"kafkaQueueSize", iocshArgInt}; -static const iocshArg initArg5 = {"kafkaMaxPacketSize", iocshArgInt}; +static const iocshArg initArg4 = {"enableKafkaStream", iocshArgInt}; +static const iocshArg initArg5 = {"kafkaQueueSize", iocshArgInt}; +static const iocshArg initArg6 = {"kafkaMaxPacketSize", iocshArgInt}; static const iocshArg *const initArgs[] = {&initArg0, &initArg1, &initArg2, - &initArg3, &initArg4, &initArg5}; + &initArg3, &initArg4, &initArg5, + &initArg6}; static const iocshFuncDef initFuncDef = {"asynStreamGenerator", 6, initArgs}; static void initCallFunc(const iocshArgBuf *args) { asynStreamGeneratorDriverConfigure(args[0].sval, args[1].sval, args[2].ival, - args[3].ival, args[4].ival, - args[5].ival); + args[3].ival, args[4].ival, args[5].ival, + args[6].ival); } void asynStreamGeneratorDriverRegister(void) { diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index 6f8da61..cc965ff 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -111,6 +111,7 @@ class asynStreamGeneratorDriver : public asynPortDriver { public: asynStreamGeneratorDriver(const char *portName, const char *ipPortName, const int numChannels, const int udpQueueSize, + const bool enableKafkaStream, const int kafkaQueueSize, const int kafkaMaxPacketSize); virtual ~asynStreamGeneratorDriver(); From 2c47f338c278d69d1ce5f5721980e487be304210 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Tue, 4 Nov 2025 
13:56:44 +0100 Subject: [PATCH 16/35] can send kafka messages again and can set the broker and topics in the start command --- db/daq_common.db | 2 +- scripts/st.cmd | 3 +- src/asynStreamGeneratorDriver.cpp | 174 ++++++++++++++++-------------- src/asynStreamGeneratorDriver.h | 11 +- 4 files changed, 103 insertions(+), 87 deletions(-) diff --git a/db/daq_common.db b/db/daq_common.db index 96a1cd3..631fd8a 100644 --- a/db/daq_common.db +++ b/db/daq_common.db @@ -129,7 +129,7 @@ record(longout, "$(INSTR)$(NAME):MONITOR-CHANNEL") field(DESC, "PRESET-COUNT Monitors this channel") field(DTYP, "asynInt32") field(OUT, "@asyn($(PORT),0,$(TIMEOUT=1)) MONITOR") - field(DRVL, "1") # Smallest Monitor Channel + field(DRVL, "0") # Smallest Monitor Channel field(DRVH, "$(CHANNELS)") # Largest Monitor Channel } diff --git a/scripts/st.cmd b/scripts/st.cmd index b5e6cc0..5802ad2 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -9,7 +9,8 @@ epicsEnvSet("INSTR", "SQ:TEST:") epicsEnvSet("NAME", "SG") drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 1) -asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, 0, 1000, 8192) +asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "linkafka01:9092", "NEWEFU_TEST", "NEWEFU_TEST2", 1000, 8192) +# asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "", "", "", 0, 0) dbLoadRecords("$(StreamGenerator_DB)daq_common.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNELS=5") diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 508540a..dfe1033 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -28,15 +28,17 @@ static void set_kafka_config_key(rd_kafka_conf_t *conf, char *key, } } -static rd_kafka_t *create_kafka_producer() { +static rd_kafka_t *create_kafka_producer(const char *kafkaBroker) { char errstr[512]; rd_kafka_t *producer; // Prepare configuration object rd_kafka_conf_t *conf = rd_kafka_conf_new(); - set_kafka_config_key(conf, 
"bootstrap.servers", "linkafka01:9092"); - set_kafka_config_key(conf, "queue.buffering.max.messages", "1e7"); + // TODO feel not great about this + set_kafka_config_key(conf, "bootstrap.servers", + const_cast(kafkaBroker)); + set_kafka_config_key(conf, "queue.buffering.max.messages", "10000000"); // Create the Producer producer = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); @@ -89,7 +91,9 @@ asynStatus asynStreamGeneratorDriver::createInt32Param( asynStreamGeneratorDriver::asynStreamGeneratorDriver( const char *portName, const char *ipPortName, const int numChannels, const int udpQueueSize, const bool enableKafkaStream, - const int kafkaQueueSize, const int kafkaMaxPacketSize) + const char *kafkaBroker, const char *monitorTopic, + const char *detectorTopic, const int kafkaQueueSize, + const int kafkaMaxPacketSize) : asynPortDriver(portName, 1, /* maxAddr */ asynInt32Mask | asynInt64Mask | asynDrvUserMask, /* Interface mask */ @@ -101,8 +105,10 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( 1, /* Autoconnect */ 0, /* Default priority */ 0), /* Default stack size*/ - num_channels(numChannels + 1), udpQueue(udpQueueSize, false), - monitorQueue(kafkaQueueSize, false), detectorQueue(kafkaQueueSize, false), + num_channels(numChannels + 1), kafkaEnabled(enableKafkaStream), + monitorTopic(monitorTopic), detectorTopic(detectorTopic), + udpQueue(udpQueueSize, false), monitorQueue(kafkaQueueSize, false), + detectorQueue(kafkaQueueSize, false), kafkaMaxPacketSize(kafkaMaxPacketSize) { const char *functionName = "asynStreamGeneratorDriver"; @@ -143,8 +149,9 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( } if (status) { - printf("%s:%s: failed to create or setup parameters, status=%d\n", - driverName, functionName, status); + epicsStdoutPrintf( + "%s:%s: failed to create or setup parameters, status=%d\n", + driverName, functionName, status); exit(1); } @@ -152,8 +159,21 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( 
this->pausedEventId = epicsEventCreate(epicsEventEmpty); if (enableKafkaStream) { - this->monitorProducer = create_kafka_producer(); - this->detectorProducer = create_kafka_producer(); + + epicsStdoutPrintf( + "Detector Kafka Config: broker=%s, topic=%s\n " + " queue size:%d, max events per packet: %d\n", + kafkaBroker, this->detectorTopic, kafkaQueueSize, + this->kafkaMaxPacketSize); + + epicsStdoutPrintf( + "Monitors Kafka Config: broker=%s, topic=%s\n " + " queue size:%d, max events per packet: %d\n", + kafkaBroker, this->monitorTopic, kafkaQueueSize, + this->kafkaMaxPacketSize); + + this->monitorProducer = create_kafka_producer(kafkaBroker); + this->detectorProducer = create_kafka_producer(kafkaBroker); // Setup for Thread Producing Monitor Kafka Events status = @@ -163,8 +183,8 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( (EPICSTHREADFUNC)::monitorProducerTask, this) == NULL); if (status) { - printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, - functionName, status); + epicsStdoutPrintf("%s:%s: epicsThreadCreate failure, status=%d\n", + driverName, functionName, status); exit(1); } @@ -176,10 +196,13 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( (EPICSTHREADFUNC)::detectorProducerTask, this) == NULL); if (status) { - printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, - functionName, status); + epicsStdoutPrintf("%s:%s: epicsThreadCreate failure, status=%d\n", + driverName, functionName, status); exit(1); } + } else { + + epicsStdoutPrintf("Kafka Stream Disabled\n"); } /* Create the thread that orders the events and acts as our sinqDaq stand-in @@ -189,8 +212,8 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( epicsThreadGetStackSize(epicsThreadStackMedium), (EPICSTHREADFUNC)::daqTask, this) == NULL); if (status) { - printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, - functionName, status); + epicsStdoutPrintf("%s:%s: epicsThreadCreate failure, status=%d\n", + driverName, 
functionName, status); exit(1); } @@ -198,8 +221,8 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( status = pasynOctetSyncIO->connect(ipPortName, 0, &pasynUDPUser, NULL); if (status) { - printf("%s:%s: Couldn't open connection %s, status=%d\n", driverName, - functionName, ipPortName, status); + epicsStdoutPrintf("%s:%s: Couldn't open connection %s, status=%d\n", + driverName, functionName, ipPortName, status); exit(1); } @@ -209,8 +232,8 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( epicsThreadGetStackSize(epicsThreadStackMedium), (EPICSTHREADFUNC)::udpPollerTask, this) == NULL); if (status) { - printf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, - functionName, status); + epicsStdoutPrintf("%s:%s: epicsThreadCreate failure, status=%d\n", + driverName, functionName, status); exit(1); } } @@ -352,6 +375,18 @@ void asynStreamGeneratorDriver::receiveUDP() { } } +inline void asynStreamGeneratorDriver::queueForKafka(NormalisedEvent *ne) { + + if (this->kafkaEnabled) { + if (ne->source == 0) + this->monitorQueue.push(ne); + else + this->detectorQueue.push(ne); + } else { + delete ne; + } +} + void asynStreamGeneratorDriver::processEvents() { const char *functionName = "processEvents"; @@ -448,9 +483,11 @@ void asynStreamGeneratorDriver::processEvents() { elapsedSeconds = 0 ? currTimestamp <= startTimestamp : ((double)(currTimestamp - startTimestamp)) / 1e9; - } - delete ne; + this->queueForKafka(ne); + } else { + delete ne; + } // is our count finished? if ((countPreset && counts[presetChannel] >= countPreset) || @@ -464,7 +501,7 @@ void asynStreamGeneratorDriver::processEvents() { ne = timeQueue.top(); timeQueue.pop(); counts[ne->source == 0 ? 
ne->pixelId + 1 : 0] += 1; - delete ne; + this->queueForKafka(ne); } countPreset = 0; @@ -516,8 +553,8 @@ void asynStreamGeneratorDriver::produceMonitor() { ++total; auto nme = this->monitorQueue.pop(); - tof.push_back(nme->TimeStamp); - did.push_back(nme->DataID); + tof.push_back(nme->timestamp); + did.push_back(nme->pixelId); delete nme; } else { @@ -547,7 +584,7 @@ void asynStreamGeneratorDriver::produceMonitor() { builder.Finish(message, "ev42"); rd_kafka_resp_err_t err = rd_kafka_producev( - monitorProducer, RD_KAFKA_V_TOPIC("NEWEFU_TEST"), + monitorProducer, RD_KAFKA_V_TOPIC(this->monitorTopic), RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), // RD_KAFKA_V_KEY((void *)key, key_len), RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), @@ -556,9 +593,9 @@ void asynStreamGeneratorDriver::produceMonitor() { RD_KAFKA_V_END); if (err) { - // TODO - // g_error("Failed to produce to topic %s: %s", topic, - // rd_kafka_err2str(err)); + epicsStdoutPrintf("Failed to produce to topic %s: %s\n", + this->monitorTopic, + rd_kafka_err2str(err)); } rd_kafka_poll(monitorProducer, 0); @@ -586,55 +623,22 @@ void asynStreamGeneratorDriver::produceDetector() { uint64_t message_id = 0; - struct { - bool operator()(const uint64_t l, const uint64_t r) const { - return l > r; - } - } smallestToLargest; - - // This should never be used. It is just instantiated to reserve a buffer - // of specific size. 
- std::vector queueBuffer; - queueBuffer.reserve(bufferSize); - - std::priority_queue, - decltype(smallestToLargest)> - timeQueue(smallestToLargest, std::move(queueBuffer)); - - uint64_t newest = 0; - while (true) { if (!this->detectorQueue.isEmpty()) { ++total; auto nde = this->detectorQueue.pop(); - tof.push_back(nde->TimeStamp); - did.push_back(nde->PixID); - - newest = std::max(newest, nde->TimeStamp); - timeQueue.push(nde->TimeStamp); - + tof.push_back(nde->timestamp); + did.push_back(nde->pixelId); delete nde; + } else { + // TODO + // rd_kafka_flush(detectorProducer, 10 * 1000); epicsThreadSleep(0.001); // seconds } - while (!timeQueue.empty() && - (timeQueue.size() >= this->kafkaMaxPacketSize || - (newest - timeQueue.top()) > 5'000'000'000ull)) - timeQueue.pop(); - epicsInt32 rate = 0; - if (timeQueue.size() > 1) { - rate = ((double)timeQueue.size() / - ((double)(newest - timeQueue.top()) * 1e-9)); - } - - lock(); - setIntegerParam(P_Rates[0], rate); - callParamCallbacks(); - unlock(); - epicsTimeStamp now = epicsTime::getCurrent(); // At least every 0.2 seconds @@ -656,7 +660,7 @@ void asynStreamGeneratorDriver::produceDetector() { builder.Finish(message, "ev42"); rd_kafka_resp_err_t err = rd_kafka_producev( - detectorProducer, RD_KAFKA_V_TOPIC("NEWEFU_TEST2"), + detectorProducer, RD_KAFKA_V_TOPIC(this->detectorTopic), RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), // RD_KAFKA_V_KEY((void *)key, key_len), RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), @@ -665,9 +669,9 @@ void asynStreamGeneratorDriver::produceDetector() { RD_KAFKA_V_END); if (err) { - // TODO - // g_error("Failed to produce to topic %s: %s", topic, - // rd_kafka_err2str(err)); + epicsStdoutPrintf("Failed to produce to topic %s: %s\n", + this->detectorTopic, + rd_kafka_err2str(err)); } rd_kafka_poll(detectorProducer, 0); @@ -686,11 +690,13 @@ extern "C" { asynStatus asynStreamGeneratorDriverConfigure( const char *portName, const char *ipPortName, const int numChannels, - const int 
udpQueueSize, const bool enableKafkaStream, - const int kafkaQueueSize, const int kafkaMaxPacketSize) { + const int udpQueueSize, const char *kafkaBroker, const char *monitorTopic, + const char *detectorTopic, const int kafkaQueueSize, + const int kafkaMaxPacketSize) { new asynStreamGeneratorDriver(portName, ipPortName, numChannels, - udpQueueSize, enableKafkaStream, - kafkaQueueSize, kafkaMaxPacketSize); + udpQueueSize, kafkaBroker[0], kafkaBroker, + monitorTopic, detectorTopic, kafkaQueueSize, + kafkaMaxPacketSize); return asynSuccess; } @@ -698,17 +704,19 @@ static const iocshArg initArg0 = {"portName", iocshArgString}; static const iocshArg initArg1 = {"ipPortName", iocshArgString}; static const iocshArg initArg2 = {"numChannels", iocshArgInt}; static const iocshArg initArg3 = {"udpQueueSize", iocshArgInt}; -static const iocshArg initArg4 = {"enableKafkaStream", iocshArgInt}; -static const iocshArg initArg5 = {"kafkaQueueSize", iocshArgInt}; -static const iocshArg initArg6 = {"kafkaMaxPacketSize", iocshArgInt}; +static const iocshArg initArg4 = {"kafkaBroker", iocshArgString}; +static const iocshArg initArg5 = {"monitorTopic", iocshArgString}; +static const iocshArg initArg6 = {"detectorTopic", iocshArgString}; +static const iocshArg initArg7 = {"kafkaQueueSize", iocshArgInt}; +static const iocshArg initArg8 = {"kafkaMaxPacketSize", iocshArgInt}; static const iocshArg *const initArgs[] = {&initArg0, &initArg1, &initArg2, &initArg3, &initArg4, &initArg5, - &initArg6}; -static const iocshFuncDef initFuncDef = {"asynStreamGenerator", 6, initArgs}; + &initArg6, &initArg7, &initArg8}; +static const iocshFuncDef initFuncDef = {"asynStreamGenerator", 9, initArgs}; static void initCallFunc(const iocshArgBuf *args) { - asynStreamGeneratorDriverConfigure(args[0].sval, args[1].sval, args[2].ival, - args[3].ival, args[4].ival, args[5].ival, - args[6].ival); + asynStreamGeneratorDriverConfigure( + args[0].sval, args[1].sval, args[2].ival, args[3].ival, args[4].sval, + 
args[5].sval, args[6].sval, args[7].ival, args[8].ival); } void asynStreamGeneratorDriverRegister(void) { diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index cc965ff..72075e2 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -112,6 +112,8 @@ class asynStreamGeneratorDriver : public asynPortDriver { asynStreamGeneratorDriver(const char *portName, const char *ipPortName, const int numChannels, const int udpQueueSize, const bool enableKafkaStream, + const char *kafkaBroker, const char *monitorTopic, + const char *detectorTopic, const int kafkaQueueSize, const int kafkaMaxPacketSize); virtual ~asynStreamGeneratorDriver(); @@ -144,20 +146,25 @@ class asynStreamGeneratorDriver : public asynPortDriver { epicsEventId pausedEventId; const int num_channels; + const bool kafkaEnabled; const int kafkaMaxPacketSize; epicsRingPointer udpQueue; - epicsRingPointer monitorQueue; + epicsRingPointer monitorQueue; rd_kafka_t *monitorProducer; + const char *monitorTopic; - epicsRingPointer detectorQueue; + epicsRingPointer detectorQueue; rd_kafka_t *detectorProducer; + const char *detectorTopic; constexpr static char *driverName = "StreamGenerator"; asynStatus createInt32Param(asynStatus status, char *name, int *variable, epicsInt32 initialValue = 0); + + inline void queueForKafka(NormalisedEvent *ne); }; #endif From ecc6e98f4c4bbc90cae9ffae37694fc212427147 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Tue, 4 Nov 2025 15:31:28 +0100 Subject: [PATCH 17/35] can stop count and clear channels --- db/channels.db | 2 + db/daq_common.db | 3 + scripts/st.cmd | 4 +- src/asynStreamGeneratorDriver.cpp | 167 +++++++++++++++++++----------- 4 files changed, 116 insertions(+), 60 deletions(-) diff --git a/db/channels.db b/db/channels.db index bb7a3fe..0b94f18 100644 --- a/db/channels.db +++ b/db/channels.db @@ -25,6 +25,7 @@ record(seq, "$(INSTR)$(NAME):O$(CHANNEL)") field(DO0, 0) field(SELM, "Specified") field(SELL, 
"$(INSTR)$(NAME):M$(CHANNEL).VAL") + field(SCAN, ".1 second") } # Current Status of Channel, i.e. is it ready to count? @@ -34,6 +35,7 @@ record(bi, "$(INSTR)$(NAME):S$(CHANNEL)") field(VAL, 0) field(ZNAM, "OK") field(ONAM, "CLEARING") + field(PINI, 1) } ################################################################################ diff --git a/db/daq_common.db b/db/daq_common.db index 631fd8a..410b80d 100644 --- a/db/daq_common.db +++ b/db/daq_common.db @@ -66,6 +66,7 @@ record(seq, "$(INSTR)$(NAME):ETO") field(DO0, 0) field(SELM, "Specified") field(SELL, "$(INSTR)$(NAME):ELAPSED-TIME.VAL") + field(SCAN, ".1 second") } # Current Status of Channel, i.e. is it ready to count? @@ -75,6 +76,7 @@ record(bi, "$(INSTR)$(NAME):ETS") field(VAL, 0) field(ZNAM, "OK") field(ONAM, "CLEARING") + field(PINI, 1) } ################################################################################ @@ -201,4 +203,5 @@ record(ai,"$(INSTR)$(NAME):ELAPSED-TIME") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) TIME") field(SCAN, "I/O Intr") field(PINI, "YES") + # field(FLNK, "$(INSTR)$(NAME):ETO") } diff --git a/scripts/st.cmd b/scripts/st.cmd index 5802ad2..489cf84 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -9,8 +9,8 @@ epicsEnvSet("INSTR", "SQ:TEST:") epicsEnvSet("NAME", "SG") drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 1) -asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "linkafka01:9092", "NEWEFU_TEST", "NEWEFU_TEST2", 1000, 8192) -# asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "", "", "", 0, 0) +# asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "linkafka01:9092", "NEWEFU_TEST", "NEWEFU_TEST2", 1000, 8192) +asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "", "", "", 0, 0) dbLoadRecords("$(StreamGenerator_DB)daq_common.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNELS=5") diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index dfe1033..4862233 100644 --- 
a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -156,7 +156,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( } // Create Events - this->pausedEventId = epicsEventCreate(epicsEventEmpty); + // this->pausedEventId = epicsEventCreate(epicsEventEmpty); if (enableKafkaStream) { @@ -259,36 +259,77 @@ asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, const char *functionName = "writeInt32"; getParamName(function, ¶mName); - // if (status) { - // epicsSnprintf(pasynUser->errorMessage, pasynUser->errorMessageSize, - // "%s:%s: status=%d, function=%d, name=%s, value=%d", - // driverName, functionName, status, function, paramName, - // value); - // return status; - // } + // TODO should maybe lock mutex for this + epicsInt32 currentStatus; + status = getIntegerParam(this->P_Status, ¤tStatus); + if (status) { + epicsSnprintf(pasynUser->errorMessage, pasynUser->errorMessageSize, + "%s:%s: status=%d, function=%d, name=%s, value=%d", + driverName, functionName, status, function, paramName, + value); + return status; + } + + // TODO clean up + bool isClearCount = false; + size_t channelToClear; + for (size_t i = 0; i < this->num_channels; ++i) { + isClearCount |= function == P_ClearCounts[i]; + if (isClearCount) { + channelToClear = i; + break; + } + } + + // TODO should check everything... 
if (function == P_CountPreset) { - // TODO should block setting a preset when already set - setIntegerParam(function, value); - setIntegerParam(P_Status, STATUS_COUNTING); - status = (asynStatus)callParamCallbacks(); - epicsEventSignal(this->pausedEventId); + if (!currentStatus) { + setIntegerParam(function, value); + setIntegerParam(P_Status, STATUS_COUNTING); + status = (asynStatus)callParamCallbacks(); + } else { + return asynError; + } } else if (function == P_TimePreset) { - // TODO should block setting a preset when already set - setIntegerParam(function, value); - setIntegerParam(P_Status, STATUS_COUNTING); - status = (asynStatus)callParamCallbacks(); - epicsEventSignal(this->pausedEventId); + if (!currentStatus) { + setIntegerParam(function, value); + setIntegerParam(P_Status, STATUS_COUNTING); + status = (asynStatus)callParamCallbacks(); + } else { + return asynError; + } + } else if (function == P_ClearElapsedTime) { + if (!currentStatus) { + setIntegerParam(P_ElapsedTime, 0); + status = (asynStatus)callParamCallbacks(); + } else { + return asynError; + } + } else if (isClearCount) { + if (!currentStatus) { + setIntegerParam(P_Counts[channelToClear], 0); + status = (asynStatus)callParamCallbacks(); + } else { + return asynError; + } } else if (function == P_Reset) { + lock(); // TODO should probably set back everything to defaults setIntegerParam(P_Status, STATUS_IDLE); status = (asynStatus)callParamCallbacks(); + unlock(); + } else if (function == P_Stop) { + lock(); + setIntegerParam(P_Status, STATUS_IDLE); + status = (asynStatus)callParamCallbacks(); + unlock(); } else if (function == P_MonitorChannel) { - epicsInt32 currentStatus; - getIntegerParam(this->P_Status, ¤tStatus); if (!currentStatus) { setIntegerParam(function, value); status = (asynStatus)callParamCallbacks(); + } else { + return asynError; } } else { setIntegerParam(function, value); @@ -478,55 +519,65 @@ void asynStreamGeneratorDriver::processEvents() { if (currStatus == STATUS_COUNTING) 
{ startTimestamp = std::min(startTimestamp, ne->timestamp); - counts[ne->source == 0 ? ne->pixelId + 1 : 0] += 1; currTimestamp = ne->timestamp; elapsedSeconds = 0 ? currTimestamp <= startTimestamp : ((double)(currTimestamp - startTimestamp)) / 1e9; - this->queueForKafka(ne); - } else { - delete ne; - } + // is our count finished? + if ((countPreset && counts[presetChannel] >= countPreset) || + (timePreset && elapsedSeconds >= timePreset)) { - // is our count finished? - if ((countPreset && counts[presetChannel] >= countPreset) || - (timePreset && elapsedSeconds >= timePreset)) { + // filter out events that occured after the specified time + if (ne->timestamp - startTimestamp <= countPreset) { + counts[ne->source == 0 ? ne->pixelId + 1 : 0] += 1; + this->queueForKafka(ne); + + // add any remaining events with the same timestamp + // we could theoretically have a small overrun if the + // timestamps are identical on the monitor channel + while (!timeQueue.empty() && + !timeQueue.top()->timestamp == currTimestamp) { + ne = timeQueue.top(); + timeQueue.pop(); + counts[ne->source == 0 ? ne->pixelId + 1 : 0] += 1; + this->queueForKafka(ne); + } + } else { + delete ne; + } + + countPreset = 0; + timePreset = 0; + + lock(); + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Counts[i], counts[i]); + } + setIntegerParam(P_ElapsedTime, elapsedSeconds); + setIntegerParam(P_CountPreset, countPreset); + setIntegerParam(P_TimePreset, timePreset); + callParamCallbacks(); + setIntegerParam(P_Status, STATUS_IDLE); + callParamCallbacks(); + unlock(); + + } else { - // add any remaining events with the same timestamp - // we could theoretically have a small overrun if the - // timestamps are identical on the monitor channel - while (!timeQueue.empty() && - !timeQueue.top()->timestamp == currTimestamp) { - ne = timeQueue.top(); - timeQueue.pop(); counts[ne->source == 0 ? 
ne->pixelId + 1 : 0] += 1; this->queueForKafka(ne); + + lock(); + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Counts[i], counts[i]); + } + setIntegerParam(P_ElapsedTime, elapsedSeconds); + callParamCallbacks(); + unlock(); } - countPreset = 0; - timePreset = 0; - - lock(); - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], counts[i]); - } - setIntegerParam(P_ElapsedTime, elapsedSeconds); - setIntegerParam(P_CountPreset, countPreset); - setIntegerParam(P_TimePreset, timePreset); - callParamCallbacks(); - setIntegerParam(P_Status, STATUS_IDLE); - callParamCallbacks(); - unlock(); - - } else if (currStatus == STATUS_COUNTING) { - lock(); - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], counts[i]); - } - setIntegerParam(P_ElapsedTime, elapsedSeconds); - callParamCallbacks(); - unlock(); + } else { + delete ne; } } } From 1ce7f93e95f122d722ba6ea90a754833fa04bb67 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Tue, 4 Nov 2025 16:19:28 +0100 Subject: [PATCH 18/35] adds a simple rate calculation --- src/asynStreamGeneratorDriver.cpp | 45 ++++++++++++++++++++++++++++++- src/asynStreamGeneratorDriver.h | 9 ------- 2 files changed, 44 insertions(+), 10 deletions(-) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 4862233..b7c9880 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -359,7 +359,13 @@ void asynStreamGeneratorDriver::receiveUDP() { // These messages don't have any obious start or end to synchronise // against... const std::size_t bufferSize = 1500; - char buffer[bufferSize + 1]; // so that \0 can fit + char buffer[bufferSize]; + + // We have 10 mcpdids + uint64_t lastBufferNumber* = new uint64_t[10]; + for (size_t i = 0; i < 10; ++i) { + lastBufferNumber[i] = 0; + } while (true) { @@ -454,6 +460,12 @@ void asynStreamGeneratorDriver::processEvents() { // uint32. It does support int64 though.. 
so we start with that epicsInt32 *counts = new epicsInt32[this->num_channels]; + size_t countDiffsPtr = 0; + epicsInt32 *rates = new epicsInt32[this->num_channels]; + epicsInt32 *countDiff = new epicsInt32[this->num_channels]; + epicsInt32 *countDiffs = new epicsInt32[this->num_channels * 10]; + epicsTimeStamp lastRateUpdate = epicsTime::getCurrent(); + asynStatus status = asynSuccess; NormalisedEvent *ne; uint64_t newestTimestamp = 0; @@ -473,6 +485,9 @@ void asynStreamGeneratorDriver::processEvents() { // resolution with a uint64_t we will have an overflow after around // 4 years newestTimestamp = std::max(newestTimestamp, ne->timestamp); + + ++countDiff[ne->source == 0 ? ne->pixelId + 1 : 0]; + timeQueue.push(ne); } @@ -580,6 +595,34 @@ void asynStreamGeneratorDriver::processEvents() { delete ne; } } + + // Careful changing any of these magic numbers until I clean this up + // as you might end up calculating the wrong rate + epicsTimeStamp currentTime = epicsTime::getCurrent(); + if (epicsTimeDiffInNS(¤tTime, &lastRateUpdate) > 100'000'000ll) { + lastRateUpdate = currentTime; + + for (size_t i = 0; i <= this->num_channels; ++i) { + countDiffs[i * 10 + countDiffsPtr] = countDiff[i]; + + uint64_t cnt = 0; + for (size_t j = 0; j <= 10; ++j) { + cnt += countDiffs[i * 10 + j]; + } + rates[i] = cnt / 10.; + + countDiff[i] = 0; + } + + countDiffsPtr = (countDiffsPtr + 1) % 10; + + lock(); + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Rates[i], rates[i]); + } + callParamCallbacks(); + unlock(); + } } } diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index 72075e2..0cc6579 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -56,15 +56,6 @@ struct __attribute__((__packed__)) MonitorEvent { /******************************************************************************* * Simplified Event Struct Definition */ -struct __attribute__((__packed__)) NormalisedMonitorEvent { - uint64_t 
TimeStamp; - uint8_t DataID : 4; -}; - -struct __attribute__((__packed__)) NormalisedDetectorEvent { - uint64_t TimeStamp; - uint32_t PixID; -}; struct __attribute__((__packed__)) NormalisedEvent { uint64_t timestamp; From 056b0a5f8a9472d8166b4d2f8c5b9d1b4590e191 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Tue, 4 Nov 2025 17:01:18 +0100 Subject: [PATCH 19/35] check for udp packets being missed --- scripts/udp_gen.py | 17 +++++++++++++---- src/asynStreamGeneratorDriver.cpp | 18 ++++++++++++++++-- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/scripts/udp_gen.py b/scripts/udp_gen.py index bb62832..3f01887 100644 --- a/scripts/udp_gen.py +++ b/scripts/udp_gen.py @@ -28,10 +28,11 @@ data = [ start_time = time.time_ns() // 100 +buffer_ids = { + i: (0, 0) for i in range(10) +} + while True: - # update buffer number - header[6] = (header[6] + 1) % 0xff - header[7] = (header[7] + (header[6] == 0)) % 0xff # update timestamp base_timestamp = time.time_ns() // 100 - start_time @@ -60,9 +61,16 @@ while True: # reduce also the number of checks on the parsing side of things... 
is_monitor = random.randint(0, 9) + # is_monitor = 4 header[11] = 0 if is_monitor > 3 else random.randint(1,9) + # update buffer number (each mcpdid has its own buffer number count) + header[6], header[7] = buffer_ids[header[11]] + header[6] = (header[6] + 1) % (0xff + 1) + header[7] = (header[7] + (header[6] == 0)) % (0xff + 1) + buffer_ids[header[11]] = header[6], header[7] + tosend = list(header) if is_monitor > 3: @@ -71,6 +79,7 @@ while True: d = list(data) monitor = random.randint(0,3) + # monitor = 0 d[5] = (1 << 7) | monitor @@ -106,4 +115,4 @@ while True: sock.sendto(bytes(tosend), ('127.0.0.1', 54321)) mv = memoryview(bytes(header)).cast('H') print(f'Sent packet {mv[3]} with {num_events} events {base_timestamp}') - # time.sleep(1) + # time.sleep(.01) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index b7c9880..dc2b3d6 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -208,7 +208,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( /* Create the thread that orders the events and acts as our sinqDaq stand-in */ status = (asynStatus)(epicsThreadCreate( - "sinqDAQ", epicsThreadPriorityMedium, + "sinqDAQ", epicsThreadPriorityMax, epicsThreadGetStackSize(epicsThreadStackMedium), (EPICSTHREADFUNC)::daqTask, this) == NULL); if (status) { @@ -362,7 +362,7 @@ void asynStreamGeneratorDriver::receiveUDP() { char buffer[bufferSize]; // We have 10 mcpdids - uint64_t lastBufferNumber* = new uint64_t[10]; + uint64_t *lastBufferNumber = new uint64_t[10]; for (size_t i = 0; i < 10; ++i) { lastBufferNumber[i] = 0; } @@ -388,6 +388,20 @@ void asynStreamGeneratorDriver::receiveUDP() { if (received == total_events * 6 + 42) { + if (header->BufferNumber - lastBufferNumber[header->McpdID] > + 1 && + lastBufferNumber[header->McpdID] != + std::numeric_limits< + decltype(header->BufferNumber)>::max()) { + asynPrint( + pasynUserSelf, ASYN_TRACE_ERROR, + "%s:%s: missed packet on id: %d. 
Received: %" PRIu64 + ", last: %" PRIu64 "\n", + driverName, functionName, header->McpdID, + header->BufferNumber, lastBufferNumber[header->McpdID]); + } + lastBufferNumber[header->McpdID] = header->BufferNumber; + for (std::size_t i = 0; i < total_events; ++i) { char *event = (buffer + 21 * 2 + i * 6); From 70c04af034d0f1a38a66fc15bc84f11adc44a39e Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Wed, 5 Nov 2025 08:00:17 +0100 Subject: [PATCH 20/35] slow rate updates --- src/asynStreamGeneratorDriver.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index dc2b3d6..c20b7ab 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -347,7 +347,6 @@ asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, void asynStreamGeneratorDriver::receiveUDP() { // TODO fix time overflows - // TODO check for lost packets const char *functionName = "receiveUDP"; asynStatus status = asynSuccess; @@ -630,12 +629,14 @@ void asynStreamGeneratorDriver::processEvents() { countDiffsPtr = (countDiffsPtr + 1) % 10; - lock(); - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Rates[i], rates[i]); + if (countDiffsPtr % 5 == 0) { + lock(); + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Rates[i], rates[i]); + } + callParamCallbacks(); + unlock(); } - callParamCallbacks(); - unlock(); } } } From e5cb019143930132a343abf6bfc28fda589c20b8 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Wed, 5 Nov 2025 09:25:01 +0100 Subject: [PATCH 21/35] no pointers, just bytes buffers of fixed size --- src/asynStreamGeneratorDriver.cpp | 214 +++++++++++------------------- src/asynStreamGeneratorDriver.h | 18 ++- 2 files changed, 86 insertions(+), 146 deletions(-) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index c20b7ab..f76055e 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ 
b/src/asynStreamGeneratorDriver.cpp @@ -107,8 +107,11 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( 0), /* Default stack size*/ num_channels(numChannels + 1), kafkaEnabled(enableKafkaStream), monitorTopic(monitorTopic), detectorTopic(detectorTopic), - udpQueue(udpQueueSize, false), monitorQueue(kafkaQueueSize, false), - detectorQueue(kafkaQueueSize, false), + udpQueue(epicsRingBytesCreate(udpQueueSize * sizeof(NormalisedEvent))), + monitorQueue( + epicsRingBytesCreate(kafkaQueueSize * sizeof(NormalisedEvent))), + detectorQueue( + epicsRingBytesCreate(kafkaQueueSize * sizeof(NormalisedEvent))), kafkaMaxPacketSize(kafkaMaxPacketSize) { const char *functionName = "asynStreamGeneratorDriver"; @@ -366,6 +369,8 @@ void asynStreamGeneratorDriver::receiveUDP() { lastBufferNumber[i] = 0; } + NormalisedEvent ne; + while (true) { status = pasynManager->isConnected(pasynUDPUser, &isConnected); @@ -404,26 +409,25 @@ void asynStreamGeneratorDriver::receiveUDP() { for (std::size_t i = 0; i < total_events; ++i) { char *event = (buffer + 21 * 2 + i * 6); - NormalisedEvent *ne; - if (event[5] & 0x80) { // Monitor Event MonitorEvent *m_event = (MonitorEvent *)event; - // needs to be freed!!! - ne = new NormalisedEvent( - header->nanosecs() + (uint64_t)m_event->nanosecs(), - 0, m_event->DataID); + ne.timestamp = + header->nanosecs() + (uint64_t)m_event->nanosecs(); + ne.source = 0; + ne.pixelId = m_event->DataID; } else { // Detector Event DetectorEvent *d_event = (DetectorEvent *)event; - // needs to be freed!!! 
- ne = new NormalisedEvent( - header->nanosecs() + (uint64_t)d_event->nanosecs(), - header->McpdID, d_event->pixelId(header->McpdID)); + ne.timestamp = + header->nanosecs() + (uint64_t)d_event->nanosecs(); + ne.source = header->McpdID; + ne.pixelId = d_event->pixelId(header->McpdID); } - this->udpQueue.push(ne); + epicsRingBytesPut(this->udpQueue, (char *)&ne, + sizeof(NormalisedEvent)); } } else { @@ -435,15 +439,14 @@ void asynStreamGeneratorDriver::receiveUDP() { } } -inline void asynStreamGeneratorDriver::queueForKafka(NormalisedEvent *ne) { - +inline void asynStreamGeneratorDriver::queueForKafka(NormalisedEvent &&ne) { if (this->kafkaEnabled) { - if (ne->source == 0) - this->monitorQueue.push(ne); + if (ne.source == 0) + epicsRingBytesPut(this->monitorQueue, (char *)&ne, + sizeof(NormalisedEvent)); else - this->detectorQueue.push(ne); - } else { - delete ne; + epicsRingBytesPut(this->detectorQueue, (char *)&ne, + sizeof(NormalisedEvent)); } } @@ -451,21 +454,22 @@ void asynStreamGeneratorDriver::processEvents() { const char *functionName = "processEvents"; - const size_t queueBufferSize = 10 * this->udpQueue.getSize(); + const size_t queueBufferSize = + 10 * epicsRingBytesSize(this->udpQueue) / sizeof(NormalisedEvent); struct { - bool operator()(const NormalisedEvent *l, - const NormalisedEvent *r) const { - return l->timestamp > r->timestamp; + bool operator()(const NormalisedEvent l, + const NormalisedEvent r) const { + return l.timestamp > r.timestamp; } } smallestToLargest; // This should never be used. It is just instantiated to reserve a buffer // of specific size. 
- std::vector queueBuffer; + std::vector queueBuffer; queueBuffer.reserve(queueBufferSize); - std::priority_queue, + std::priority_queue, decltype(smallestToLargest)> timeQueue(smallestToLargest, std::move(queueBuffer)); @@ -480,7 +484,7 @@ void asynStreamGeneratorDriver::processEvents() { epicsTimeStamp lastRateUpdate = epicsTime::getCurrent(); asynStatus status = asynSuccess; - NormalisedEvent *ne; + NormalisedEvent ne; uint64_t newestTimestamp = 0; uint64_t startTimestamp = std::numeric_limits::max(); uint64_t currTimestamp; @@ -493,22 +497,24 @@ void asynStreamGeneratorDriver::processEvents() { while (true) { - if ((ne = this->udpQueue.pop()) != nullptr) { + // TODO depending on how this is implemented, I may also need to check + // that there is is enough bytes, in case it does partial writes... + if (epicsRingBytesGet(udpQueue, (char *)&ne, sizeof(NormalisedEvent))) { // we should reastart this ioc at least every few years, as at ns // resolution with a uint64_t we will have an overflow after around // 4 years - newestTimestamp = std::max(newestTimestamp, ne->timestamp); + newestTimestamp = std::max(newestTimestamp, ne.timestamp); - ++countDiff[ne->source == 0 ? ne->pixelId + 1 : 0]; + ++countDiff[ne.source == 0 ? 
ne.pixelId + 1 : 0]; - timeQueue.push(ne); + timeQueue.push(std::move(ne)); } // idea is to try and guarantee at least 1 packet per id or the min // frequency for each id without actually checking all ids if (timeQueue.size() >= 1500 * 10 || (timeQueue.size() > 0 && - newestTimestamp - timeQueue.top()->timestamp >= 200'000'000ull)) { + newestTimestamp - timeQueue.top().timestamp >= 200'000'000ull)) { ne = timeQueue.top(); timeQueue.pop(); @@ -546,8 +552,8 @@ void asynStreamGeneratorDriver::processEvents() { prevStatus = currStatus; if (currStatus == STATUS_COUNTING) { - startTimestamp = std::min(startTimestamp, ne->timestamp); - currTimestamp = ne->timestamp; + startTimestamp = std::min(startTimestamp, ne.timestamp); + currTimestamp = ne.timestamp; elapsedSeconds = 0 ? currTimestamp <= startTimestamp : ((double)(currTimestamp - startTimestamp)) / 1e9; @@ -557,22 +563,20 @@ void asynStreamGeneratorDriver::processEvents() { (timePreset && elapsedSeconds >= timePreset)) { // filter out events that occured after the specified time - if (ne->timestamp - startTimestamp <= countPreset) { - counts[ne->source == 0 ? ne->pixelId + 1 : 0] += 1; - this->queueForKafka(ne); + if (ne.timestamp - startTimestamp <= countPreset) { + counts[ne.source == 0 ? ne.pixelId + 1 : 0] += 1; + this->queueForKafka(std::move(ne)); // add any remaining events with the same timestamp // we could theoretically have a small overrun if the // timestamps are identical on the monitor channel while (!timeQueue.empty() && - !timeQueue.top()->timestamp == currTimestamp) { + !timeQueue.top().timestamp == currTimestamp) { ne = timeQueue.top(); timeQueue.pop(); - counts[ne->source == 0 ? ne->pixelId + 1 : 0] += 1; - this->queueForKafka(ne); + counts[ne.source == 0 ? ne.pixelId + 1 : 0] += 1; + this->queueForKafka(std::move(ne)); } - } else { - delete ne; } countPreset = 0; @@ -592,8 +596,8 @@ void asynStreamGeneratorDriver::processEvents() { } else { - counts[ne->source == 0 ? 
ne->pixelId + 1 : 0] += 1; - this->queueForKafka(ne); + counts[ne.source == 0 ? ne.pixelId + 1 : 0] += 1; + this->queueForKafka(std::move(ne)); lock(); for (size_t i = 0; i < num_channels; ++i) { @@ -603,9 +607,6 @@ void asynStreamGeneratorDriver::processEvents() { callParamCallbacks(); unlock(); } - - } else { - delete ne; } } @@ -622,7 +623,8 @@ void asynStreamGeneratorDriver::processEvents() { for (size_t j = 0; j <= 10; ++j) { cnt += countDiffs[i * 10 + j]; } - rates[i] = cnt / 10.; + rates[i] = + cnt; // would / 10 to average than * 10 as want per second countDiff[i] = 0; } @@ -641,38 +643,41 @@ void asynStreamGeneratorDriver::processEvents() { } } -void asynStreamGeneratorDriver::produceMonitor() { +void asynStreamGeneratorDriver::produce(epicsRingBytesId eventQueue, + rd_kafka_t *kafkaProducer, + const char *topic, const char *source) { flatbuffers::FlatBufferBuilder builder(1024); + const std::size_t bufferSize = this->kafkaMaxPacketSize + 16; + std::vector tof; - tof.reserve(this->kafkaMaxPacketSize + 16); + tof.reserve(bufferSize); std::vector did; - did.reserve(this->kafkaMaxPacketSize + 16); + did.reserve(bufferSize); - int total = 0; epicsTimeStamp last_sent = epicsTime::getCurrent(); - + epicsTimeStamp now = last_sent; + int total = 0; uint64_t message_id = 0; + NormalisedEvent ne; + while (true) { - if (!this->monitorQueue.isEmpty()) { + if (!epicsRingBytesIsEmpty(eventQueue)) { ++total; - auto nme = this->monitorQueue.pop(); - tof.push_back(nme->timestamp); - did.push_back(nme->pixelId); - delete nme; + epicsRingBytesGet(eventQueue, (char *)&ne, sizeof(NormalisedEvent)); + tof.push_back(ne.timestamp); + did.push_back(ne.pixelId); } else { epicsThreadSleep(0.001); // seconds } - // TODO can probably just replace the current - // instead of always getting new object - epicsTimeStamp now = epicsTime::getCurrent(); + now = epicsTime::getCurrent(); // At least every 0.2 seconds if (total >= this->kafkaMaxPacketSize || @@ -685,7 +690,7 @@ void 
asynStreamGeneratorDriver::produceMonitor() { builder.Clear(); auto message = CreateEventMessageDirect( - builder, "monitor", message_id++, + builder, source, message_id++, ((uint64_t)now.secPastEpoch) * 1'000'000'000ull + ((uint64_t)now.nsec), &tof, &did); @@ -693,7 +698,7 @@ void asynStreamGeneratorDriver::produceMonitor() { builder.Finish(message, "ev42"); rd_kafka_resp_err_t err = rd_kafka_producev( - monitorProducer, RD_KAFKA_V_TOPIC(this->monitorTopic), + kafkaProducer, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), // RD_KAFKA_V_KEY((void *)key, key_len), RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), @@ -703,11 +708,10 @@ void asynStreamGeneratorDriver::produceMonitor() { if (err) { epicsStdoutPrintf("Failed to produce to topic %s: %s\n", - this->monitorTopic, - rd_kafka_err2str(err)); + topic, rd_kafka_err2str(err)); } - rd_kafka_poll(monitorProducer, 0); + rd_kafka_poll(kafkaProducer, 0); tof.clear(); did.clear(); @@ -716,80 +720,12 @@ void asynStreamGeneratorDriver::produceMonitor() { } } +void asynStreamGeneratorDriver::produceMonitor() { + this->produce(monitorQueue, monitorProducer, monitorTopic, "monitor"); +} + void asynStreamGeneratorDriver::produceDetector() { - - static const std::size_t bufferSize = this->kafkaMaxPacketSize + 16; - flatbuffers::FlatBufferBuilder builder(1024); - - std::vector tof; - tof.reserve(bufferSize); - - std::vector did; - did.reserve(bufferSize); - - int total = 0; - epicsTimeStamp last_sent = epicsTime::getCurrent(); - - uint64_t message_id = 0; - - while (true) { - - if (!this->detectorQueue.isEmpty()) { - - ++total; - auto nde = this->detectorQueue.pop(); - tof.push_back(nde->timestamp); - did.push_back(nde->pixelId); - delete nde; - - } else { - // TODO - // rd_kafka_flush(detectorProducer, 10 * 1000); - epicsThreadSleep(0.001); // seconds - } - - epicsTimeStamp now = epicsTime::getCurrent(); - - // At least every 0.2 seconds - if (total >= this->kafkaMaxPacketSize || - 
epicsTimeDiffInNS(&now, &last_sent) > 200'000'000ll) { - last_sent = epicsTime::getCurrent(); - - if (total) { - total = 0; - - builder.Clear(); - - auto message = CreateEventMessageDirect( - builder, "detector", message_id++, - ((uint64_t)now.secPastEpoch) * 1'000'000'000ull + - ((uint64_t)now.nsec), - &tof, &did); - - builder.Finish(message, "ev42"); - - rd_kafka_resp_err_t err = rd_kafka_producev( - detectorProducer, RD_KAFKA_V_TOPIC(this->detectorTopic), - RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), - // RD_KAFKA_V_KEY((void *)key, key_len), - RD_KAFKA_V_VALUE((void *)builder.GetBufferPointer(), - builder.GetSize()), - // RD_KAFKA_V_OPAQUE(NULL), - RD_KAFKA_V_END); - - if (err) { - epicsStdoutPrintf("Failed to produce to topic %s: %s\n", - this->detectorTopic, - rd_kafka_err2str(err)); - } - - rd_kafka_poll(detectorProducer, 0); - - tof.clear(); - did.clear(); - } - } - } + this->produce(detectorQueue, detectorProducer, detectorTopic, "detector"); } /******************************************************************************* diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index 0cc6579..e3dfa25 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -2,7 +2,7 @@ #define asynStreamGeneratorDriver_H #include "asynPortDriver.h" -#include +#include #include /******************************************************************************* @@ -62,8 +62,9 @@ struct __attribute__((__packed__)) NormalisedEvent { uint8_t source; uint32_t pixelId; - inline NormalisedEvent(uint64_t timestamp, uint8_t source, uint32_t pixelId) - : timestamp(timestamp), source(source), pixelId(pixelId){}; + // inline NormalisedEvent(uint64_t timestamp, uint8_t source, uint32_t + // pixelId) + // : timestamp(timestamp), source(source), pixelId(pixelId){}; }; /******************************************************************************* @@ -140,13 +141,13 @@ class asynStreamGeneratorDriver : public asynPortDriver { const bool 
kafkaEnabled; const int kafkaMaxPacketSize; - epicsRingPointer udpQueue; + epicsRingBytesId udpQueue; - epicsRingPointer monitorQueue; + epicsRingBytesId monitorQueue; rd_kafka_t *monitorProducer; const char *monitorTopic; - epicsRingPointer detectorQueue; + epicsRingBytesId detectorQueue; rd_kafka_t *detectorProducer; const char *detectorTopic; @@ -155,7 +156,10 @@ class asynStreamGeneratorDriver : public asynPortDriver { asynStatus createInt32Param(asynStatus status, char *name, int *variable, epicsInt32 initialValue = 0); - inline void queueForKafka(NormalisedEvent *ne); + inline void queueForKafka(NormalisedEvent &&ne); + + void produce(epicsRingBytesId eventQueue, rd_kafka_t *kafkaProducer, + const char *topic, const char *source); }; #endif From 617dd3153b33e4e8d407c464d4dae19cf173665a Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Wed, 5 Nov 2025 09:57:05 +0100 Subject: [PATCH 22/35] not 100% this rate calculation is right, but might be better than before? --- src/asynStreamGeneratorDriver.cpp | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index f76055e..5ba860b 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -477,10 +477,13 @@ void asynStreamGeneratorDriver::processEvents() { // uint32. It does support int64 though.. 
so we start with that epicsInt32 *counts = new epicsInt32[this->num_channels]; + const uint64_t minRateSamplePeriod = 100'000'000ll; + const size_t rateAverageWindow = 20; size_t countDiffsPtr = 0; epicsInt32 *rates = new epicsInt32[this->num_channels]; epicsInt32 *countDiff = new epicsInt32[this->num_channels]; - epicsInt32 *countDiffs = new epicsInt32[this->num_channels * 10]; + epicsInt32 *countDiffs = new epicsInt32[this->num_channels * rateAverageWindow]; + uint64_t *timeSpans = new uint64_t[this->num_channels]; epicsTimeStamp lastRateUpdate = epicsTime::getCurrent(); asynStatus status = asynSuccess; @@ -613,23 +616,30 @@ void asynStreamGeneratorDriver::processEvents() { // Careful changing any of these magic numbers until I clean this up // as you might end up calculating the wrong rate epicsTimeStamp currentTime = epicsTime::getCurrent(); - if (epicsTimeDiffInNS(¤tTime, &lastRateUpdate) > 100'000'000ll) { + if (epicsTimeDiffInNS(¤tTime, &lastRateUpdate) > minRateSamplePeriod) { + timeSpans[countDiffsPtr] = epicsTimeDiffInNS(¤tTime, &lastRateUpdate); + + uint64_t totalTime = 0; + for (size_t i = 0; i <= rateAverageWindow; ++i) { + totalTime += timeSpans[i]; + } + lastRateUpdate = currentTime; for (size_t i = 0; i <= this->num_channels; ++i) { - countDiffs[i * 10 + countDiffsPtr] = countDiff[i]; + countDiffs[i * rateAverageWindow + countDiffsPtr] = countDiff[i]; uint64_t cnt = 0; - for (size_t j = 0; j <= 10; ++j) { - cnt += countDiffs[i * 10 + j]; + for (size_t j = 0; j <= rateAverageWindow; ++j) { + cnt += countDiffs[i * rateAverageWindow + j]; } rates[i] = - cnt; // would / 10 to average than * 10 as want per second + cnt / (totalTime * 1e-9); countDiff[i] = 0; } - countDiffsPtr = (countDiffsPtr + 1) % 10; + countDiffsPtr = (countDiffsPtr + 1) % rateAverageWindow; if (countDiffsPtr % 5 == 0) { lock(); From 2ccf37ce33269ce34c681995602e764cb7428448 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Wed, 5 Nov 2025 10:13:08 +0100 Subject: [PATCH 23/35] comments 
on time overflow --- src/asynStreamGeneratorDriver.cpp | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 5ba860b..86a55aa 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -350,6 +350,13 @@ asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, void asynStreamGeneratorDriver::receiveUDP() { // TODO fix time overflows + // Regarding time overflow. + // * the header time stamp is 3 words, i.e. 48 bits. + // * it has a resolution of 100ns + // * so we can cover a maximum of (2^(3*16) - 1) * 1e-7 = 28147497 seconds + // * or about 325 days + // * so maybe this isn't necessary to solve, as long as we restart the + // electronics at least once a year... const char *functionName = "receiveUDP"; asynStatus status = asynSuccess; @@ -482,7 +489,8 @@ void asynStreamGeneratorDriver::processEvents() { size_t countDiffsPtr = 0; epicsInt32 *rates = new epicsInt32[this->num_channels]; epicsInt32 *countDiff = new epicsInt32[this->num_channels]; - epicsInt32 *countDiffs = new epicsInt32[this->num_channels * rateAverageWindow]; + epicsInt32 *countDiffs = + new epicsInt32[this->num_channels * rateAverageWindow]; uint64_t *timeSpans = new uint64_t[this->num_channels]; epicsTimeStamp lastRateUpdate = epicsTime::getCurrent(); @@ -616,25 +624,27 @@ void asynStreamGeneratorDriver::processEvents() { // Careful changing any of these magic numbers until I clean this up // as you might end up calculating the wrong rate epicsTimeStamp currentTime = epicsTime::getCurrent(); - if (epicsTimeDiffInNS(¤tTime, &lastRateUpdate) > minRateSamplePeriod) { - timeSpans[countDiffsPtr] = epicsTimeDiffInNS(¤tTime, &lastRateUpdate); + if (epicsTimeDiffInNS(¤tTime, &lastRateUpdate) > + minRateSamplePeriod) { + timeSpans[countDiffsPtr] = + epicsTimeDiffInNS(¤tTime, &lastRateUpdate); - uint64_t totalTime = 0; - for (size_t i = 0; i <= 
rateAverageWindow; ++i) { - totalTime += timeSpans[i]; - } + uint64_t totalTime = 0; + for (size_t i = 0; i <= rateAverageWindow; ++i) { + totalTime += timeSpans[i]; + } lastRateUpdate = currentTime; for (size_t i = 0; i <= this->num_channels; ++i) { - countDiffs[i * rateAverageWindow + countDiffsPtr] = countDiff[i]; + countDiffs[i * rateAverageWindow + countDiffsPtr] = + countDiff[i]; uint64_t cnt = 0; for (size_t j = 0; j <= rateAverageWindow; ++j) { cnt += countDiffs[i * rateAverageWindow + j]; } - rates[i] = - cnt / (totalTime * 1e-9); + rates[i] = cnt / (totalTime * 1e-9); countDiff[i] = 0; } From 5f95e82a3cfbf4333e0f9b1ecfbe49b691866d67 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 6 Nov 2025 11:58:19 +0100 Subject: [PATCH 24/35] in the process of switching to a more batch processing approach. so far, seems like it can keep up --- db/channels.db | 6 +- db/daq_common.db | 31 +- scripts/st.cmd | 4 + src/asynStreamGeneratorDriver.cpp | 587 ++++++++++++++++++++---------- src/asynStreamGeneratorDriver.h | 12 +- 5 files changed, 452 insertions(+), 188 deletions(-) diff --git a/db/channels.db b/db/channels.db index 0b94f18..9df5d06 100644 --- a/db/channels.db +++ b/db/channels.db @@ -59,7 +59,8 @@ record(longin, "$(INSTR)$(NAME):M$(CHANNEL)") field(DTYP, "asynInt32") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) COUNTS$(CHANNEL)") # This is probably too fast. 
We could trigger things the same as sinqDAQ to ensure the db is update in the same order - field(SCAN, "I/O Intr") + # field(SCAN, "I/O Intr") + field(SCAN, ".2 second") field(PINI, "YES") } @@ -69,6 +70,7 @@ record(ai, "$(INSTR)$(NAME):R$(CHANNEL)") field(EGU, "cts/sec") field(DTYP, "asynInt32") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) RATE$(CHANNEL)") - field(SCAN, "I/O Intr") + field(SCAN, ".2 second") + # field(SCAN, "I/O Intr") field(PINI, "YES") } diff --git a/db/daq_common.db b/db/daq_common.db index 410b80d..ef9f2ee 100644 --- a/db/daq_common.db +++ b/db/daq_common.db @@ -39,7 +39,8 @@ record(mbbi, "$(INSTR)$(NAME):STATUS") field(FRVL, "4") field(FRST, "INVALID") # This is probably too fast. We could trigger things the same as sinqDAQ to ensure the db is update in the same order - field(SCAN, "I/O Intr") + #field(SCAN, "I/O Intr") + field(SCAN, ".5 second") field(PINI, "YES") } @@ -201,7 +202,33 @@ record(ai,"$(INSTR)$(NAME):ELAPSED-TIME") field(EGU, "sec") field(DTYP, "asynInt32") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) TIME") - field(SCAN, "I/O Intr") + # field(SCAN, "I/O Intr") + field(SCAN, ".5 second") field(PINI, "YES") # field(FLNK, "$(INSTR)$(NAME):ETO") } + +################################################################################ +# Stream Generator Status PVs + +record(longin,"$(INSTR)$(NAME):UDP_WATERMARK") +{ + field(DESC, "Max Events in Queue") + field(EGU, "Events") + field(DTYP, "asynInt32") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) UDP") + # field(SCAN, "I/O Intr") + field(SCAN, "1 second") + field(PINI, "YES") +} + +record(longin,"$(INSTR)$(NAME):SORTED_WATERMARK") +{ + field(DESC, "Max Events in Queue") + field(EGU, "Events") + field(DTYP, "asynInt32") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) SORT") + # field(SCAN, "I/O Intr") + field(SCAN, "1 second") + field(PINI, "YES") +} diff --git a/scripts/st.cmd b/scripts/st.cmd index 489cf84..3ba498b 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -9,6 +9,10 @@ 
epicsEnvSet("INSTR", "SQ:TEST:") epicsEnvSet("NAME", "SG") drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 1) + +# With a udpQueue and sortQueue size of 10'000 packets, we can hold in memory +# 10'000 * 243 = 2.43e6 events + # asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "linkafka01:9092", "NEWEFU_TEST", "NEWEFU_TEST2", 1000, 8192) asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "", "", "", 0, 0) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 86a55aa..e7efa35 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -59,6 +59,11 @@ static void udpPollerTask(void *drvPvt) { pSGD->receiveUDP(); } +static void sortTask(void *drvPvt) { + asynStreamGeneratorDriver *pSGD = (asynStreamGeneratorDriver *)drvPvt; + pSGD->partialSortEvents(); +} + static void daqTask(void *drvPvt) { asynStreamGeneratorDriver *pSGD = (asynStreamGeneratorDriver *)drvPvt; pSGD->processEvents(); @@ -107,7 +112,12 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( 0), /* Default stack size*/ num_channels(numChannels + 1), kafkaEnabled(enableKafkaStream), monitorTopic(monitorTopic), detectorTopic(detectorTopic), - udpQueue(epicsRingBytesCreate(udpQueueSize * sizeof(NormalisedEvent))), + // so these first to are measured in max packet sizes + udpQueue( + epicsRingBytesCreate(243 * udpQueueSize * sizeof(NormalisedEvent))), + // TODO configurable sizes + sortedQueue(epicsRingBytesCreate(243 * udpQueueSize * sizeof(NormalisedEvent))), + // and these two are currently measured in event sizes... 
monitorQueue( epicsRingBytesCreate(kafkaQueueSize * sizeof(NormalisedEvent))), detectorQueue( @@ -151,6 +161,11 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( status = createInt32Param(status, pv_name_buffer, P_ClearCounts + i); } + status = createInt32Param(status, P_UdpQueueHighWaterMarkString, + &P_UdpQueueHighWaterMark); + status = createInt32Param(status, P_SortedQueueHighWaterMarkString, + &P_SortedQueueHighWaterMark); + if (status) { epicsStdoutPrintf( "%s:%s: failed to create or setup parameters, status=%d\n", @@ -210,10 +225,26 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( /* Create the thread that orders the events and acts as our sinqDaq stand-in */ - status = (asynStatus)(epicsThreadCreate( - "sinqDAQ", epicsThreadPriorityMax, - epicsThreadGetStackSize(epicsThreadStackMedium), - (EPICSTHREADFUNC)::daqTask, this) == NULL); + status = + (asynStatus)(epicsThreadCreate( + "sinqDAQ", + epicsThreadPriorityMedium, // epicsThreadPriorityMax, + epicsThreadGetStackSize(epicsThreadStackMedium), + (EPICSTHREADFUNC)::daqTask, this) == NULL); + if (status) { + epicsStdoutPrintf("%s:%s: epicsThreadCreate failure, status=%d\n", + driverName, functionName, status); + exit(1); + } + + /* Create the thread that orders packets of in preparation for our sinqDAQ stand-in + */ + status = + (asynStatus)(epicsThreadCreate( + "partialSort", + epicsThreadPriorityMedium, + epicsThreadGetStackSize(epicsThreadStackMedium), + (EPICSTHREADFUNC)::sortTask, this) == NULL); if (status) { epicsStdoutPrintf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, functionName, status); @@ -254,6 +285,32 @@ asynStreamGeneratorDriver::~asynStreamGeneratorDriver() { // epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); } +asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, epicsInt32 *value) { + + int function = pasynUser->reason; + asynStatus status = asynSuccess; + const char *paramName; + const char *functionName = 
"readInt32"; + getParamName(function, ¶mName); + + if (function == P_UdpQueueHighWaterMark) { + *value = + epicsRingBytesHighWaterMark(this->udpQueue) / sizeof(NormalisedEvent); + // Aparently resetting the watermark causes problems... + // at least concurrently :D + // epicsRingBytesResetHighWaterMark(this->udpQueue); + return asynSuccess; + } else if (function == P_SortedQueueHighWaterMark) { + *value = + epicsRingBytesHighWaterMark(this->sortedQueue) / sizeof(NormalisedEvent); + // epicsRingBytesResetHighWaterMark(this->sortedQueue); + return asynSuccess; + } + + return asynPortDriver::readInt32(pasynUser, value); + +} + asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, epicsInt32 value) { int function = pasynUser->reason; @@ -446,6 +503,57 @@ void asynStreamGeneratorDriver::receiveUDP() { } } + +struct { + bool operator()(const NormalisedEvent l, + const NormalisedEvent r) const { + return l.timestamp > r.timestamp; + } +} reverseSortEventsByTime; + +inline int eventsInQueue(epicsRingBytesId id) { + return epicsRingBytesUsedBytes(id) / sizeof(NormalisedEvent); +} + +void asynStreamGeneratorDriver::partialSortEvents() { + + const char *functionName = "partialSortEvents"; + + // x * number of ids * max events in packet + int bufferedEvents = 5 * 10 * 243; + NormalisedEvent *events = new NormalisedEvent[bufferedEvents]; + + int queuedEvents = 0; + epicsTimeStamp lastSort = epicsTime::getCurrent(); + epicsTimeStamp currentTime = lastSort; + + while (true) { + + queuedEvents = eventsInQueue(this->udpQueue); // in case we can't wait + lastSort = epicsTime::getCurrent(); + currentTime = lastSort; + + // wait for mininmum packet frequency or enough packets to ensure we could potentially + // have at least 1 packet per mcpdid + while (queuedEvents < bufferedEvents && epicsTimeDiffInNS(¤tTime, &lastSort) < 250'000'000ull) { + epicsThreadSleep(0.0001); // seconds + currentTime = epicsTime::getCurrent(); + queuedEvents = eventsInQueue(this->udpQueue); + 
} + + queuedEvents = std::min(queuedEvents, bufferedEvents); + + if (queuedEvents) { + epicsRingBytesGet(this->udpQueue, (char *)events, queuedEvents * sizeof(NormalisedEvent)); + + std::sort(events, events + queuedEvents, reverseSortEventsByTime); + + epicsRingBytesPut(this->sortedQueue, (char *)events, queuedEvents * sizeof(NormalisedEvent)); + } + } + +} + inline void asynStreamGeneratorDriver::queueForKafka(NormalisedEvent &&ne) { if (this->kafkaEnabled) { if (ne.source == 0) @@ -461,206 +569,319 @@ void asynStreamGeneratorDriver::processEvents() { const char *functionName = "processEvents"; - const size_t queueBufferSize = - 10 * epicsRingBytesSize(this->udpQueue) / sizeof(NormalisedEvent); + // x * number of ids * max events in packet * event size + int bufferedEvents = 5 * 10 * 243; + // we need a little extra space for merge sorting in + int extraBufferedEvents = 1 * 10 * 243; - struct { - bool operator()(const NormalisedEvent l, - const NormalisedEvent r) const { - return l.timestamp > r.timestamp; - } - } smallestToLargest; + // we have two buffers. We alternate between reading data into one of them, + // and then merge sorting into the other + NormalisedEvent *eventsA = new NormalisedEvent[(bufferedEvents + extraBufferedEvents)]; + NormalisedEvent *eventsB = new NormalisedEvent[(bufferedEvents + extraBufferedEvents)]; + NormalisedEvent *eventsBLastStart = eventsB + bufferedEvents; + NormalisedEvent *eventsBLastEnd = eventsBLastStart; - // This should never be used. It is just instantiated to reserve a buffer - // of specific size. - std::vector queueBuffer; - queueBuffer.reserve(queueBufferSize); + int queuedEvents = 0; - std::priority_queue, - decltype(smallestToLargest)> - timeQueue(smallestToLargest, std::move(queueBuffer)); + epicsTimeStamp lastProcess = epicsTime::getCurrent(); + epicsTimeStamp currentTime = lastProcess; - // TODO epics doesn't seem to support uint64, you would need an array of - // uint32. It does support int64 though.. 
so we start with that epicsInt32 *counts = new epicsInt32[this->num_channels]; - const uint64_t minRateSamplePeriod = 100'000'000ll; - const size_t rateAverageWindow = 20; - size_t countDiffsPtr = 0; - epicsInt32 *rates = new epicsInt32[this->num_channels]; - epicsInt32 *countDiff = new epicsInt32[this->num_channels]; - epicsInt32 *countDiffs = - new epicsInt32[this->num_channels * rateAverageWindow]; - uint64_t *timeSpans = new uint64_t[this->num_channels]; - epicsTimeStamp lastRateUpdate = epicsTime::getCurrent(); - - asynStatus status = asynSuccess; - NormalisedEvent ne; - uint64_t newestTimestamp = 0; - uint64_t startTimestamp = std::numeric_limits::max(); - uint64_t currTimestamp; - epicsInt32 elapsedSeconds = 0; - epicsInt32 prevStatus = STATUS_IDLE; - epicsInt32 currStatus = STATUS_IDLE; - epicsInt32 countPreset = 0; - epicsInt32 timePreset = 0; - epicsInt32 presetChannel = 0; + epicsInt32 udpQueueHighWaterMark = 0; + epicsInt32 sortedQueueHighWaterMark = 0; while (true) { - // TODO depending on how this is implemented, I may also need to check - // that there is is enough bytes, in case it does partial writes... - if (epicsRingBytesGet(udpQueue, (char *)&ne, sizeof(NormalisedEvent))) { - // we should reastart this ioc at least every few years, as at ns - // resolution with a uint64_t we will have an overflow after around - // 4 years - newestTimestamp = std::max(newestTimestamp, ne.timestamp); + queuedEvents = eventsInQueue(this->sortedQueue); // in case we can't wait + lastProcess = epicsTime::getCurrent(); + currentTime = lastProcess; - ++countDiff[ne.source == 0 ? 
ne.pixelId + 1 : 0]; - - timeQueue.push(std::move(ne)); + // wait for mininmum packet frequency or enough packets to ensure we could potentially + // have at least 1 packet per mcpdid + while (queuedEvents < bufferedEvents && epicsTimeDiffInNS(¤tTime, &lastProcess) < 250'000'000ull) { + epicsThreadSleep(0.0001); // seconds + currentTime = epicsTime::getCurrent(); + queuedEvents = eventsInQueue(this->sortedQueue); } - // idea is to try and guarantee at least 1 packet per id or the min - // frequency for each id without actually checking all ids - if (timeQueue.size() >= 1500 * 10 || - (timeQueue.size() > 0 && - newestTimestamp - timeQueue.top().timestamp >= 200'000'000ull)) { - ne = timeQueue.top(); - timeQueue.pop(); + queuedEvents = std::min(queuedEvents, bufferedEvents); - status = getIntegerParam(this->P_Status, &currStatus); + NormalisedEvent *newStartPtr = eventsA + extraBufferedEvents; - if (currStatus == STATUS_COUNTING && prevStatus == STATUS_IDLE) { - // Starting a new count + // We read into the array, such that we have enough space, that the + // entirety of the leftover from the previous read can fit before this + // new read, in the case that all new events are newer timewise, and + // therefore, all events from eventsB have to be placed in a preceeding + // position. 
+ epicsRingBytesGet(this->sortedQueue, (char *)newStartPtr, queuedEvents * sizeof(NormalisedEvent)); - // get current count configuration - getIntegerParam(this->P_CountPreset, &countPreset); - getIntegerParam(this->P_TimePreset, &timePreset); - getIntegerParam(this->P_MonitorChannel, &presetChannel); + int toProcess = eventsBLastEnd - eventsBLastStart + queuedEvents * 4 / 5; - // reset status variables - startTimestamp = std::numeric_limits::max(); - for (size_t i = 0; i < this->num_channels; ++i) { - counts[i] = 0; - } + // TODO could also consider an in-place merge + eventsBLastEnd = std::merge( + newStartPtr, newStartPtr + queuedEvents, + eventsBLastStart, eventsBLastEnd, + eventsA, reverseSortEventsByTime + ); - // reset pvs - lock(); - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], counts[i]); - } - setIntegerParam(P_ElapsedTime, 0); - callParamCallbacks(); - unlock(); + eventsBLastStart = eventsA + toProcess; - // TODO might consider throwing out current buffer as it is - // from before count started? then again, 0.2 ms or whatever is - // set above is quite a small preceeding amount of time, so - // maybe it doesn't matter - } - - prevStatus = currStatus; - - if (currStatus == STATUS_COUNTING) { - startTimestamp = std::min(startTimestamp, ne.timestamp); - currTimestamp = ne.timestamp; - elapsedSeconds = - 0 ? currTimestamp <= startTimestamp - : ((double)(currTimestamp - startTimestamp)) / 1e9; - - // is our count finished? - if ((countPreset && counts[presetChannel] >= countPreset) || - (timePreset && elapsedSeconds >= timePreset)) { - - // filter out events that occured after the specified time - if (ne.timestamp - startTimestamp <= countPreset) { - counts[ne.source == 0 ? 
ne.pixelId + 1 : 0] += 1; - this->queueForKafka(std::move(ne)); - - // add any remaining events with the same timestamp - // we could theoretically have a small overrun if the - // timestamps are identical on the monitor channel - while (!timeQueue.empty() && - !timeQueue.top().timestamp == currTimestamp) { - ne = timeQueue.top(); - timeQueue.pop(); - counts[ne.source == 0 ? ne.pixelId + 1 : 0] += 1; - this->queueForKafka(std::move(ne)); - } - } - - countPreset = 0; - timePreset = 0; - - lock(); - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], counts[i]); - } - setIntegerParam(P_ElapsedTime, elapsedSeconds); - setIntegerParam(P_CountPreset, countPreset); - setIntegerParam(P_TimePreset, timePreset); - callParamCallbacks(); - setIntegerParam(P_Status, STATUS_IDLE); - callParamCallbacks(); - unlock(); - - } else { - - counts[ne.source == 0 ? ne.pixelId + 1 : 0] += 1; - this->queueForKafka(std::move(ne)); - - lock(); - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], counts[i]); - } - setIntegerParam(P_ElapsedTime, elapsedSeconds); - callParamCallbacks(); - unlock(); - } - } + for (size_t i = 0; i < toProcess; ++i) { + counts[eventsA[i].source == 0 ? 
eventsA[i].pixelId + 1 : 0] += 1; } - // Careful changing any of these magic numbers until I clean this up - // as you might end up calculating the wrong rate - epicsTimeStamp currentTime = epicsTime::getCurrent(); - if (epicsTimeDiffInNS(¤tTime, &lastRateUpdate) > - minRateSamplePeriod) { - timeSpans[countDiffsPtr] = - epicsTimeDiffInNS(¤tTime, &lastRateUpdate); - - uint64_t totalTime = 0; - for (size_t i = 0; i <= rateAverageWindow; ++i) { - totalTime += timeSpans[i]; - } - - lastRateUpdate = currentTime; - - for (size_t i = 0; i <= this->num_channels; ++i) { - countDiffs[i * rateAverageWindow + countDiffsPtr] = - countDiff[i]; - - uint64_t cnt = 0; - for (size_t j = 0; j <= rateAverageWindow; ++j) { - cnt += countDiffs[i * rateAverageWindow + j]; - } - rates[i] = cnt / (totalTime * 1e-9); - - countDiff[i] = 0; - } - - countDiffsPtr = (countDiffsPtr + 1) % rateAverageWindow; - - if (countDiffsPtr % 5 == 0) { - lock(); - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Rates[i], rates[i]); - } - callParamCallbacks(); - unlock(); - } + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Counts[i], counts[i]); } + + //setIntegerParam(P_ElapsedTime, elapsedSeconds); + + std::swap(eventsA, eventsB); + } + + // // TODO this is totally decoupled!!! + // const size_t queueBufferSize = + // 10 * epicsRingBytesSize(this->udpQueue) / sizeof(NormalisedEvent); + + // //struct { + // // bool operator()(const NormalisedEvent l, + // // const NormalisedEvent r) const { + // // return l.timestamp > r.timestamp; + // // } + // //} smallestToLargest; + + // //// This should never be used. It is just instantiated to reserve a buffer + // //// of specific size. 
+ // //std::vector queueBuffer; + // //queueBuffer.reserve(queueBufferSize); + + // //std::priority_queue, + // // decltype(smallestToLargest)> + // // timeQueue(smallestToLargest, std::move(queueBuffer)); + + // NormalisedEvent* timeQueue = new NormalisedEvent[queueBufferSize]; + + // // TODO epics doesn't seem to support uint64, you would need an array of + // // uint32. It does support int64 though.. so we start with that + // epicsInt32 *counts = new epicsInt32[this->num_channels]; + + // const uint64_t minRateSamplePeriod = 100'000'000ll; + // const size_t rateAverageWindow = 20; + // size_t countDiffsPtr = 0; + // epicsInt32 *rates = new epicsInt32[this->num_channels]; + // epicsInt32 *countDiff = new epicsInt32[this->num_channels]; + // epicsInt32 *countDiffs = + // new epicsInt32[this->num_channels * rateAverageWindow]; + // uint64_t *timeSpans = new uint64_t[this->num_channels]; + // epicsTimeStamp lastRateUpdate = epicsTime::getCurrent(); + + // asynStatus status = asynSuccess; + // NormalisedEvent ne; + // uint64_t newestTimestamp = 0; + // uint64_t startTimestamp = std::numeric_limits::max(); + // uint64_t currTimestamp; + // epicsInt32 elapsedSeconds = 0; + // epicsInt32 prevStatus = STATUS_IDLE; + // epicsInt32 currStatus = STATUS_IDLE; + // epicsInt32 countPreset = 0; + // epicsInt32 timePreset = 0; + // epicsInt32 presetChannel = 0; + // epicsInt32 udpQueueHighWaterMark = 0; + + // while (true) { + + // // I think mostly everything should already by sorted + // // could probably in the other thread guarantee that each packet is sorted + // // but probably it already is... 
+ // // + // // so really we just need to merge sort chunks + + // // idea is to try and guarantee at least 1 packet per id or the min + // // frequency for each id without actually checking all ids + // // size_t timeQueuePtr = 0; + // // while (timeQueuePtr < 1500 * 10) { + + // // // TODO depending on how this is implemented, I may also need to + // // // check that there is is enough bytes, in case it does partial + // // // writes... + // // if (epicsRingBytesGet(udpQueue, (char *)&ne, + // // sizeof(NormalisedEvent))) { + // // // we should restart this ioc at least every few years, as at ns + // // // resolution with a uint64_t we will have an overflow after + // // // around 4 years + // // newestTimestamp = std::max(newestTimestamp, ne.timestamp); + + // // ++countDiff[ne.source == 0 ? ne.pixelId + 1 : 0]; + + // // timeQueue.push(std::move(ne)); + // // } + + // // } + + + // // while (timeQueue.empty() || + // // (timeQueue.size() < 1500 * 10 && + // // newestTimestamp - timeQueue.top().timestamp < 200'000'000ull)) { + + // // // TODO depending on how this is implemented, I may also need to + // // // check that there is is enough bytes, in case it does partial + // // // writes... + // // if (epicsRingBytesGet(udpQueue, (char *)&ne, + // // sizeof(NormalisedEvent))) { + // // // we should restart this ioc at least every few years, as at ns + // // // resolution with a uint64_t we will have an overflow after + // // // around 4 years + // // newestTimestamp = std::max(newestTimestamp, ne.timestamp); + + // // ++countDiff[ne.source == 0 ? 
ne.pixelId + 1 : 0]; + + // // timeQueue.push(std::move(ne)); + // // } + // // } + + // // ne = timeQueue.top(); + // // timeQueue.pop(); + + // // status = getIntegerParam(this->P_Status, &currStatus); + + // // udpQueueHighWaterMark = + // // epicsRingBytesHighWaterMark(udpQueue) / sizeof(NormalisedEvent); + + // // // if (currStatus == STATUS_COUNTING && prevStatus == STATUS_IDLE) { + // // // // Starting a new count + + // // // // get current count configuration + // // // getIntegerParam(this->P_CountPreset, &countPreset); + // // // getIntegerParam(this->P_TimePreset, &timePreset); + // // // getIntegerParam(this->P_MonitorChannel, &presetChannel); + + // // // // reset status variables + // // // startTimestamp = std::numeric_limits::max(); + // // // for (size_t i = 0; i < this->num_channels; ++i) { + // // // counts[i] = 0; + // // // } + + // // // // reset pvs + // // // // lock(); + // // // // for (size_t i = 0; i < num_channels; ++i) { + // // // // setIntegerParam(P_Counts[i], counts[i]); + // // // // } + // // // // setIntegerParam(P_ElapsedTime, 0); + // // // // callParamCallbacks(); + // // // // unlock(); + + // // // // TODO might consider throwing out current buffer as it is + // // // // from before count started? then again, 0.2 ms or whatever is + // // // // set above is quite a small preceeding amount of time, so + // // // // maybe it doesn't matter + // // // } + + // // // prevStatus = currStatus; + + // // //if (currStatus == STATUS_COUNTING) { + // // startTimestamp = std::min(startTimestamp, ne.timestamp); + // // currTimestamp = ne.timestamp; + // // elapsedSeconds = + // // 0 ? currTimestamp <= startTimestamp + // // : ((double)(currTimestamp - startTimestamp)) / 1e9; + + // // // is our count finished? 
+ // // // if ((countPreset && counts[presetChannel] >= countPreset) || + // // // (timePreset && elapsedSeconds >= timePreset)) { + + // // // // filter out events that occured after the specified time + // // // if (ne.timestamp - startTimestamp <= countPreset) { + // // // counts[ne.source == 0 ? ne.pixelId + 1 : 0] += 1; + // // // this->queueForKafka(std::move(ne)); + + // // // // add any remaining events with the same timestamp + // // // // we could theoretically have a small overrun if the + // // // // timestamps are identical on the monitor channel + // // // while (!timeQueue.empty() && + // // // !timeQueue.top().timestamp == currTimestamp) { + // // // ne = timeQueue.top(); + // // // timeQueue.pop(); + // // // counts[ne.source == 0 ? ne.pixelId + 1 : 0] += 1; + // // // this->queueForKafka(std::move(ne)); + // // // } + // // // } + + // // // countPreset = 0; + // // // timePreset = 0; + + // // // // lock(); + // // // for (size_t i = 0; i < num_channels; ++i) { + // // // setIntegerParam(P_Counts[i], counts[i]); + // // // } + // // // setIntegerParam(P_ElapsedTime, elapsedSeconds); + // // // setIntegerParam(P_CountPreset, countPreset); + // // // setIntegerParam(P_TimePreset, timePreset); + // // // setIntegerParam(P_UdpQueueHighWaterMark, udpQueueHighWaterMark); + // // // // callParamCallbacks(); + // // // setIntegerParam(P_Status, STATUS_IDLE); + // // // // callParamCallbacks(); + // // // // unlock(); + + // // // epicsRingBytesResetHighWaterMark(udpQueue); + + // // // } else { + + // // counts[ne.source == 0 ? 
ne.pixelId + 1 : 0] += 1; + // // this->queueForKafka(std::move(ne)); + + // // // lock(); + // // for (size_t i = 0; i < num_channels; ++i) { + // // setIntegerParam(P_Counts[i], counts[i]); + // // } + // // setIntegerParam(P_ElapsedTime, elapsedSeconds); + // // setIntegerParam(P_UdpQueueHighWaterMark, udpQueueHighWaterMark); + // // // callParamCallbacks(); + // // // unlock(); + // // // } + // // //} + + // // // Careful changing any of these magic numbers until I clean this up + // // // as you might end up calculating the wrong rate + // // // epicsTimeStamp currentTime = epicsTime::getCurrent(); + // // // if (epicsTimeDiffInNS(¤tTime, &lastRateUpdate) > + // // // minRateSamplePeriod) { + // // // timeSpans[countDiffsPtr] = + // // // epicsTimeDiffInNS(¤tTime, &lastRateUpdate); + + // // // uint64_t totalTime = 0; + // // // for (size_t i = 0; i <= rateAverageWindow; ++i) { + // // // totalTime += timeSpans[i]; + // // // } + + // // // lastRateUpdate = currentTime; + + // // // for (size_t i = 0; i <= this->num_channels; ++i) { + // // // countDiffs[i * rateAverageWindow + countDiffsPtr] = + // // // countDiff[i]; + + // // // uint64_t cnt = 0; + // // // for (size_t j = 0; j <= rateAverageWindow; ++j) { + // // // cnt += countDiffs[i * rateAverageWindow + j]; + // // // } + // // // rates[i] = cnt / (totalTime * 1e-9); + + // // // countDiff[i] = 0; + // // // } + + // // // countDiffsPtr = (countDiffsPtr + 1) % rateAverageWindow; + + // // // if (countDiffsPtr % 5 == 0) { + // // // // lock(); + // // // for (size_t i = 0; i < num_channels; ++i) { + // // // setIntegerParam(P_Rates[i], rates[i]); + // // // } + // // // // callParamCallbacks(); + // // // // unlock(); + // // // } + // // // } + // } } void asynStreamGeneratorDriver::produce(epicsRingBytesId eventQueue, diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index e3dfa25..fa5358d 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h 
@@ -59,8 +59,8 @@ struct __attribute__((__packed__)) MonitorEvent { struct __attribute__((__packed__)) NormalisedEvent { uint64_t timestamp; + uint32_t pixelId : 24; uint8_t source; - uint32_t pixelId; // inline NormalisedEvent(uint64_t timestamp, uint8_t source, uint32_t // pixelId) @@ -96,6 +96,9 @@ struct __attribute__((__packed__)) NormalisedEvent { #define P_RateString "RATE%d" #define P_ClearCountsString "C_%d" +#define P_UdpQueueHighWaterMarkString "UDP" +#define P_SortedQueueHighWaterMarkString "SORT" + /******************************************************************************* * Stream Generator Coordinating Class */ @@ -110,9 +113,11 @@ class asynStreamGeneratorDriver : public asynPortDriver { const int kafkaMaxPacketSize); virtual ~asynStreamGeneratorDriver(); + virtual asynStatus readInt32(asynUser *pasynUser, epicsInt32 *value); virtual asynStatus writeInt32(asynUser *pasynUser, epicsInt32 value); void receiveUDP(); + void partialSortEvents(); void processEvents(); void produceMonitor(); void produceDetector(); @@ -133,6 +138,10 @@ class asynStreamGeneratorDriver : public asynPortDriver { int *P_Rates; int *P_ClearCounts; + // System Status Parameter Identifying IDs + int P_UdpQueueHighWaterMark; + int P_SortedQueueHighWaterMark; + private: asynUser *pasynUDPUser; epicsEventId pausedEventId; @@ -142,6 +151,7 @@ class asynStreamGeneratorDriver : public asynPortDriver { const int kafkaMaxPacketSize; epicsRingBytesId udpQueue; + epicsRingBytesId sortedQueue; epicsRingBytesId monitorQueue; rd_kafka_t *monitorProducer; From e53a2a4f409eeb049c9627227d87d302ca3e91b4 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 6 Nov 2025 15:30:25 +0100 Subject: [PATCH 25/35] finished converting the processing to a batch-wise variant --- Makefile | 6 +- scripts/st.cmd | 3 +- src/asynStreamGeneratorDriver.cpp | 421 ++++++++---------------------- src/asynStreamGeneratorDriver.h | 2 +- 4 files changed, 123 insertions(+), 309 deletions(-) diff --git a/Makefile 
b/Makefile index 5ae267d..400d0cb 100644 --- a/Makefile +++ b/Makefile @@ -20,9 +20,11 @@ TEMPLATES += db/channels.db db/daq_common.db # Source files to build SOURCES += src/asynStreamGeneratorDriver.cpp -USR_CFLAGS += -Wall -Wextra -Wunused-result -Werror -fvisibility=hidden # -Wpedantic // Does not work because EPICS macros trigger warnings +# I don't think specifying the optimisation level like this is correct... +# but I doesn't hurt :D +USR_CFLAGS += -O3 -Wall -Wextra -Wunused-result -Werror -fvisibility=hidden # -Wpedantic // Does not work because EPICS macros trigger warnings # Required to support EV42/44 -USR_CXXFLAGS += -I../dep/flatbuffers/include/ -I../schemas +USR_CXXFLAGS += -O3 -I../dep/flatbuffers/include/ -I../schemas LIB_SYS_LIBS += rdkafka diff --git a/scripts/st.cmd b/scripts/st.cmd index 3ba498b..9c3d3a6 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -14,7 +14,8 @@ drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 1) # 10'000 * 243 = 2.43e6 events # asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "linkafka01:9092", "NEWEFU_TEST", "NEWEFU_TEST2", 1000, 8192) -asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "", "", "", 0, 0) +asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "ess01:9092", "NEWEFU_TEST", "NEWEFU_TEST2", 10000, 20480) +# asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "", "", "", 0, 0) dbLoadRecords("$(StreamGenerator_DB)daq_common.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNELS=5") diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index e7efa35..dec026f 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -1,5 +1,6 @@ #include "asynOctetSyncIO.h" #include "ev42_events_generated.h" +#include #include #include #include @@ -112,16 +113,16 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( 0), /* Default stack size*/ num_channels(numChannels + 1), kafkaEnabled(enableKafkaStream), 
monitorTopic(monitorTopic), detectorTopic(detectorTopic), - // so these first to are measured in max packet sizes + // measured in max packet sizes udpQueue( epicsRingBytesCreate(243 * udpQueueSize * sizeof(NormalisedEvent))), // TODO configurable sizes - sortedQueue(epicsRingBytesCreate(243 * udpQueueSize * sizeof(NormalisedEvent))), - // and these two are currently measured in event sizes... + sortedQueue( + epicsRingBytesCreate(243 * udpQueueSize * sizeof(NormalisedEvent))), monitorQueue( - epicsRingBytesCreate(kafkaQueueSize * sizeof(NormalisedEvent))), + epicsRingBytesCreate(243 * kafkaQueueSize * sizeof(NormalisedEvent))), detectorQueue( - epicsRingBytesCreate(kafkaQueueSize * sizeof(NormalisedEvent))), + epicsRingBytesCreate(243 * kafkaQueueSize * sizeof(NormalisedEvent))), kafkaMaxPacketSize(kafkaMaxPacketSize) { const char *functionName = "asynStreamGeneratorDriver"; @@ -237,14 +238,13 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( exit(1); } - /* Create the thread that orders packets of in preparation for our sinqDAQ stand-in + /* Create the thread that orders packets of in preparation for our sinqDAQ + * stand-in */ - status = - (asynStatus)(epicsThreadCreate( - "partialSort", - epicsThreadPriorityMedium, - epicsThreadGetStackSize(epicsThreadStackMedium), - (EPICSTHREADFUNC)::sortTask, this) == NULL); + status = (asynStatus)(epicsThreadCreate( + "partialSort", epicsThreadPriorityMedium, + epicsThreadGetStackSize(epicsThreadStackMedium), + (EPICSTHREADFUNC)::sortTask, this) == NULL); if (status) { epicsStdoutPrintf("%s:%s: epicsThreadCreate failure, status=%d\n", driverName, functionName, status); @@ -285,7 +285,8 @@ asynStreamGeneratorDriver::~asynStreamGeneratorDriver() { // epicsStdoutPrintf("Kafka Queue Size %d\n", rd_kafka_outq_len(producer)); } -asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, epicsInt32 *value) { +asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, + epicsInt32 *value) { int function = 
pasynUser->reason; asynStatus status = asynSuccess; @@ -294,21 +295,20 @@ asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, epicsInt32 getParamName(function, ¶mName); if (function == P_UdpQueueHighWaterMark) { - *value = - epicsRingBytesHighWaterMark(this->udpQueue) / sizeof(NormalisedEvent); - // Aparently resetting the watermark causes problems... - // at least concurrently :D - // epicsRingBytesResetHighWaterMark(this->udpQueue); - return asynSuccess; + *value = epicsRingBytesHighWaterMark(this->udpQueue) / + sizeof(NormalisedEvent); + // Aparently resetting the watermark causes problems... + // at least concurrently :D + // epicsRingBytesResetHighWaterMark(this->udpQueue); + return asynSuccess; } else if (function == P_SortedQueueHighWaterMark) { - *value = - epicsRingBytesHighWaterMark(this->sortedQueue) / sizeof(NormalisedEvent); - // epicsRingBytesResetHighWaterMark(this->sortedQueue); - return asynSuccess; + *value = epicsRingBytesHighWaterMark(this->sortedQueue) / + sizeof(NormalisedEvent); + // epicsRingBytesResetHighWaterMark(this->sortedQueue); + return asynSuccess; } return asynPortDriver::readInt32(pasynUser, value); - } asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, @@ -503,13 +503,11 @@ void asynStreamGeneratorDriver::receiveUDP() { } } - struct { - bool operator()(const NormalisedEvent l, - const NormalisedEvent r) const { - return l.timestamp > r.timestamp; + bool operator()(const NormalisedEvent l, const NormalisedEvent r) const { + return l.timestamp < r.timestamp; } -} reverseSortEventsByTime; +} oldestEventsFirst; inline int eventsInQueue(epicsRingBytesId id) { return epicsRingBytesUsedBytes(id) / sizeof(NormalisedEvent); @@ -526,35 +524,37 @@ void asynStreamGeneratorDriver::partialSortEvents() { int queuedEvents = 0; epicsTimeStamp lastSort = epicsTime::getCurrent(); epicsTimeStamp currentTime = lastSort; - + while (true) { queuedEvents = eventsInQueue(this->udpQueue); // in case we can't wait 
lastSort = epicsTime::getCurrent(); currentTime = lastSort; - // wait for mininmum packet frequency or enough packets to ensure we could potentially - // have at least 1 packet per mcpdid - while (queuedEvents < bufferedEvents && epicsTimeDiffInNS(¤tTime, &lastSort) < 250'000'000ull) { + // wait for mininmum packet frequency or enough packets to ensure we + // could potentially have at least 1 packet per mcpdid + while (queuedEvents < bufferedEvents && + epicsTimeDiffInNS(¤tTime, &lastSort) < 250'000'000ull) { epicsThreadSleep(0.0001); // seconds - currentTime = epicsTime::getCurrent(); - queuedEvents = eventsInQueue(this->udpQueue); - } + currentTime = epicsTime::getCurrent(); + queuedEvents = eventsInQueue(this->udpQueue); + } queuedEvents = std::min(queuedEvents, bufferedEvents); - if (queuedEvents) { - epicsRingBytesGet(this->udpQueue, (char *)events, queuedEvents * sizeof(NormalisedEvent)); + if (queuedEvents) { + epicsRingBytesGet(this->udpQueue, (char *)events, + queuedEvents * sizeof(NormalisedEvent)); - std::sort(events, events + queuedEvents, reverseSortEventsByTime); + std::sort(events, events + queuedEvents, oldestEventsFirst); - epicsRingBytesPut(this->sortedQueue, (char *)events, queuedEvents * sizeof(NormalisedEvent)); - } + epicsRingBytesPut(this->sortedQueue, (char *)events, + queuedEvents * sizeof(NormalisedEvent)); + } } - } -inline void asynStreamGeneratorDriver::queueForKafka(NormalisedEvent &&ne) { +inline void asynStreamGeneratorDriver::queueForKafka(NormalisedEvent &ne) { if (this->kafkaEnabled) { if (ne.source == 0) epicsRingBytesPut(this->monitorQueue, (char *)&ne, @@ -576,8 +576,10 @@ void asynStreamGeneratorDriver::processEvents() { // we have two buffers. 
We alternate between reading data into one of them, // and then merge sorting into the other - NormalisedEvent *eventsA = new NormalisedEvent[(bufferedEvents + extraBufferedEvents)]; - NormalisedEvent *eventsB = new NormalisedEvent[(bufferedEvents + extraBufferedEvents)]; + NormalisedEvent *eventsA = + new NormalisedEvent[(bufferedEvents + extraBufferedEvents)]; + NormalisedEvent *eventsB = + new NormalisedEvent[(bufferedEvents + extraBufferedEvents)]; NormalisedEvent *eventsBLastStart = eventsB + bufferedEvents; NormalisedEvent *eventsBLastEnd = eventsBLastStart; @@ -587,24 +589,36 @@ void asynStreamGeneratorDriver::processEvents() { epicsTimeStamp currentTime = lastProcess; epicsInt32 *counts = new epicsInt32[this->num_channels]; + double elapsedSeconds = 0; + uint64_t startTimestamp = std::numeric_limits::max(); + uint64_t currTimestamp; + epicsInt32 currStatus = STATUS_IDLE; + epicsInt32 prevStatus = STATUS_IDLE; + epicsInt32 countPreset; + epicsInt32 timePreset; + epicsInt32 presetChannel; epicsInt32 udpQueueHighWaterMark = 0; epicsInt32 sortedQueueHighWaterMark = 0; while (true) { - queuedEvents = eventsInQueue(this->sortedQueue); // in case we can't wait + queuedEvents = + eventsInQueue(this->sortedQueue); // in case we can't wait lastProcess = epicsTime::getCurrent(); currentTime = lastProcess; - // wait for mininmum packet frequency or enough packets to ensure we could potentially - // have at least 1 packet per mcpdid - while (queuedEvents < bufferedEvents && epicsTimeDiffInNS(¤tTime, &lastProcess) < 250'000'000ull) { + // wait for mininmum packet frequency or enough packets to ensure we + // could potentially have at least 1 packet per mcpdid + while (queuedEvents < bufferedEvents && + epicsTimeDiffInNS(¤tTime, &lastProcess) < 250'000'000ull) { epicsThreadSleep(0.0001); // seconds currentTime = epicsTime::getCurrent(); queuedEvents = eventsInQueue(this->sortedQueue); } + getIntegerParam(this->P_Status, &currStatus); + queuedEvents = 
std::min(queuedEvents, bufferedEvents); NormalisedEvent *newStartPtr = eventsA + extraBufferedEvents; @@ -614,274 +628,71 @@ void asynStreamGeneratorDriver::processEvents() { // new read, in the case that all new events are newer timewise, and // therefore, all events from eventsB have to be placed in a preceeding // position. - epicsRingBytesGet(this->sortedQueue, (char *)newStartPtr, queuedEvents * sizeof(NormalisedEvent)); + epicsRingBytesGet(this->sortedQueue, (char *)newStartPtr, + queuedEvents * sizeof(NormalisedEvent)); - int toProcess = eventsBLastEnd - eventsBLastStart + queuedEvents * 4 / 5; + int toProcess = + eventsBLastEnd - eventsBLastStart + queuedEvents * 4 / 5; // TODO could also consider an in-place merge - eventsBLastEnd = std::merge( - newStartPtr, newStartPtr + queuedEvents, - eventsBLastStart, eventsBLastEnd, - eventsA, reverseSortEventsByTime - ); + eventsBLastEnd = std::merge(newStartPtr, newStartPtr + queuedEvents, + eventsBLastStart, eventsBLastEnd, eventsA, + oldestEventsFirst); eventsBLastStart = eventsA + toProcess; - for (size_t i = 0; i < toProcess; ++i) { - counts[eventsA[i].source == 0 ? 
eventsA[i].pixelId + 1 : 0] += 1; + // TODO I haven't really taken care of the case that there are no events + + if (prevStatus == STATUS_IDLE && currStatus == STATUS_COUNTING) { + + getIntegerParam(this->P_CountPreset, &countPreset); + getIntegerParam(this->P_TimePreset, &timePreset); + getIntegerParam(this->P_MonitorChannel, &presetChannel); + + // reset status variables + startTimestamp = eventsA[0].timestamp; + elapsedSeconds = 0; + for (size_t i = 0; i < this->num_channels; ++i) { + counts[i] = 0; + } } - for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], counts[i]); + if (currStatus == STATUS_COUNTING) { + + // The elapsedSeconds are round differently depending on whether we + // are using them for comparison, or for showing to the user, to + // try and make sure the data we send to kafka is correct, while + // the measurement time also appears intuitive. + for (size_t i = 0; i < toProcess; ++i) { + counts[eventsA[i].source == 0 ? eventsA[i].pixelId + 1 : 0] += + 1; + elapsedSeconds = (eventsA[i].timestamp - startTimestamp) / 1e9; + + if ((countPreset && counts[presetChannel] >= countPreset) || + (timePreset && elapsedSeconds > (double)timePreset)) + break; + + // TODO also batchwise? + this->queueForKafka(eventsA[i]); + } + + for (size_t i = 0; i < num_channels; ++i) { + setIntegerParam(P_Counts[i], counts[i]); + } + setIntegerParam(P_ElapsedTime, (epicsInt32)elapsedSeconds); + + if ((countPreset && counts[presetChannel] >= countPreset) || + (timePreset && elapsedSeconds > (double)timePreset)) { + setIntegerParam(this->P_Status, STATUS_IDLE); + setIntegerParam(this->P_CountPreset, 0); + setIntegerParam(this->P_TimePreset, 0); + } } - //setIntegerParam(P_ElapsedTime, elapsedSeconds); + prevStatus = currStatus; std::swap(eventsA, eventsB); - } - - // // TODO this is totally decoupled!!! 
- // const size_t queueBufferSize = - // 10 * epicsRingBytesSize(this->udpQueue) / sizeof(NormalisedEvent); - - // //struct { - // // bool operator()(const NormalisedEvent l, - // // const NormalisedEvent r) const { - // // return l.timestamp > r.timestamp; - // // } - // //} smallestToLargest; - - // //// This should never be used. It is just instantiated to reserve a buffer - // //// of specific size. - // //std::vector queueBuffer; - // //queueBuffer.reserve(queueBufferSize); - - // //std::priority_queue, - // // decltype(smallestToLargest)> - // // timeQueue(smallestToLargest, std::move(queueBuffer)); - - // NormalisedEvent* timeQueue = new NormalisedEvent[queueBufferSize]; - - // // TODO epics doesn't seem to support uint64, you would need an array of - // // uint32. It does support int64 though.. so we start with that - // epicsInt32 *counts = new epicsInt32[this->num_channels]; - - // const uint64_t minRateSamplePeriod = 100'000'000ll; - // const size_t rateAverageWindow = 20; - // size_t countDiffsPtr = 0; - // epicsInt32 *rates = new epicsInt32[this->num_channels]; - // epicsInt32 *countDiff = new epicsInt32[this->num_channels]; - // epicsInt32 *countDiffs = - // new epicsInt32[this->num_channels * rateAverageWindow]; - // uint64_t *timeSpans = new uint64_t[this->num_channels]; - // epicsTimeStamp lastRateUpdate = epicsTime::getCurrent(); - - // asynStatus status = asynSuccess; - // NormalisedEvent ne; - // uint64_t newestTimestamp = 0; - // uint64_t startTimestamp = std::numeric_limits::max(); - // uint64_t currTimestamp; - // epicsInt32 elapsedSeconds = 0; - // epicsInt32 prevStatus = STATUS_IDLE; - // epicsInt32 currStatus = STATUS_IDLE; - // epicsInt32 countPreset = 0; - // epicsInt32 timePreset = 0; - // epicsInt32 presetChannel = 0; - // epicsInt32 udpQueueHighWaterMark = 0; - - // while (true) { - - // // I think mostly everything should already by sorted - // // could probably in the other thread guarantee that each packet is sorted - // // but 
probably it already is... - // // - // // so really we just need to merge sort chunks - - // // idea is to try and guarantee at least 1 packet per id or the min - // // frequency for each id without actually checking all ids - // // size_t timeQueuePtr = 0; - // // while (timeQueuePtr < 1500 * 10) { - - // // // TODO depending on how this is implemented, I may also need to - // // // check that there is is enough bytes, in case it does partial - // // // writes... - // // if (epicsRingBytesGet(udpQueue, (char *)&ne, - // // sizeof(NormalisedEvent))) { - // // // we should restart this ioc at least every few years, as at ns - // // // resolution with a uint64_t we will have an overflow after - // // // around 4 years - // // newestTimestamp = std::max(newestTimestamp, ne.timestamp); - - // // ++countDiff[ne.source == 0 ? ne.pixelId + 1 : 0]; - - // // timeQueue.push(std::move(ne)); - // // } - - // // } - - - // // while (timeQueue.empty() || - // // (timeQueue.size() < 1500 * 10 && - // // newestTimestamp - timeQueue.top().timestamp < 200'000'000ull)) { - - // // // TODO depending on how this is implemented, I may also need to - // // // check that there is is enough bytes, in case it does partial - // // // writes... - // // if (epicsRingBytesGet(udpQueue, (char *)&ne, - // // sizeof(NormalisedEvent))) { - // // // we should restart this ioc at least every few years, as at ns - // // // resolution with a uint64_t we will have an overflow after - // // // around 4 years - // // newestTimestamp = std::max(newestTimestamp, ne.timestamp); - - // // ++countDiff[ne.source == 0 ? 
ne.pixelId + 1 : 0]; - - // // timeQueue.push(std::move(ne)); - // // } - // // } - - // // ne = timeQueue.top(); - // // timeQueue.pop(); - - // // status = getIntegerParam(this->P_Status, &currStatus); - - // // udpQueueHighWaterMark = - // // epicsRingBytesHighWaterMark(udpQueue) / sizeof(NormalisedEvent); - - // // // if (currStatus == STATUS_COUNTING && prevStatus == STATUS_IDLE) { - // // // // Starting a new count - - // // // // get current count configuration - // // // getIntegerParam(this->P_CountPreset, &countPreset); - // // // getIntegerParam(this->P_TimePreset, &timePreset); - // // // getIntegerParam(this->P_MonitorChannel, &presetChannel); - - // // // // reset status variables - // // // startTimestamp = std::numeric_limits::max(); - // // // for (size_t i = 0; i < this->num_channels; ++i) { - // // // counts[i] = 0; - // // // } - - // // // // reset pvs - // // // // lock(); - // // // // for (size_t i = 0; i < num_channels; ++i) { - // // // // setIntegerParam(P_Counts[i], counts[i]); - // // // // } - // // // // setIntegerParam(P_ElapsedTime, 0); - // // // // callParamCallbacks(); - // // // // unlock(); - - // // // // TODO might consider throwing out current buffer as it is - // // // // from before count started? then again, 0.2 ms or whatever is - // // // // set above is quite a small preceeding amount of time, so - // // // // maybe it doesn't matter - // // // } - - // // // prevStatus = currStatus; - - // // //if (currStatus == STATUS_COUNTING) { - // // startTimestamp = std::min(startTimestamp, ne.timestamp); - // // currTimestamp = ne.timestamp; - // // elapsedSeconds = - // // 0 ? currTimestamp <= startTimestamp - // // : ((double)(currTimestamp - startTimestamp)) / 1e9; - - // // // is our count finished? 
- // // // if ((countPreset && counts[presetChannel] >= countPreset) || - // // // (timePreset && elapsedSeconds >= timePreset)) { - - // // // // filter out events that occured after the specified time - // // // if (ne.timestamp - startTimestamp <= countPreset) { - // // // counts[ne.source == 0 ? ne.pixelId + 1 : 0] += 1; - // // // this->queueForKafka(std::move(ne)); - - // // // // add any remaining events with the same timestamp - // // // // we could theoretically have a small overrun if the - // // // // timestamps are identical on the monitor channel - // // // while (!timeQueue.empty() && - // // // !timeQueue.top().timestamp == currTimestamp) { - // // // ne = timeQueue.top(); - // // // timeQueue.pop(); - // // // counts[ne.source == 0 ? ne.pixelId + 1 : 0] += 1; - // // // this->queueForKafka(std::move(ne)); - // // // } - // // // } - - // // // countPreset = 0; - // // // timePreset = 0; - - // // // // lock(); - // // // for (size_t i = 0; i < num_channels; ++i) { - // // // setIntegerParam(P_Counts[i], counts[i]); - // // // } - // // // setIntegerParam(P_ElapsedTime, elapsedSeconds); - // // // setIntegerParam(P_CountPreset, countPreset); - // // // setIntegerParam(P_TimePreset, timePreset); - // // // setIntegerParam(P_UdpQueueHighWaterMark, udpQueueHighWaterMark); - // // // // callParamCallbacks(); - // // // setIntegerParam(P_Status, STATUS_IDLE); - // // // // callParamCallbacks(); - // // // // unlock(); - - // // // epicsRingBytesResetHighWaterMark(udpQueue); - - // // // } else { - - // // counts[ne.source == 0 ? 
ne.pixelId + 1 : 0] += 1; - // // this->queueForKafka(std::move(ne)); - - // // // lock(); - // // for (size_t i = 0; i < num_channels; ++i) { - // // setIntegerParam(P_Counts[i], counts[i]); - // // } - // // setIntegerParam(P_ElapsedTime, elapsedSeconds); - // // setIntegerParam(P_UdpQueueHighWaterMark, udpQueueHighWaterMark); - // // // callParamCallbacks(); - // // // unlock(); - // // // } - // // //} - - // // // Careful changing any of these magic numbers until I clean this up - // // // as you might end up calculating the wrong rate - // // // epicsTimeStamp currentTime = epicsTime::getCurrent(); - // // // if (epicsTimeDiffInNS(¤tTime, &lastRateUpdate) > - // // // minRateSamplePeriod) { - // // // timeSpans[countDiffsPtr] = - // // // epicsTimeDiffInNS(¤tTime, &lastRateUpdate); - - // // // uint64_t totalTime = 0; - // // // for (size_t i = 0; i <= rateAverageWindow; ++i) { - // // // totalTime += timeSpans[i]; - // // // } - - // // // lastRateUpdate = currentTime; - - // // // for (size_t i = 0; i <= this->num_channels; ++i) { - // // // countDiffs[i * rateAverageWindow + countDiffsPtr] = - // // // countDiff[i]; - - // // // uint64_t cnt = 0; - // // // for (size_t j = 0; j <= rateAverageWindow; ++j) { - // // // cnt += countDiffs[i * rateAverageWindow + j]; - // // // } - // // // rates[i] = cnt / (totalTime * 1e-9); - - // // // countDiff[i] = 0; - // // // } - - // // // countDiffsPtr = (countDiffsPtr + 1) % rateAverageWindow; - - // // // if (countDiffsPtr % 5 == 0) { - // // // // lock(); - // // // for (size_t i = 0; i < num_channels; ++i) { - // // // setIntegerParam(P_Rates[i], rates[i]); - // // // } - // // // // callParamCallbacks(); - // // // // unlock(); - // // // } - // // // } - // } } void asynStreamGeneratorDriver::produce(epicsRingBytesId eventQueue, @@ -922,7 +733,7 @@ void asynStreamGeneratorDriver::produce(epicsRingBytesId eventQueue, // At least every 0.2 seconds if (total >= this->kafkaMaxPacketSize || - epicsTimeDiffInNS(&now, 
&last_sent) > 200'000'000ll) { + epicsTimeDiffInNS(&now, &last_sent) > 250'000'000ll) { last_sent = epicsTime::getCurrent(); if (total) { diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index fa5358d..f4d61b8 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -166,7 +166,7 @@ class asynStreamGeneratorDriver : public asynPortDriver { asynStatus createInt32Param(asynStatus status, char *name, int *variable, epicsInt32 initialValue = 0); - inline void queueForKafka(NormalisedEvent &&ne); + inline void queueForKafka(NormalisedEvent &ne); void produce(epicsRingBytesId eventQueue, rd_kafka_t *kafkaProducer, const char *topic, const char *source); From 2f50a21e8390201803110b3fba48fea6bdac6c0e Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 6 Nov 2025 15:34:32 +0100 Subject: [PATCH 26/35] use local mirrors --- .gitmodules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index 84a08ad..82f87d6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,6 @@ [submodule "dep/streaming-data-types"] path = dep/streaming-data-types - url = https://github.com/ess-dmsc/streaming-data-types.git + url = https://gitea.psi.ch/lin-controls/streaming-data-types.git [submodule "dep/flatbuffers"] path = dep/flatbuffers - url = https://github.com/google/flatbuffers.git + url = https://gitea.psi.ch/lin-controls/flatbuffers.git From 318357127ee9d200d5023e8df6ed75a606a40928 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 6 Nov 2025 15:36:40 +0100 Subject: [PATCH 27/35] use ssh variant... 
--- .gitmodules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index 82f87d6..a74445c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,6 @@ [submodule "dep/streaming-data-types"] path = dep/streaming-data-types - url = https://gitea.psi.ch/lin-controls/streaming-data-types.git + url = git@gitea.psi.ch:lin-controls/streaming-data-types.git [submodule "dep/flatbuffers"] path = dep/flatbuffers - url = https://gitea.psi.ch/lin-controls/flatbuffers.git + url = git@gitea.psi.ch:lin-controls/flatbuffers.git From 9d5ed11dac2d902b00507736f4f2a643a0d4ec78 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 6 Nov 2025 15:38:33 +0100 Subject: [PATCH 28/35] adds comment on cloning with dependencies --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index ab512df..ae26b8b 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,11 @@ # StreamGenerator +Clone the repository to a local directory via: + +``` +git clone --recurse-submodules -j8 git@gitea.psi.ch:lin-instrument-computers/StreamGenerator.git +``` + ## Dependencies Currently, this project requires a system install of librdkafka.
On Redhat, From 18da14f6d631001b3b91f246fe7bd57ba457fe46 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Thu, 6 Nov 2025 16:48:31 +0100 Subject: [PATCH 29/35] adds additional key that can be set --- src/asynStreamGeneratorDriver.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index dec026f..290361f 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -40,6 +40,11 @@ static rd_kafka_t *create_kafka_producer(const char *kafkaBroker) { set_kafka_config_key(conf, "bootstrap.servers", const_cast(kafkaBroker)); set_kafka_config_key(conf, "queue.buffering.max.messages", "10000000"); + // With 2e6 counts / s + // and a packet size of 20480 events (163920 bytes) + // this implies we need to send around 100 messages a second + // and need about .2 gigabit upload + // set_kafka_config_key(conf, "queue.buffering.max.kbytes", "10000000"); // Create the Producer producer = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr)); From 6faf23601e29db22d5be1847ef75cd6c46bd7d9e Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Fri, 7 Nov 2025 09:00:53 +0100 Subject: [PATCH 30/35] adds PV for number of missed udp packets --- db/daq_common.db | 11 +++++++++++ src/asynStreamGeneratorDriver.cpp | 4 ++++ src/asynStreamGeneratorDriver.h | 2 ++ 3 files changed, 17 insertions(+) diff --git a/db/daq_common.db b/db/daq_common.db index ef9f2ee..2f359f1 100644 --- a/db/daq_common.db +++ b/db/daq_common.db @@ -211,6 +211,17 @@ record(ai,"$(INSTR)$(NAME):ELAPSED-TIME") ################################################################################ # Stream Generator Status PVs +record(longin,"$(INSTR)$(NAME):UDP_DROPPED") +{ + field(DESC, "Max Events in Queue") + field(EGU, "Events") + field(DTYP, "asynInt32") + field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) DROP") + # field(SCAN, "I/O Intr") + field(SCAN, "1 second") + field(PINI, "YES") +} + 
record(longin,"$(INSTR)$(NAME):UDP_WATERMARK") { field(DESC, "Max Events in Queue") diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 290361f..af9dffd 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -167,6 +167,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( status = createInt32Param(status, pv_name_buffer, P_ClearCounts + i); } + status = createInt32Param(status, P_UdpDroppedString, &P_UdpDropped); status = createInt32Param(status, P_UdpQueueHighWaterMarkString, &P_UdpQueueHighWaterMark); status = createInt32Param(status, P_SortedQueueHighWaterMarkString, @@ -438,6 +439,8 @@ void asynStreamGeneratorDriver::receiveUDP() { lastBufferNumber[i] = 0; } + epicsInt32 droppedMessages = 0; + NormalisedEvent ne; while (true) { @@ -472,6 +475,7 @@ void asynStreamGeneratorDriver::receiveUDP() { ", last: %" PRIu64 "\n", driverName, functionName, header->McpdID, header->BufferNumber, lastBufferNumber[header->McpdID]); + setIntegerParam(P_UdpDropped, ++droppedMessages); } lastBufferNumber[header->McpdID] = header->BufferNumber; diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index f4d61b8..f1c5cf4 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -96,6 +96,7 @@ struct __attribute__((__packed__)) NormalisedEvent { #define P_RateString "RATE%d" #define P_ClearCountsString "C_%d" +#define P_UdpDroppedString "DROP" #define P_UdpQueueHighWaterMarkString "UDP" #define P_SortedQueueHighWaterMarkString "SORT" @@ -139,6 +140,7 @@ class asynStreamGeneratorDriver : public asynPortDriver { int *P_ClearCounts; // System Status Parameter Identifying IDs + int P_UdpDropped; int P_UdpQueueHighWaterMark; int P_SortedQueueHighWaterMark; From 8f8b78a9bf8f98ed7aa8b86567d467f63e5ee122 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Fri, 7 Nov 2025 13:20:15 +0100 Subject: [PATCH 31/35] adds a udp config that works with the correlation unit --- 
scripts/st.cmd | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/scripts/st.cmd b/scripts/st.cmd index 9c3d3a6..06a696e 100755 --- a/scripts/st.cmd +++ b/scripts/st.cmd @@ -8,14 +8,21 @@ require StreamGenerator, test epicsEnvSet("INSTR", "SQ:TEST:") epicsEnvSet("NAME", "SG") -drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 1) +# Local UDP Generator Test Config +# drvAsynIPPortConfigure("ASYN_IP_PORT", "127.0.0.1:9071:54321 UDP", 0, 0, 1) + +# Correlation Unit Config +drvAsynIPPortConfigure("ASYN_IP_PORT", "172.28.69.20:54321:54321 UDP", 0, 0, 1) # With a udpQueue and sortQueue size of 10'000 packets, we can hold in memory # 10'000 * 243 = 2.43e6 events -# asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "linkafka01:9092", "NEWEFU_TEST", "NEWEFU_TEST2", 1000, 8192) -asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "ess01:9092", "NEWEFU_TEST", "NEWEFU_TEST2", 10000, 20480) -# asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "", "", "", 0, 0) +# Kafka Broker and Topic Configuration +# asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "linkafka01:9092", "NEWEFU_TEST", "NEWEFU_TEST2", 10000, 20480) +# asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "ess01:9092", "NEWEFU_TEST", "NEWEFU_TEST2", 10000, 20480) + +# Don't send any kafka messages +asynStreamGenerator("ASYN_SG", "ASYN_IP_PORT", 4, 10000, "", "", "", 0, 0) dbLoadRecords("$(StreamGenerator_DB)daq_common.db", "INSTR=$(INSTR), NAME=$(NAME), PORT=ASYN_SG, CHANNELS=5") From 77ed74a203dbbeba599ac7499099227133338da4 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Fri, 7 Nov 2025 14:05:37 +0100 Subject: [PATCH 32/35] returns elapsed time as a double --- db/daq_common.db | 2 +- src/asynStreamGeneratorDriver.cpp | 17 ++++++++++++----- src/asynStreamGeneratorDriver.h | 2 ++ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/db/daq_common.db b/db/daq_common.db index 2f359f1..f564816 100644 --- a/db/daq_common.db 
+++ b/db/daq_common.db @@ -200,7 +200,7 @@ record(ai,"$(INSTR)$(NAME):ELAPSED-TIME") { field(DESC, "DAQ Measured Time") field(EGU, "sec") - field(DTYP, "asynInt32") + field(DTYP, "asynFloat64") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) TIME") # field(SCAN, "I/O Intr") field(SCAN, ".5 second") diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index af9dffd..767a108 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -90,12 +90,19 @@ static void detectorProducerTask(void *drvPvt) { */ asynStatus asynStreamGeneratorDriver::createInt32Param( - // TODO should show error if there is one asynStatus status, char *name, int *variable, epicsInt32 initialValue) { + // TODO should show error if there is one return (asynStatus)(status | createParam(name, asynParamInt32, variable) | setIntegerParam(*variable, initialValue)); } +asynStatus asynStreamGeneratorDriver::createFloat64Param( + asynStatus status, char *name, int *variable, double initialValue) { + // TODO should show error if there is one + return (asynStatus)(status | createParam(name, asynParamFloat64, variable) | + setDoubleParam(*variable, initialValue)); +} + /******************************************************************************* * Stream Generator Methods */ @@ -106,9 +113,9 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( const char *detectorTopic, const int kafkaQueueSize, const int kafkaMaxPacketSize) : asynPortDriver(portName, 1, /* maxAddr */ - asynInt32Mask | asynInt64Mask | + asynInt32Mask | asynFloat64Mask | asynDrvUserMask, /* Interface mask */ - asynInt32Mask | asynInt64Mask, /* Interrupt mask */ + asynInt32Mask, // | asynFloat64Mask, /* Interrupt mask */ 0, /* asynFlags. 
This driver does not block and it is not multi-device, but has a destructor ASYN_DESTRUCTIBLE our version of the Asyn @@ -139,7 +146,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( status = createInt32Param(status, P_StopString, &P_Stop); status = createInt32Param(status, P_CountPresetString, &P_CountPreset); status = createInt32Param(status, P_TimePresetString, &P_TimePreset); - status = createInt32Param(status, P_ElapsedTimeString, &P_ElapsedTime); + status = createFloat64Param(status, P_ElapsedTimeString, &P_ElapsedTime); status = createInt32Param(status, P_ClearElapsedTimeString, &P_ClearElapsedTime); status = @@ -688,7 +695,7 @@ void asynStreamGeneratorDriver::processEvents() { for (size_t i = 0; i < num_channels; ++i) { setIntegerParam(P_Counts[i], counts[i]); } - setIntegerParam(P_ElapsedTime, (epicsInt32)elapsedSeconds); + setDoubleParam(P_ElapsedTime, elapsedSeconds); if ((countPreset && counts[presetChannel] >= countPreset) || (timePreset && elapsedSeconds > (double)timePreset)) { diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index f1c5cf4..d986b02 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -168,6 +168,8 @@ class asynStreamGeneratorDriver : public asynPortDriver { asynStatus createInt32Param(asynStatus status, char *name, int *variable, epicsInt32 initialValue = 0); + asynStatus createFloat64Param(asynStatus status, char *name, int *variable, double initialValue = 0); + inline void queueForKafka(NormalisedEvent &ne); void produce(epicsRingBytesId eventQueue, rd_kafka_t *kafkaProducer, From ba07a8af9b270ffec997b86ae40e27fef5be9d70 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Fri, 7 Nov 2025 14:28:01 +0100 Subject: [PATCH 33/35] shows queue usage as a percentage --- db/daq_common.db | 10 +++++----- src/asynStreamGeneratorDriver.cpp | 19 ++++++++++++------- src/asynStreamGeneratorDriver.h | 5 ++++- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git 
a/db/daq_common.db b/db/daq_common.db index f564816..2c5f7f4 100644 --- a/db/daq_common.db +++ b/db/daq_common.db @@ -213,7 +213,7 @@ record(ai,"$(INSTR)$(NAME):ELAPSED-TIME") record(longin,"$(INSTR)$(NAME):UDP_DROPPED") { - field(DESC, "Max Events in Queue") + field(DESC, "UDP Packets Missed") field(EGU, "Events") field(DTYP, "asynInt32") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) DROP") @@ -224,8 +224,8 @@ record(longin,"$(INSTR)$(NAME):UDP_DROPPED") record(longin,"$(INSTR)$(NAME):UDP_WATERMARK") { - field(DESC, "Max Events in Queue") - field(EGU, "Events") + field(DESC, "UDP Queue Usage") + field(EGU, "%") field(DTYP, "asynInt32") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) UDP") # field(SCAN, "I/O Intr") @@ -235,8 +235,8 @@ record(longin,"$(INSTR)$(NAME):UDP_WATERMARK") record(longin,"$(INSTR)$(NAME):SORTED_WATERMARK") { - field(DESC, "Max Events in Queue") - field(EGU, "Events") + field(DESC, "Partial Sort Queue Usage") + field(EGU, "%") field(DTYP, "asynInt32") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) SORT") # field(SCAN, "I/O Intr") diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 767a108..208584e 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -96,8 +96,10 @@ asynStatus asynStreamGeneratorDriver::createInt32Param( setIntegerParam(*variable, initialValue)); } -asynStatus asynStreamGeneratorDriver::createFloat64Param( - asynStatus status, char *name, int *variable, double initialValue) { +asynStatus asynStreamGeneratorDriver::createFloat64Param(asynStatus status, + char *name, + int *variable, + double initialValue) { // TODO should show error if there is one return (asynStatus)(status | createParam(name, asynParamFloat64, variable) | setDoubleParam(*variable, initialValue)); @@ -114,7 +116,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( const int kafkaMaxPacketSize) : asynPortDriver(portName, 1, /* maxAddr */ asynInt32Mask | asynFloat64Mask | - asynDrvUserMask, /* 
Interface mask */ + asynDrvUserMask, /* Interface mask */ asynInt32Mask, // | asynFloat64Mask, /* Interrupt mask */ 0, /* asynFlags. This driver does not block and it is not multi-device, but has a @@ -125,6 +127,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( 0), /* Default stack size*/ num_channels(numChannels + 1), kafkaEnabled(enableKafkaStream), monitorTopic(monitorTopic), detectorTopic(detectorTopic), + udpQueueSize(udpQueueSize), kafkaQueueSize(kafkaQueueSize), // measured in max packet sizes udpQueue( epicsRingBytesCreate(243 * udpQueueSize * sizeof(NormalisedEvent))), @@ -308,15 +311,17 @@ asynStatus asynStreamGeneratorDriver::readInt32(asynUser *pasynUser, getParamName(function, ¶mName); if (function == P_UdpQueueHighWaterMark) { - *value = epicsRingBytesHighWaterMark(this->udpQueue) / - sizeof(NormalisedEvent); + const double toPercent = 100. / (243. * udpQueueSize); + *value = (epicsInt32)(epicsRingBytesHighWaterMark(this->udpQueue) / + sizeof(NormalisedEvent) * toPercent); // Aparently resetting the watermark causes problems... // at least concurrently :D // epicsRingBytesResetHighWaterMark(this->udpQueue); return asynSuccess; } else if (function == P_SortedQueueHighWaterMark) { - *value = epicsRingBytesHighWaterMark(this->sortedQueue) / - sizeof(NormalisedEvent); + const double toPercent = 100. / (243. 
* udpQueueSize); + *value = (epicsInt32)(epicsRingBytesHighWaterMark(this->sortedQueue) / + sizeof(NormalisedEvent) * toPercent); // epicsRingBytesResetHighWaterMark(this->sortedQueue); return asynSuccess; } diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index d986b02..ac8ce5e 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -150,8 +150,10 @@ class asynStreamGeneratorDriver : public asynPortDriver { const int num_channels; const bool kafkaEnabled; + const int kafkaQueueSize; const int kafkaMaxPacketSize; + const int udpQueueSize; epicsRingBytesId udpQueue; epicsRingBytesId sortedQueue; @@ -168,7 +170,8 @@ class asynStreamGeneratorDriver : public asynPortDriver { asynStatus createInt32Param(asynStatus status, char *name, int *variable, epicsInt32 initialValue = 0); - asynStatus createFloat64Param(asynStatus status, char *name, int *variable, double initialValue = 0); + asynStatus createFloat64Param(asynStatus status, char *name, int *variable, + double initialValue = 0); inline void queueForKafka(NormalisedEvent &ne); From c530de3566d9232c4b730e289a0bb25cb4eb2644 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Fri, 7 Nov 2025 16:14:05 +0100 Subject: [PATCH 34/35] does removing all logic in the udp receive thread help to improve the packet receive frequency? 
--- src/asynStreamGeneratorDriver.cpp | 186 +++++++++++++++++++----------- src/asynStreamGeneratorDriver.h | 2 + 2 files changed, 121 insertions(+), 67 deletions(-) diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 208584e..11bb120 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -65,6 +65,11 @@ static void udpPollerTask(void *drvPvt) { pSGD->receiveUDP(); } +static void udpNormaliserTask(void *drvPvt) { + asynStreamGeneratorDriver *pSGD = (asynStreamGeneratorDriver *)drvPvt; + pSGD->normaliseUDP(); +} + static void sortTask(void *drvPvt) { asynStreamGeneratorDriver *pSGD = (asynStreamGeneratorDriver *)drvPvt; pSGD->partialSortEvents(); @@ -131,6 +136,8 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( // measured in max packet sizes udpQueue( epicsRingBytesCreate(243 * udpQueueSize * sizeof(NormalisedEvent))), + normalisedQueue( + epicsRingBytesCreate(243 * udpQueueSize * sizeof(NormalisedEvent))), // TODO configurable sizes sortedQueue( epicsRingBytesCreate(243 * udpQueueSize * sizeof(NormalisedEvent))), @@ -267,6 +274,20 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( exit(1); } + /* Create the thread normalises the events + */ + status = + (asynStatus)(epicsThreadCreate( + "eventNormaliser", + epicsThreadPriorityMedium, // epicsThreadPriorityMax, + epicsThreadGetStackSize(epicsThreadStackMedium), + (EPICSTHREADFUNC)::udpNormaliserTask, this) == NULL); + if (status) { + epicsStdoutPrintf("%s:%s: epicsThreadCreate failure, status=%d\n", + driverName, functionName, status); + exit(1); + } + // UDP Receive Setup status = pasynOctetSyncIO->connect(ipPortName, 0, &pasynUDPUser, NULL); @@ -278,7 +299,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( /* Create the thread that receives UDP traffic in the background */ status = (asynStatus)(epicsThreadCreate( - "udp_receive", epicsThreadPriorityMedium, + "udp_receive", epicsThreadPriorityMax, 
epicsThreadGetStackSize(epicsThreadStackMedium), (EPICSTHREADFUNC)::udpPollerTask, this) == NULL); if (status) { @@ -424,6 +445,45 @@ asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, void asynStreamGeneratorDriver::receiveUDP() { + const char *functionName = "receiveUDP"; + asynStatus status = asynSuccess; + int isConnected = 1; + std::size_t received; + int eomReason; + + const std::size_t bufferSize = 1500; + char buffer[bufferSize]; + + while (true) { + + // status = pasynManager->isConnected(pasynUDPUser, &isConnected); + + // if (!isConnected) + // asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, + // "%s:%s: isConnected = %d\n", driverName, functionName, + // isConnected); + + status = pasynOctetSyncIO->read(pasynUDPUser, buffer, bufferSize, + 0, // timeout + &received, &eomReason); + + if (received) { + + if ((received - 42) % 6 == 0) { + + epicsRingBytesPut(this->udpQueue, (char *)buffer, bufferSize); + + } else { + asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, + "%s:%s: invalid UDP packet\n", driverName, + functionName); + } + } + } +} + +void asynStreamGeneratorDriver::normaliseUDP() { + // TODO fix time overflows // Regarding time overflow. // * the header time stamp is 3 words, i.e. 48 bits. @@ -433,7 +493,7 @@ void asynStreamGeneratorDriver::receiveUDP() { // * so maybe this isn't necessary to solve, as long as we restart the // electronics at least once a year... 
- const char *functionName = "receiveUDP"; + const char *functionName = "normaliseUDP"; asynStatus status = asynSuccess; int isConnected = 1; std::size_t received; @@ -445,8 +505,11 @@ void asynStreamGeneratorDriver::receiveUDP() { const std::size_t bufferSize = 1500; char buffer[bufferSize]; + const std::size_t resultBufferSize = 243; + NormalisedEvent resultBuffer[resultBufferSize]; + // We have 10 mcpdids - uint64_t *lastBufferNumber = new uint64_t[10]; + uint64_t lastBufferNumber[10]; for (size_t i = 0; i < 10; ++i) { lastBufferNumber[i] = 0; } @@ -457,69 +520,56 @@ void asynStreamGeneratorDriver::receiveUDP() { while (true) { - status = pasynManager->isConnected(pasynUDPUser, &isConnected); + if (epicsRingBytesUsedBytes(this->udpQueue) > 1500) { - if (!isConnected) - asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, - "%s:%s: isConnected = %d\n", driverName, functionName, - isConnected); - - status = pasynOctetSyncIO->read(pasynUDPUser, buffer, bufferSize, - 0, // timeout - &received, &eomReason); - - if (received) { + epicsRingBytesGet(this->udpQueue, (char *)buffer, bufferSize); UDPHeader *header = (UDPHeader *)buffer; std::size_t total_events = (header->BufferLength - 21) / 3; - if (received == total_events * 6 + 42) { - - if (header->BufferNumber - lastBufferNumber[header->McpdID] > - 1 && - lastBufferNumber[header->McpdID] != - std::numeric_limits< - decltype(header->BufferNumber)>::max()) { - asynPrint( - pasynUserSelf, ASYN_TRACE_ERROR, - "%s:%s: missed packet on id: %d. 
Received: %" PRIu64 - ", last: %" PRIu64 "\n", - driverName, functionName, header->McpdID, - header->BufferNumber, lastBufferNumber[header->McpdID]); - setIntegerParam(P_UdpDropped, ++droppedMessages); - } - lastBufferNumber[header->McpdID] = header->BufferNumber; - - for (std::size_t i = 0; i < total_events; ++i) { - char *event = (buffer + 21 * 2 + i * 6); - - if (event[5] & 0x80) { // Monitor Event - MonitorEvent *m_event = (MonitorEvent *)event; - - ne.timestamp = - header->nanosecs() + (uint64_t)m_event->nanosecs(); - ne.source = 0; - ne.pixelId = m_event->DataID; - - } else { // Detector Event - DetectorEvent *d_event = (DetectorEvent *)event; - - ne.timestamp = - header->nanosecs() + (uint64_t)d_event->nanosecs(); - ne.source = header->McpdID; - ne.pixelId = d_event->pixelId(header->McpdID); - } - - epicsRingBytesPut(this->udpQueue, (char *)&ne, - sizeof(NormalisedEvent)); - } - - } else { + if (header->BufferNumber - lastBufferNumber[header->McpdID] > 1 && + lastBufferNumber[header->McpdID] != + std::numeric_limits< + decltype(header->BufferNumber)>::max()) { asynPrint(pasynUserSelf, ASYN_TRACE_ERROR, - "%s:%s: invalid UDP packet\n", driverName, - functionName); + "%s:%s: missed packet on id: %d. 
Received: %" PRIu64 + ", last: %" PRIu64 "\n", + driverName, functionName, header->McpdID, + header->BufferNumber, + lastBufferNumber[header->McpdID]); + setIntegerParam(P_UdpDropped, ++droppedMessages); } + lastBufferNumber[header->McpdID] = header->BufferNumber; + + for (std::size_t i = 0; i < total_events; ++i) { + char *event = (buffer + 21 * 2 + i * 6); + + if (event[5] & 0x80) { // Monitor Event + MonitorEvent *m_event = (MonitorEvent *)event; + + ne.timestamp = + header->nanosecs() + (uint64_t)m_event->nanosecs(); + ne.source = 0; + ne.pixelId = m_event->DataID; + + } else { // Detector Event + DetectorEvent *d_event = (DetectorEvent *)event; + + ne.timestamp = + header->nanosecs() + (uint64_t)d_event->nanosecs(); + ne.source = header->McpdID; + ne.pixelId = d_event->pixelId(header->McpdID); + } + + resultBuffer[i] = ne; + } + + epicsRingBytesPut(this->normalisedQueue, (char *)resultBuffer, + total_events * sizeof(NormalisedEvent)); + + } else { + epicsThreadSleep(0.0001); // seconds } } } @@ -540,7 +590,7 @@ void asynStreamGeneratorDriver::partialSortEvents() { // x * number of ids * max events in packet int bufferedEvents = 5 * 10 * 243; - NormalisedEvent *events = new NormalisedEvent[bufferedEvents]; + NormalisedEvent events[bufferedEvents]; int queuedEvents = 0; epicsTimeStamp lastSort = epicsTime::getCurrent(); @@ -548,7 +598,8 @@ void asynStreamGeneratorDriver::partialSortEvents() { while (true) { - queuedEvents = eventsInQueue(this->udpQueue); // in case we can't wait + queuedEvents = + eventsInQueue(this->normalisedQueue); // in case we can't wait lastSort = epicsTime::getCurrent(); currentTime = lastSort; @@ -558,13 +609,13 @@ void asynStreamGeneratorDriver::partialSortEvents() { epicsTimeDiffInNS(¤tTime, &lastSort) < 250'000'000ull) { epicsThreadSleep(0.0001); // seconds currentTime = epicsTime::getCurrent(); - queuedEvents = eventsInQueue(this->udpQueue); + queuedEvents = eventsInQueue(this->normalisedQueue); } queuedEvents = 
std::min(queuedEvents, bufferedEvents); if (queuedEvents) { - epicsRingBytesGet(this->udpQueue, (char *)events, + epicsRingBytesGet(this->normalisedQueue, (char *)events, queuedEvents * sizeof(NormalisedEvent)); std::sort(events, events + queuedEvents, oldestEventsFirst); @@ -597,10 +648,11 @@ void asynStreamGeneratorDriver::processEvents() { // we have two buffers. We alternate between reading data into one of them, // and then merge sorting into the other - NormalisedEvent *eventsA = - new NormalisedEvent[(bufferedEvents + extraBufferedEvents)]; - NormalisedEvent *eventsB = - new NormalisedEvent[(bufferedEvents + extraBufferedEvents)]; + NormalisedEvent eventsABuffer[(bufferedEvents + extraBufferedEvents)]; + NormalisedEvent eventsBBuffer[(bufferedEvents + extraBufferedEvents)]; + + NormalisedEvent *eventsA = &eventsABuffer[0]; + NormalisedEvent *eventsB = &eventsBBuffer[0]; NormalisedEvent *eventsBLastStart = eventsB + bufferedEvents; NormalisedEvent *eventsBLastEnd = eventsBLastStart; @@ -609,7 +661,7 @@ void asynStreamGeneratorDriver::processEvents() { epicsTimeStamp lastProcess = epicsTime::getCurrent(); epicsTimeStamp currentTime = lastProcess; - epicsInt32 *counts = new epicsInt32[this->num_channels]; + epicsInt32 counts[this->num_channels]; double elapsedSeconds = 0; uint64_t startTimestamp = std::numeric_limits::max(); uint64_t currTimestamp; diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index ac8ce5e..1517697 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -118,6 +118,7 @@ class asynStreamGeneratorDriver : public asynPortDriver { virtual asynStatus writeInt32(asynUser *pasynUser, epicsInt32 value); void receiveUDP(); + void normaliseUDP(); void partialSortEvents(); void processEvents(); void produceMonitor(); @@ -155,6 +156,7 @@ class asynStreamGeneratorDriver : public asynPortDriver { const int udpQueueSize; epicsRingBytesId udpQueue; + epicsRingBytesId normalisedQueue; 
epicsRingBytesId sortedQueue; epicsRingBytesId monitorQueue; From 9d93238db4a20650c53e2119bf331e2698f38183 Mon Sep 17 00:00:00 2001 From: Edward Wall Date: Fri, 14 Nov 2025 14:07:54 +0100 Subject: [PATCH 35/35] change counts to 64 bit integer, improve broken packet check, correct order of updating status --- db/channels.db | 5 ++- db/daq_common.db | 54 ++++++++++++++++++++++++------- src/asynStreamGeneratorDriver.cpp | 47 +++++++++++++++++---------- src/asynStreamGeneratorDriver.h | 11 ++++--- 4 files changed, 82 insertions(+), 35 deletions(-) diff --git a/db/channels.db b/db/channels.db index 9df5d06..6e07ee6 100644 --- a/db/channels.db +++ b/db/channels.db @@ -52,15 +52,14 @@ record(longout, "$(INSTR)$(NAME):C$(CHANNEL)") ################################################################################ # Read all monitors values -record(longin, "$(INSTR)$(NAME):M$(CHANNEL)") +record(int64in, "$(INSTR)$(NAME):M$(CHANNEL)") { field(DESC, "DAQ CH$(CHANNEL)") field(EGU, "cts") - field(DTYP, "asynInt32") + field(DTYP, "asynInt64") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) COUNTS$(CHANNEL)") # This is probably too fast. 
We could trigger things the same as sinqDAQ to ensure the db is update in the same order # field(SCAN, "I/O Intr") - field(SCAN, ".2 second") field(PINI, "YES") } diff --git a/db/daq_common.db b/db/daq_common.db index 2c5f7f4..22ad9a1 100644 --- a/db/daq_common.db +++ b/db/daq_common.db @@ -15,14 +15,13 @@ record(longout, "$(INSTR)$(NAME):FULL-RESET") ################################################################################ # Status Variables -# record(stringin, "$(INSTR)$(NAME):MsgTxt") -# { -# field(DESC, "Unexpected received response") -# field(DTYP, "devDAQStringError") -# field(FLNK, "$(INSTR)$(NAME):INVALID-CONFIG") -# } - -record(mbbi, "$(INSTR)$(NAME):STATUS") +# We separate the RAW-STATUS and the STATUS PV so that the state can be updated +# in a sequence that guarantees that we include the most recent time and +# counts before the status switches back to Idle. +# We do this via a sequenced update +# +# RAW-STATUS -> ELAPSED-TIME -> M* -> STATUS +record(mbbi, "$(INSTR)$(NAME):RAW-STATUS") { field(DESC, "DAQ Status") field(DTYP, "asynInt32") @@ -40,7 +39,41 @@ record(mbbi, "$(INSTR)$(NAME):STATUS") field(FRST, "INVALID") # This is probably too fast. 
We could trigger things the same as sinqDAQ to ensure the db is update in the same order #field(SCAN, "I/O Intr") - field(SCAN, ".5 second") + field(SCAN, ".2 second") + field(FLNK, "$(INSTR)$(NAME):READALL") + field(PINI, "YES") +} + +record(fanout, "$(INSTR)$(NAME):READALL") +{ + field(SELM, "All") + field(LNK0, "$(INSTR)$(NAME):ELAPSED-TIME PP") + field(LNK1, "$(INSTR)$(NAME):M0") + field(LNK2, "$(INSTR)$(NAME):M1") + field(LNK3, "$(INSTR)$(NAME):M2") + field(LNK4, "$(INSTR)$(NAME):M3") + field(LNK5, "$(INSTR)$(NAME):M4") + # Doesn't seem to be a problem to have more in here :D + # field(LNK6, "$(INSTR)$(NAME):M5") + # field(LNK7, "$(INSTR)$(NAME):M6") + field(FLNK, "$(INSTR)$(NAME):STATUS") +} + +record(mbbi, "$(INSTR)$(NAME):STATUS") +{ + field(INP, "$(INSTR)$(NAME):RAW-STATUS NPP") + field(DESC, "DAQ Status") + field(ZRVL, "0") + field(ZRST, "Idle") + field(ONVL, "1") + field(ONST, "Counting") + field(TWVL, "2") + field(TWST, "Low rate") + field(THVL, "3") + field(THST, "Paused") + # 4 should never happen, if it does it means the DAQ reports undocumented statusbits + field(FRVL, "4") + field(FRST, "INVALID") field(PINI, "YES") } @@ -196,14 +229,13 @@ record(longout, "$(INSTR)$(NAME):CT") ################################################################################ # Read all monitors values -record(ai,"$(INSTR)$(NAME):ELAPSED-TIME") +record(ai, "$(INSTR)$(NAME):ELAPSED-TIME") { field(DESC, "DAQ Measured Time") field(EGU, "sec") field(DTYP, "asynFloat64") field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) TIME") # field(SCAN, "I/O Intr") - field(SCAN, ".5 second") field(PINI, "YES") # field(FLNK, "$(INSTR)$(NAME):ETO") } diff --git a/src/asynStreamGeneratorDriver.cpp b/src/asynStreamGeneratorDriver.cpp index 11bb120..fbbf86c 100644 --- a/src/asynStreamGeneratorDriver.cpp +++ b/src/asynStreamGeneratorDriver.cpp @@ -101,6 +101,13 @@ asynStatus 
asynStreamGeneratorDriver::createInt64Param( + asynStatus status, char *name, int *variable, epicsInt64 initialValue) { + // TODO should show error if there is one + return (asynStatus)(status | createParam(name, asynParamInt64, variable) | + setInteger64Param(*variable, initialValue)); +} + asynStatus asynStreamGeneratorDriver::createFloat64Param(asynStatus status, char *name, int *variable, @@ -120,7 +127,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( const char *detectorTopic, const int kafkaQueueSize, const int kafkaMaxPacketSize) : asynPortDriver(portName, 1, /* maxAddr */ - asynInt32Mask | asynFloat64Mask | + asynInt32Mask | asynInt64Mask | asynFloat64Mask | asynDrvUserMask, /* Interface mask */ asynInt32Mask, // | asynFloat64Mask, /* Interrupt mask */ 0, /* asynFlags. This driver does not block and it is @@ -173,7 +180,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver( for (std::size_t i = 0; i < this->num_channels; ++i) { memset(pv_name_buffer, 0, 100); epicsSnprintf(pv_name_buffer, 100, P_CountsString, i); - status = createInt32Param(status, pv_name_buffer, P_Counts + i); + status = createInt64Param(status, pv_name_buffer, P_Counts + i); memset(pv_name_buffer, 0, 100); epicsSnprintf(pv_name_buffer, 100, P_RateString, i); @@ -407,7 +414,7 @@ asynStatus asynStreamGeneratorDriver::writeInt32(asynUser *pasynUser, } } else if (isClearCount) { if (!currentStatus) { - setIntegerParam(P_Counts[channelToClear], 0); + setInteger64Param(P_Counts[channelToClear], 0); status = (asynStatus)callParamCallbacks(); } else { return asynError; @@ -447,7 +454,7 @@ void asynStreamGeneratorDriver::receiveUDP() { const char *functionName = "receiveUDP"; asynStatus status = asynSuccess; - int isConnected = 1; + // int isConnected = 1; std::size_t received; int eomReason; @@ -468,8 +475,10 @@ void asynStreamGeneratorDriver::receiveUDP() { &received, &eomReason); if (received) { + const uint16_t bufferLength = ((uint16_t *)buffer)[0]; + const std::size_t 
headerLength = 42; - if ((received - 42) % 6 == 0) { + if (received >= headerLength && received == bufferLength * 2) { epicsRingBytesPut(this->udpQueue, (char *)buffer, bufferSize); @@ -499,7 +508,7 @@ void asynStreamGeneratorDriver::normaliseUDP() { std::size_t received; int eomReason; - // The correlation unit sents messages with a maximum size of 1500 bytes. + // The correlation unit sends messages with a maximum size of 1500 bytes. // These messages don't have any obious start or end to synchronise // against... const std::size_t bufferSize = 1500; @@ -516,6 +525,9 @@ void asynStreamGeneratorDriver::normaliseUDP() { epicsInt32 droppedMessages = 0; + const UDPHeader *header; + const DetectorEvent *d_event; + const MonitorEvent *m_event; NormalisedEvent ne; while (true) { @@ -524,9 +536,8 @@ void asynStreamGeneratorDriver::normaliseUDP() { epicsRingBytesGet(this->udpQueue, (char *)buffer, bufferSize); - UDPHeader *header = (UDPHeader *)buffer; - - std::size_t total_events = (header->BufferLength - 21) / 3; + header = (UDPHeader *)buffer; + const std::size_t total_events = (header->BufferLength - 21) / 3; if (header->BufferNumber - lastBufferNumber[header->McpdID] > 1 && lastBufferNumber[header->McpdID] != @@ -540,22 +551,22 @@ void asynStreamGeneratorDriver::normaliseUDP() { lastBufferNumber[header->McpdID]); setIntegerParam(P_UdpDropped, ++droppedMessages); } + lastBufferNumber[header->McpdID] = header->BufferNumber; for (std::size_t i = 0; i < total_events; ++i) { char *event = (buffer + 21 * 2 + i * 6); + const bool isMonitorEvent = event[5] & 0x80; - if (event[5] & 0x80) { // Monitor Event - MonitorEvent *m_event = (MonitorEvent *)event; - + if (isMonitorEvent) { + m_event = (MonitorEvent *)event; ne.timestamp = header->nanosecs() + (uint64_t)m_event->nanosecs(); ne.source = 0; ne.pixelId = m_event->DataID; - } else { // Detector Event - DetectorEvent *d_event = (DetectorEvent *)event; - + } else { + d_event = (DetectorEvent *)event; ne.timestamp = 
header->nanosecs() + (uint64_t)d_event->nanosecs(); ne.source = header->McpdID; @@ -661,7 +672,7 @@ void asynStreamGeneratorDriver::processEvents() { epicsTimeStamp lastProcess = epicsTime::getCurrent(); epicsTimeStamp currentTime = lastProcess; - epicsInt32 counts[this->num_channels]; + epicsInt64 counts[this->num_channels]; double elapsedSeconds = 0; uint64_t startTimestamp = std::numeric_limits::max(); uint64_t currTimestamp; @@ -741,6 +752,8 @@ void asynStreamGeneratorDriver::processEvents() { 1; elapsedSeconds = (eventsA[i].timestamp - startTimestamp) / 1e9; + // TODO should really check there are no more events with the + // same final timestamp if ((countPreset && counts[presetChannel] >= countPreset) || (timePreset && elapsedSeconds > (double)timePreset)) break; @@ -750,7 +763,7 @@ void asynStreamGeneratorDriver::processEvents() { } for (size_t i = 0; i < num_channels; ++i) { - setIntegerParam(P_Counts[i], counts[i]); + setInteger64Param(P_Counts[i], counts[i]); } setDoubleParam(P_ElapsedTime, elapsedSeconds); diff --git a/src/asynStreamGeneratorDriver.h b/src/asynStreamGeneratorDriver.h index 1517697..3783374 100644 --- a/src/asynStreamGeneratorDriver.h +++ b/src/asynStreamGeneratorDriver.h @@ -22,7 +22,7 @@ struct __attribute__((__packed__)) UDPHeader { uint16_t Parameter2[3]; uint16_t Parameter3[3]; - inline uint64_t nanosecs() { + inline uint64_t nanosecs() const { uint64_t nsec{((uint64_t)TimeStamp[2]) << 32 | ((uint64_t)TimeStamp[1]) << 16 | (uint64_t)TimeStamp[0]}; return nsec * 100; @@ -35,8 +35,8 @@ struct __attribute__((__packed__)) DetectorEvent { uint16_t YPosition : 10; uint16_t Amplitude : 8; uint16_t Id : 1; - inline uint32_t nanosecs() { return TimeStamp * 100; } - inline uint64_t pixelId(uint32_t mpcdId) { + inline uint32_t nanosecs() const { return TimeStamp * 100; } + inline uint64_t pixelId(uint32_t mpcdId) const { const uint32_t x_pixels = 128; const uint32_t y_pixels = 128; return (mpcdId - 1) * x_pixels * y_pixels + @@ -50,7 +50,7 @@
struct __attribute__((__packed__)) MonitorEvent { uint64_t DataID : 4; uint64_t TriggerID : 3; uint64_t Id : 1; - inline uint32_t nanosecs() { return TimeStamp * 100; } + inline uint32_t nanosecs() const { return TimeStamp * 100; } }; /******************************************************************************* @@ -172,6 +172,9 @@ class asynStreamGeneratorDriver : public asynPortDriver { asynStatus createInt32Param(asynStatus status, char *name, int *variable, epicsInt32 initialValue = 0); + asynStatus createInt64Param(asynStatus status, char *name, int *variable, + epicsInt64 initialValue = 0); + asynStatus createFloat64Param(asynStatus status, char *name, int *variable, double initialValue = 0);