Fixes some logic bugs and adds rate calculation #2
@@ -67,7 +67,7 @@ record(ai, "$(INSTR)$(NAME):R$(CHANNEL)")
 {
     field(DESC, "Rate of DAQ CH$(CHANNEL)")
     field(EGU, "cts/sec")
-    field(DTYP, "asynInt32")
+    field(DTYP, "asynFloat64")
     field(INP, "@asyn($(PORT),0,$(TIMEOUT=1)) RATE$(CHANNEL)")
     field(SCAN, ".2 second")
     # field(SCAN, "I/O Intr")
@@ -47,7 +47,7 @@ while True:
     header[17] = t_high >> 8
 
     num_events = random.randint(0, 243)
-    # num_events = 243
+    num_events = 243
     # num_events = 1
 
     # update buffer length
@@ -61,7 +61,7 @@ while True:
     # reduce also the number of checks on the parsing side of things...
 
     is_monitor = random.randint(0, 9)
-    # is_monitor = 4
+    is_monitor = 4
 
     header[11] = 0 if is_monitor > 3 else random.randint(1,9)
 
@@ -71,7 +71,7 @@ while True:
     header[7] = (header[7] + (header[6] == 0)) % (0xff + 1)
     buffer_ids[header[11]] = header[6], header[7]
 
-    tosend = list(header)
+    tosend = []
 
     if is_monitor > 3:
 
@@ -79,7 +79,7 @@ while True:
         d = list(data)
 
         monitor = random.randint(0,3)
-        # monitor = 0
+        monitor = 0
 
         d[5] = (1 << 7) | monitor
 
@@ -89,7 +89,8 @@ while True:
         d[1] = (event_timestamp >> 8) & 0xff
         d[2] = (event_timestamp >> 16) & 0x07
 
-        tosend += d
+        # completely reversed sorting
+        tosend = d + tosend
 
     else:
 
@@ -110,9 +111,10 @@ while True:
         d[1] = (event_timestamp >> 8) & 0xff
         d[2] |= (event_timestamp >> 16) & 0x07
 
-        tosend += d
+        # completely reversed sorting
+        tosend = d + tosend
 
-    sock.sendto(bytes(tosend), ('127.0.0.1', 54321))
+    sock.sendto(bytes(header + tosend), ('127.0.0.1', 54321))
     mv = memoryview(bytes(header)).cast('H')
     print(f'Sent packet {mv[3]} with {num_events} events {base_timestamp}')
-    # time.sleep(.01)
+    # time.sleep(.1)
@@ -191,7 +191,7 @@ asynStreamGeneratorDriver::asynStreamGeneratorDriver(
 
         memset(pv_name_buffer, 0, 100);
         epicsSnprintf(pv_name_buffer, 100, P_RateString, i + 1);
-        status = createInt32Param(status, pv_name_buffer, P_Rates + i);
+        status = createFloat64Param(status, pv_name_buffer, P_Rates + i);
 
         memset(pv_name_buffer, 0, 100);
         epicsSnprintf(pv_name_buffer, 100, P_ClearCountsString, i + 1);
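Together with the DTYP change in the first hunk, the rate parameter is registered here as Float64 instead of Int32. A counts-per-second value is generally fractional, so an integer parameter would truncate it. The snippet below only illustrates that point with made-up numbers; it is not code from the driver:

```cpp
#include <cstdio>

int main() {
    // Made-up figures: 17 counts collected over a 2.3 second window.
    const double counts = 17.0;
    const double windowSeconds = 2.3;

    const double rateFloat64 = counts / windowSeconds;                   // ~7.39 cts/sec
    const int    rateInt32   = static_cast<int>(counts / windowSeconds); // 7, fraction lost

    std::printf("float64 rate: %.2f cts/sec\n", rateFloat64);
    std::printf("int32 rate:   %d cts/sec\n", rateInt32);
    return 0;
}
```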
@@ -730,6 +730,21 @@ void asynStreamGeneratorDriver::processEvents() {
     double elapsedSeconds = 0;
     uint64_t startTimestamp = std::numeric_limits<uint64_t>::max();
 
+    std::size_t ratePtr = 0;
+    const uint8_t rateWindowSize = 10;
+    uint32_t ratesWindow[this->num_channels * rateWindowSize];
+    uint64_t ratesStartTimes[rateWindowSize];
+    uint64_t ratesStopTimes[rateWindowSize];
+
+    for (std::size_t i = 0; i < this->num_channels * rateWindowSize; ++i) {
+        ratesWindow[i] = 0;
+    }
+
+    for (std::size_t i = 0; i < this->num_channels; ++i) {
+        ratesStartTimes[i] = std::numeric_limits<uint64_t>::max();
+        ratesStopTimes[i] = 0;
+    }
+
     epicsInt32 currStatus = STATUS_IDLE;
     epicsInt32 prevStatus = STATUS_IDLE;
     epicsInt32 countPreset;
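The buffers introduced here form the sliding window used further down: ratesWindow is a flat array of rateWindowSize bins, each num_channels counters wide and stored bin-major, so the counter for a given channel and bin sits at index channel + num_channels * bin, while ratesStartTimes and ratesStopTimes keep the first and last event timestamp seen in each bin. A small sketch of that layout with illustrative sizes (the real num_channels comes from the driver's configuration):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const std::size_t num_channels   = 4;   // illustrative values only
    const std::size_t rateWindowSize = 10;

    // Bin-major layout: bin 0 holds channels 0..num_channels-1, then bin 1, and so on.
    std::vector<uint32_t> ratesWindow(num_channels * rateWindowSize, 0);

    const std::size_t channel = 2, bin = 7;
    ratesWindow[channel + num_channels * bin] += 1;   // indexing used in the hunks

    // The channel-major alternative mentioned in the in-code comment further down
    // would be ratesWindow[channel * rateWindowSize + bin] instead.

    std::printf("count(channel=%zu, bin=%zu) = %u\n", channel, bin,
                (unsigned)ratesWindow[channel + num_channels * bin]);
    return 0;
}
```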
@@ -858,8 +873,10 @@ void asynStreamGeneratorDriver::processEvents() {
 
     for (std::size_t i = 0; i < toProcess; ++i) {
 
-        counts[eventsA[i].source == 0 ? eventsA[i].pixelId
-                                      : this->num_channels - 1] += 1;
+        const uint8_t channelId = eventsA[i].source == 0 ? eventsA[i].pixelId
+                                                         : this->num_channels - 1;
+
+        counts[channelId] += 1;
         elapsedSeconds =
             ((double)(eventsA[i].timestamp - startTimestamp)) / 1e9;
 
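The refactor above gives the channel lookup a name: detector events (source == 0) are tallied under their pixelId, while everything else, i.e. monitor events, is folded into the last channel. A self-contained illustration, with a made-up Event struct standing in for the driver's event type:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for the driver's event record; only the two fields used
// by the channel mapping are modelled here.
struct Event {
    uint8_t source;   // 0 = detector event, anything else = monitor
    uint8_t pixelId;  // detector channel, meaningful when source == 0
};

int main() {
    const std::size_t num_channels = 4;   // illustrative channel count
    const Event events[] = {{0, 1}, {0, 1}, {0, 2}, {1, 0}};

    uint32_t counts[num_channels] = {};
    for (const Event &e : events) {
        // Same mapping as in the hunk above: monitor events land in the last channel.
        const uint8_t channelId = e.source == 0 ? e.pixelId : num_channels - 1;
        counts[channelId] += 1;
    }

    for (std::size_t i = 0; i < num_channels; ++i)
        std::printf("channel %zu: %u counts\n", i, (unsigned)counts[i]);
    return 0;
}
```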
@@ -909,6 +926,66 @@ void asynStreamGeneratorDriver::processEvents() {
         }
     }
 
+
+
+
+    // TODO this was just done quickly and is probably not totally correct
+    // Determining the rates on each channel
+    uint32_t *ratesWindowPtr = ratesWindow + this->num_channels * ratePtr;
+
+    // Clear Bin
+    for (std::size_t i = 0; i < this->num_channels; ++i) {
+        ratesWindowPtr[i] = 0;
+    }
+
+    // Sum Counts in Bin
+    for (std::size_t i = 0; i < toProcess; ++i) {
+        const uint8_t channelId = eventsA[i].source == 0 ? eventsA[i].pixelId
+                                                         : this->num_channels - 1;
+
+        // I think this should be the better way regarding speed
+        // otherwise we could have
+        // channelId * rateWindowSize + ratePtr
+        // to simplify the average
+        ratesWindowPtr[channelId] += 1;
+    }
+
+    if (toProcess) {
+        ratesStartTimes[ratePtr] = eventsA[0].timestamp;
+        ratesStopTimes[ratePtr] = eventsA[toProcess-1].timestamp;
+    }
+    // else {
+    //     ratesStartTimes[ratePtr] = std::numeric_limits<uint64_t>::max();
+    //     ratesStopTimes[ratePtr] = 0;
+    // }
+
+    uint64_t minTime = std::numeric_limits<uint64_t>::max();
+    uint64_t maxTime = 0;
+    for (std::size_t i = 0; i < rateWindowSize; ++i) {
+        minTime = std::min(minTime, ratesStartTimes[i]);
+        maxTime = std::max(maxTime, ratesStopTimes[i]);
+    }
+    const double timeWindow = maxTime <= minTime ? 0 : (maxTime - minTime) * 1e-9;
+
+    for (std::size_t i = 0; i < this->num_channels; ++i) {
+        double rate = 0.;
+        for (std::size_t j = 0; j < rateWindowSize; ++j) {
+            rate += ratesWindow[i + this->num_channels * j];
+        }
+        setDoubleParam(
+            this->P_Rates[i],
+            rate / timeWindow
+        );
+
+        // if (i == 0)
+        //     asynPrint(pasynUserSelf, ASYN_TRACE_ERROR,
+        //               "%s:%s: window %f (%d) counts %f\n", driverName, functionName, timeWindow, timeWindow > 0., rate);
+    }
+
+
+    ratePtr = (ratePtr + 1) % rateWindowSize;
+
+
     prevStatus = currStatus;
 
     std::swap(eventsA, eventsB);
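The TODO above already concedes that the rate computation "was just done quickly and is probably not totally correct". Below is a compact, standalone sketch of the same windowed-rate idea: a ring of bins, each holding per-channel counts plus the first and last event timestamp it covered, with the published rate being the window sum divided by the time span the window covers. All names are hypothetical. Unlike the hunk above, the sketch initialises the timestamps of every bin (the hunk's initialisation loop runs over num_channels while those arrays have rateWindowSize entries) and reports zero for an empty window instead of dividing by a zero time span.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <vector>

// Hypothetical sliding-window rate estimator, modelled on the hunk above but
// not copied from the driver.  Each call to addBin() stores one processing
// pass: per-channel counts plus the first and last event timestamp (ns) seen
// in that pass, kept in a ring of windowSize bins.
class RateWindow {
public:
    RateWindow(std::size_t numChannels, std::size_t windowSize)
        : numChannels_(numChannels), windowSize_(windowSize),
          counts_(numChannels * windowSize, 0),
          startNs_(windowSize, std::numeric_limits<uint64_t>::max()),
          stopNs_(windowSize, 0) {}

    void addBin(const std::vector<uint32_t> &binCounts,
                uint64_t firstEventNs, uint64_t lastEventNs) {
        for (std::size_t ch = 0; ch < numChannels_; ++ch)
            counts_[ch + numChannels_ * cursor_] = binCounts[ch];
        startNs_[cursor_] = firstEventNs;
        stopNs_[cursor_] = lastEventNs;
        cursor_ = (cursor_ + 1) % windowSize_;       // advance the ring pointer
    }

    // Rate for one channel: counts summed over the whole window divided by the
    // time span the window covers; zero while the window is still empty.
    double rate(std::size_t ch) const {
        const uint64_t minNs = *std::min_element(startNs_.begin(), startNs_.end());
        const uint64_t maxNs = *std::max_element(stopNs_.begin(), stopNs_.end());
        if (maxNs <= minNs)
            return 0.0;                              // no usable time span yet
        double sum = 0.0;
        for (std::size_t bin = 0; bin < windowSize_; ++bin)
            sum += counts_[ch + numChannels_ * bin]; // same bin-major indexing
        return sum / ((maxNs - minNs) * 1e-9);       // counts per second
    }

private:
    std::size_t numChannels_;
    std::size_t windowSize_;
    std::size_t cursor_ = 0;
    std::vector<uint32_t> counts_;
    std::vector<uint64_t> startNs_;
    std::vector<uint64_t> stopNs_;
};

int main() {
    RateWindow rates(2, 10);   // 2 channels, 10-bin window

    // Two passes, each covering 0.2 s, with made-up per-channel counts.
    rates.addBin({12, 3}, 1000000000, 1200000000);
    rates.addBin({10, 5}, 1200000000, 1400000000);

    std::printf("ch0: %.1f cts/sec, ch1: %.1f cts/sec\n", rates.rate(0), rates.rate(1));
    return 0;
}
```

With the two example bins it prints ch0: 55.0 cts/sec, ch1: 20.0 cts/sec, i.e. 22 and 8 counts over the 0.4 s the window spans.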