Mirror of https://github.com/slsdetectorgroup/aare.git, synced 2025-06-21 19:27:58 +02:00

For 2025.5.22 release (#181)

Co-authored-by: Patrick <patrick.sieberer@psi.ch>
Co-authored-by: JulianHeymes <julian.heymes@psi.ch>
Co-authored-by: Dhanya Thattil <dhanya.thattil@psi.ch>
Co-authored-by: Xiangyu Xie <45243914+xiangyuxie@users.noreply.github.com>
Co-authored-by: xiangyu.xie <xiangyu.xie@psi.ch>
Co-authored-by: AliceMazzoleni99 <alice.mazzoleni@psi.ch>
Co-authored-by: Mazzoleni Alice Francesca <mazzol_a@pc17378.psi.ch>
Co-authored-by: siebsi <sieb.patr@gmail.com>
src/CalculateEta.test.cpp (new file, 127 lines)
@@ -0,0 +1,127 @@
/************************************************
 * @file CalculateEta.test.cpp
 * @short test case to calculate_eta2
 ***********************************************/

#include "aare/CalculateEta.hpp"
#include "aare/Cluster.hpp"
#include "aare/ClusterFile.hpp"

// #include "catch.hpp"
#include <array>
#include <catch2/catch_all.hpp>
#include <catch2/catch_test_macros.hpp>

using namespace aare;

using ClusterTypes =
    std::variant<Cluster<int, 2, 2>, Cluster<int, 3, 3>, Cluster<int, 5, 5>,
                 Cluster<int, 4, 2>, Cluster<int, 2, 3>>;

auto get_test_parameters() {
    return GENERATE(
        std::make_tuple(ClusterTypes{Cluster<int, 2, 2>{0, 0, {1, 2, 3, 1}}},
                        Eta2<int>{2. / 3, 3. / 4,
                                  static_cast<int>(corner::cBottomLeft), 7}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 3, 3>{0, 0, {1, 2, 3, 4, 5, 6, 1, 2, 7}}},
            Eta2<int>{6. / 11, 2. / 7, static_cast<int>(corner::cTopRight),
                      20}),
        std::make_tuple(ClusterTypes{Cluster<int, 5, 5>{
                            0, 0, {1, 6, 7, 6, 5, 4, 3, 2, 1, 2, 8, 9, 8,
                                   1, 4, 5, 6, 7, 8, 4, 1, 1, 1, 1, 1}}},
                        Eta2<int>{8. / 17, 7. / 15, 9, 30}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 4, 2>{0, 0, {1, 4, 7, 2, 5, 6, 4, 3}}},
            Eta2<int>{4. / 10, 4. / 11, 1, 21}),
        std::make_tuple(
            ClusterTypes{Cluster<int, 2, 3>{0, 0, {1, 3, 2, 3, 4, 2}}},
            Eta2<int>{3. / 5, 2. / 5, 1, 11}));
}

TEST_CASE("compute_largest_2x2_subcluster", "[eta_calculation]") {
    auto [cluster, expected_eta] = get_test_parameters();

    auto [sum, index] = std::visit(
        [](const auto &clustertype) { return clustertype.max_sum_2x2(); },
        cluster);
    CHECK(expected_eta.c == index);
    CHECK(expected_eta.sum == sum);
}

TEST_CASE("calculate_eta2", "[eta_calculation]") {

    auto [cluster, expected_eta] = get_test_parameters();

    auto eta = std::visit(
        [](const auto &clustertype) { return calculate_eta2(clustertype); },
        cluster);

    CHECK(eta.x == expected_eta.x);
    CHECK(eta.y == expected_eta.y);
    CHECK(eta.c == expected_eta.c);
    CHECK(eta.sum == expected_eta.sum);
}

// 3x3 cluster layout (rotated to match the cBottomLeft enum):
// 6, 7, 8
// 3, 4, 5
// 0, 1, 2
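// In the two test cases below the eta is formed inside the highest-energy 2x2
// sub-cluster: eta.x compares the centre pixel with its horizontal neighbour in
// that block (e.g. 50 / (20 + 50)) and eta.y with its vertical neighbour
// (e.g. 50 / (23 + 50)), so for these positive-valued clusters both lie in [0, 1].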

TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
          "the bottom left",
          "[eta_calculation]") {

    // Create a 3x3 cluster
    Cluster<int32_t, 3, 3> cl;
    cl.x = 0;
    cl.y = 0;
    cl.data[0] = 30;
    cl.data[1] = 23;
    cl.data[2] = 5;
    cl.data[3] = 20;
    cl.data[4] = 50;
    cl.data[5] = 3;
    cl.data[6] = 8;
    cl.data[7] = 2;
    cl.data[8] = 3;

    // 8,  2,  3
    // 20, 50, 3
    // 30, 23, 5

    auto eta = calculate_eta2(cl);
    CHECK(eta.c == static_cast<int>(corner::cBottomLeft));
    CHECK(eta.x == 50.0 / (20 + 50)); // 4/(3+4)
    CHECK(eta.y == 50.0 / (23 + 50)); // 4/(1+4)
    CHECK(eta.sum == 30 + 23 + 20 + 50);
}

TEST_CASE("Calculate eta2 for a 3x3 int32 cluster with the largest 2x2 sum in "
          "the top left",
          "[eta_calculation]") {

    // Create a 3x3 cluster
    Cluster<int32_t, 3, 3> cl;
    cl.x = 0;
    cl.y = 0;
    cl.data[0] = 8;
    cl.data[1] = 12;
    cl.data[2] = 5;
    cl.data[3] = 77;
    cl.data[4] = 80;
    cl.data[5] = 3;
    cl.data[6] = 82;
    cl.data[7] = 91;
    cl.data[8] = 3;

    // 82, 91, 3
    // 77, 80, 3
    // 8,  12, 5

    auto eta = calculate_eta2(cl);
    CHECK(eta.c == static_cast<int>(corner::cTopLeft));
    CHECK(eta.x == 80. / (77 + 80));  // 4/(3+4)
    CHECK(eta.y == 91.0 / (91 + 80)); // 7/(7+4)
    CHECK(eta.sum == 77 + 80 + 82 + 91);
}
src/Cluster.test.cpp (new file, 21 lines)
@@ -0,0 +1,21 @@
/************************************************
 * @file test-Cluster.cpp
 * @short test case for generic Cluster, ClusterVector, and calculate_eta2
 ***********************************************/

#include "aare/Cluster.hpp"
#include "aare/CalculateEta.hpp"
#include "aare/ClusterFile.hpp"

// #include "catch.hpp"
#include <array>
#include <catch2/catch_all.hpp>
#include <catch2/catch_test_macros.hpp>

using namespace aare;

TEST_CASE("Test sum of Cluster", "[.cluster]") {
    Cluster<int, 2, 2> cluster{0, 0, {1, 2, 3, 4}};

    CHECK(cluster.sum() == 10);
}
@ -1,35 +1,39 @@
|
||||
#include "aare/ClusterFile.hpp"
|
||||
#include "test_config.hpp"
|
||||
|
||||
|
||||
#include "aare/defs.hpp"
|
||||
#include <algorithm>
|
||||
#include <catch2/catch_test_macros.hpp>
|
||||
#include <filesystem>
|
||||
|
||||
|
||||
|
||||
|
||||
using aare::Cluster;
|
||||
using aare::ClusterFile;
|
||||
using aare::ClusterVector;
|
||||
|
||||
|
||||
TEST_CASE("Read one frame from a a cluster file", "[.files]") {
|
||||
TEST_CASE("Read one frame from a cluster file", "[.files]") {
|
||||
//We know that the frame has 97 clusters
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
ClusterFile f(fpath);
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
auto clusters = f.read_frame();
|
||||
REQUIRE(clusters.size() == 97);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
CHECK(clusters.size() == 97);
|
||||
CHECK(clusters.frame_number() == 135);
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
||||
|
||||
TEST_CASE("Read one frame using ROI", "[.files]") {
|
||||
//We know that the frame has 97 clusters
|
||||
// We know that the frame has 97 clusters
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
ClusterFile f(fpath);
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
aare::ROI roi;
|
||||
roi.xmin = 0;
|
||||
roi.xmax = 50;
|
||||
@ -40,45 +44,308 @@ TEST_CASE("Read one frame using ROI", "[.files]") {
|
||||
REQUIRE(clusters.size() == 49);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
|
||||
//Check that all clusters are within the ROI
|
||||
// Check that all clusters are within the ROI
|
||||
for (size_t i = 0; i < clusters.size(); i++) {
|
||||
auto c = clusters.at<aare::Cluster3x3>(i);
|
||||
auto c = clusters[i];
|
||||
REQUIRE(c.x >= roi.xmin);
|
||||
REQUIRE(c.x <= roi.xmax);
|
||||
REQUIRE(c.y >= roi.ymin);
|
||||
REQUIRE(c.y <= roi.ymax);
|
||||
}
|
||||
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
||||
|
||||
|
||||
TEST_CASE("Read clusters from single frame file", "[.files]") {
|
||||
|
||||
// frame_number, num_clusters [135] 97
|
||||
// [ 1 200] [0 1 2 3 4 5 6 7 8]
|
||||
// [ 2 201] [ 9 10 11 12 13 14 15 16 17]
|
||||
// [ 3 202] [18 19 20 21 22 23 24 25 26]
|
||||
// [ 4 203] [27 28 29 30 31 32 33 34 35]
|
||||
// [ 5 204] [36 37 38 39 40 41 42 43 44]
|
||||
// [ 6 205] [45 46 47 48 49 50 51 52 53]
|
||||
// [ 7 206] [54 55 56 57 58 59 60 61 62]
|
||||
// [ 8 207] [63 64 65 66 67 68 69 70 71]
|
||||
// [ 9 208] [72 73 74 75 76 77 78 79 80]
|
||||
// [ 10 209] [81 82 83 84 85 86 87 88 89]
|
||||
// [ 11 210] [90 91 92 93 94 95 96 97 98]
|
||||
// [ 12 211] [ 99 100 101 102 103 104 105 106 107]
|
||||
// [ 13 212] [108 109 110 111 112 113 114 115 116]
|
||||
// [ 14 213] [117 118 119 120 121 122 123 124 125]
|
||||
// [ 15 214] [126 127 128 129 130 131 132 133 134]
|
||||
// [ 16 215] [135 136 137 138 139 140 141 142 143]
|
||||
// [ 17 216] [144 145 146 147 148 149 150 151 152]
|
||||
// [ 18 217] [153 154 155 156 157 158 159 160 161]
|
||||
// [ 19 218] [162 163 164 165 166 167 168 169 170]
|
||||
// [ 20 219] [171 172 173 174 175 176 177 178 179]
|
||||
// [ 21 220] [180 181 182 183 184 185 186 187 188]
|
||||
// [ 22 221] [189 190 191 192 193 194 195 196 197]
|
||||
// [ 23 222] [198 199 200 201 202 203 204 205 206]
|
||||
// [ 24 223] [207 208 209 210 211 212 213 214 215]
|
||||
// [ 25 224] [216 217 218 219 220 221 222 223 224]
|
||||
// [ 26 225] [225 226 227 228 229 230 231 232 233]
|
||||
// [ 27 226] [234 235 236 237 238 239 240 241 242]
|
||||
// [ 28 227] [243 244 245 246 247 248 249 250 251]
|
||||
// [ 29 228] [252 253 254 255 256 257 258 259 260]
|
||||
// [ 30 229] [261 262 263 264 265 266 267 268 269]
|
||||
// [ 31 230] [270 271 272 273 274 275 276 277 278]
|
||||
// [ 32 231] [279 280 281 282 283 284 285 286 287]
|
||||
// [ 33 232] [288 289 290 291 292 293 294 295 296]
|
||||
// [ 34 233] [297 298 299 300 301 302 303 304 305]
|
||||
// [ 35 234] [306 307 308 309 310 311 312 313 314]
|
||||
// [ 36 235] [315 316 317 318 319 320 321 322 323]
|
||||
// [ 37 236] [324 325 326 327 328 329 330 331 332]
|
||||
// [ 38 237] [333 334 335 336 337 338 339 340 341]
|
||||
// [ 39 238] [342 343 344 345 346 347 348 349 350]
|
||||
// [ 40 239] [351 352 353 354 355 356 357 358 359]
|
||||
// [ 41 240] [360 361 362 363 364 365 366 367 368]
|
||||
// [ 42 241] [369 370 371 372 373 374 375 376 377]
|
||||
// [ 43 242] [378 379 380 381 382 383 384 385 386]
|
||||
// [ 44 243] [387 388 389 390 391 392 393 394 395]
|
||||
// [ 45 244] [396 397 398 399 400 401 402 403 404]
|
||||
// [ 46 245] [405 406 407 408 409 410 411 412 413]
|
||||
// [ 47 246] [414 415 416 417 418 419 420 421 422]
|
||||
// [ 48 247] [423 424 425 426 427 428 429 430 431]
|
||||
// [ 49 248] [432 433 434 435 436 437 438 439 440]
|
||||
// [ 50 249] [441 442 443 444 445 446 447 448 449]
|
||||
// [ 51 250] [450 451 452 453 454 455 456 457 458]
|
||||
// [ 52 251] [459 460 461 462 463 464 465 466 467]
|
||||
// [ 53 252] [468 469 470 471 472 473 474 475 476]
|
||||
// [ 54 253] [477 478 479 480 481 482 483 484 485]
|
||||
// [ 55 254] [486 487 488 489 490 491 492 493 494]
|
||||
// [ 56 255] [495 496 497 498 499 500 501 502 503]
|
||||
// [ 57 256] [504 505 506 507 508 509 510 511 512]
|
||||
// [ 58 257] [513 514 515 516 517 518 519 520 521]
|
||||
// [ 59 258] [522 523 524 525 526 527 528 529 530]
|
||||
// [ 60 259] [531 532 533 534 535 536 537 538 539]
|
||||
// [ 61 260] [540 541 542 543 544 545 546 547 548]
|
||||
// [ 62 261] [549 550 551 552 553 554 555 556 557]
|
||||
// [ 63 262] [558 559 560 561 562 563 564 565 566]
|
||||
// [ 64 263] [567 568 569 570 571 572 573 574 575]
|
||||
// [ 65 264] [576 577 578 579 580 581 582 583 584]
|
||||
// [ 66 265] [585 586 587 588 589 590 591 592 593]
|
||||
// [ 67 266] [594 595 596 597 598 599 600 601 602]
|
||||
// [ 68 267] [603 604 605 606 607 608 609 610 611]
|
||||
// [ 69 268] [612 613 614 615 616 617 618 619 620]
|
||||
// [ 70 269] [621 622 623 624 625 626 627 628 629]
|
||||
// [ 71 270] [630 631 632 633 634 635 636 637 638]
|
||||
// [ 72 271] [639 640 641 642 643 644 645 646 647]
|
||||
// [ 73 272] [648 649 650 651 652 653 654 655 656]
|
||||
// [ 74 273] [657 658 659 660 661 662 663 664 665]
|
||||
// [ 75 274] [666 667 668 669 670 671 672 673 674]
|
||||
// [ 76 275] [675 676 677 678 679 680 681 682 683]
|
||||
// [ 77 276] [684 685 686 687 688 689 690 691 692]
|
||||
// [ 78 277] [693 694 695 696 697 698 699 700 701]
|
||||
// [ 79 278] [702 703 704 705 706 707 708 709 710]
|
||||
// [ 80 279] [711 712 713 714 715 716 717 718 719]
|
||||
// [ 81 280] [720 721 722 723 724 725 726 727 728]
|
||||
// [ 82 281] [729 730 731 732 733 734 735 736 737]
|
||||
// [ 83 282] [738 739 740 741 742 743 744 745 746]
|
||||
// [ 84 283] [747 748 749 750 751 752 753 754 755]
|
||||
// [ 85 284] [756 757 758 759 760 761 762 763 764]
|
||||
// [ 86 285] [765 766 767 768 769 770 771 772 773]
|
||||
// [ 87 286] [774 775 776 777 778 779 780 781 782]
|
||||
// [ 88 287] [783 784 785 786 787 788 789 790 791]
|
||||
// [ 89 288] [792 793 794 795 796 797 798 799 800]
|
||||
// [ 90 289] [801 802 803 804 805 806 807 808 809]
|
||||
// [ 91 290] [810 811 812 813 814 815 816 817 818]
|
||||
// [ 92 291] [819 820 821 822 823 824 825 826 827]
|
||||
// [ 93 292] [828 829 830 831 832 833 834 835 836]
|
||||
// [ 94 293] [837 838 839 840 841 842 843 844 845]
|
||||
// [ 95 294] [846 847 848 849 850 851 852 853 854]
|
||||
// [ 96 295] [855 856 857 858 859 860 861 862 863]
|
||||
// [ 97 296] [864 865 866 867 868 869 870 871 872]
|
||||
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
SECTION("Read fewer clusters than available") {
|
||||
ClusterFile f(fpath);
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
auto clusters = f.read_clusters(50);
|
||||
REQUIRE(clusters.size() == 50);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
REQUIRE(clusters[0].x == 1);
|
||||
REQUIRE(clusters[0].y == 200);
|
||||
CHECK(std::equal(std::begin(clusters[0].data),
|
||||
std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
SECTION("Read more clusters than available") {
|
||||
ClusterFile f(fpath);
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
// 100 is the maximum number of clusters read
|
||||
auto clusters = f.read_clusters(100);
|
||||
REQUIRE(clusters.size() == 97);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
REQUIRE(clusters[0].x == 1);
|
||||
REQUIRE(clusters[0].y == 200);
|
||||
CHECK(std::equal(std::begin(clusters[0].data),
|
||||
std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
SECTION("Read all clusters") {
|
||||
ClusterFile f(fpath);
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
auto clusters = f.read_clusters(97);
|
||||
REQUIRE(clusters.size() == 97);
|
||||
REQUIRE(clusters.frame_number() == 135);
|
||||
REQUIRE(clusters[0].x == 1);
|
||||
REQUIRE(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data),
|
||||
std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Read clusters from single frame file with ROI", "[.files]") {
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
|
||||
aare::ROI roi;
|
||||
roi.xmin = 0;
|
||||
roi.xmax = 50;
|
||||
roi.ymin = 200;
|
||||
roi.ymax = 249;
|
||||
f.set_roi(roi);
|
||||
|
||||
auto clusters = f.read_clusters(10);
|
||||
|
||||
CHECK(clusters.size() == 10);
|
||||
CHECK(clusters.frame_number() == 135);
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
||||
TEST_CASE("Read cluster from multiple frame file", "[.files]") {
|
||||
|
||||
using ClusterType = Cluster<double, 2, 2>;
|
||||
|
||||
auto fpath =
|
||||
test_data_path() / "clust" / "Two_frames_2x2double_test_clusters.clust";
|
||||
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
// Two_frames_2x2double_test_clusters.clust
|
||||
// frame number, num_clusters 0, 4
|
||||
//[10, 20], {0. ,0., 0., 0.}
|
||||
//[11, 30], {1., 1., 1., 1.}
|
||||
//[12, 40], {2., 2., 2., 2.}
|
||||
//[13, 50], {3., 3., 3., 3.}
|
||||
// 1,4
|
||||
//[10, 20], {4., 4., 4., 4.}
|
||||
//[11, 30], {5., 5., 5., 5.}
|
||||
//[12, 40], {6., 6., 6., 6.}
|
||||
//[13, 50], {7., 7., 7., 7.}
|
||||
|
||||
SECTION("Read clusters from both frames") {
|
||||
ClusterFile<ClusterType> f(fpath);
|
||||
auto clusters = f.read_clusters(2);
|
||||
REQUIRE(clusters.size() == 2);
|
||||
REQUIRE(clusters.frame_number() == 0);
|
||||
|
||||
auto clusters1 = f.read_clusters(3);
|
||||
|
||||
REQUIRE(clusters1.size() == 3);
|
||||
REQUIRE(clusters1.frame_number() == 1);
|
||||
}
|
||||
|
||||
SECTION("Read all clusters") {
|
||||
ClusterFile<ClusterType> f(fpath);
|
||||
auto clusters = f.read_clusters(8);
|
||||
REQUIRE(clusters.size() == 8);
|
||||
REQUIRE(clusters.frame_number() == 1);
|
||||
}
|
||||
|
||||
|
||||
SECTION("Read clusters from one frame") {
|
||||
ClusterFile<ClusterType> f(fpath);
|
||||
auto clusters = f.read_clusters(2);
|
||||
REQUIRE(clusters.size() == 2);
|
||||
REQUIRE(clusters.frame_number() == 0);
|
||||
|
||||
auto clusters1 = f.read_clusters(1);
|
||||
|
||||
REQUIRE(clusters1.size() == 1);
|
||||
REQUIRE(clusters1.frame_number() == 0);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Write cluster with potential padding", "[.files][.ClusterFile]") {
|
||||
|
||||
using ClusterType = Cluster<double, 3, 3>;
|
||||
|
||||
REQUIRE(std::filesystem::exists(test_data_path() / "clust"));
|
||||
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_2_clusters.clust";
|
||||
|
||||
ClusterFile<ClusterType> file(fpath, 1000, "w");
|
||||
|
||||
ClusterVector<ClusterType> clustervec(2);
|
||||
int16_t coordinate = 5;
|
||||
clustervec.push_back(ClusterType{
|
||||
coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});
|
||||
clustervec.push_back(ClusterType{
|
||||
coordinate, coordinate, {0., 0., 0., 0., 0., 0., 0., 0., 0.}});
|
||||
|
||||
file.write_frame(clustervec);
|
||||
|
||||
file.close();
|
||||
|
||||
file.open("r");
|
||||
|
||||
auto read_cluster_vector = file.read_frame();
|
||||
|
||||
CHECK(read_cluster_vector.size() == 2);
|
||||
CHECK(read_cluster_vector.frame_number() == 0);
|
||||
|
||||
CHECK(read_cluster_vector[0].x == clustervec[0].x);
|
||||
CHECK(read_cluster_vector[0].y == clustervec[0].y);
|
||||
CHECK(std::equal(
|
||||
clustervec[0].data.begin(), clustervec[0].data.end(),
|
||||
read_cluster_vector[0].data.begin(), [](double a, double b) {
|
||||
return std::abs(a - b) < std::numeric_limits<double>::epsilon();
|
||||
}));
|
||||
|
||||
CHECK(read_cluster_vector[1].x == clustervec[1].x);
|
||||
CHECK(read_cluster_vector[1].y == clustervec[1].y);
|
||||
CHECK(std::equal(
|
||||
clustervec[1].data.begin(), clustervec[1].data.end(),
|
||||
read_cluster_vector[1].data.begin(), [](double a, double b) {
|
||||
return std::abs(a - b) < std::numeric_limits<double>::epsilon();
|
||||
}));
|
||||
}
|
||||
|
||||
TEST_CASE("Read frame and modify cluster data", "[.files][.ClusterFile]") {
|
||||
auto fpath = test_data_path() / "clust" / "single_frame_97_clustrers.clust";
|
||||
REQUIRE(std::filesystem::exists(fpath));
|
||||
|
||||
ClusterFile<Cluster<int32_t, 3, 3>> f(fpath);
|
||||
|
||||
auto clusters = f.read_frame();
|
||||
CHECK(clusters.size() == 97);
|
||||
CHECK(clusters.frame_number() == 135);
|
||||
|
||||
int32_t expected_cluster_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
|
||||
clusters.push_back(
|
||||
Cluster<int32_t, 3, 3>{0, 0, {0, 1, 2, 3, 4, 5, 6, 7, 8}});
|
||||
|
||||
CHECK(clusters.size() == 98);
|
||||
CHECK(clusters[0].x == 1);
|
||||
CHECK(clusters[0].y == 200);
|
||||
|
||||
CHECK(std::equal(std::begin(clusters[0].data), std::end(clusters[0].data),
|
||||
std::begin(expected_cluster_data)));
|
||||
}
|
||||
|
@@ -1,19 +1,18 @@
 #include "aare/ClusterFinder.hpp"
 #include "aare/Pedestal.hpp"
-#include <catch2/matchers/catch_matchers_floating_point.hpp>
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/matchers/catch_matchers_floating_point.hpp>
 #include <chrono>
 #include <random>

 using namespace aare;

-//TODO! Find a way to test the cluster finder
-
-
+// TODO! Find a way to test the cluster finder

 // class ClusterFinderUnitTest : public ClusterFinder {
 // public:
-//     ClusterFinderUnitTest(int cluster_sizeX, int cluster_sizeY, double nSigma = 5.0, double threshold = 0.0)
+//     ClusterFinderUnitTest(int cluster_sizeX, int cluster_sizeY, double nSigma
+//     = 5.0, double threshold = 0.0)
 //         : ClusterFinder(cluster_sizeX, cluster_sizeY, nSigma, threshold) {}
 //     double get_c2() { return c2; }
 //     double get_c3() { return c3; }
@@ -37,8 +36,8 @@ using namespace aare;
 //     REQUIRE_THAT(cf.get_c3(), Catch::Matchers::WithinRel(c3, 1e-9));
 // }

-TEST_CASE("Construct a cluster finder"){
-    ClusterFinder clusterFinder({400,400}, {3,3});
+TEST_CASE("Construct a cluster finder") {
+    ClusterFinder clusterFinder({400, 400});
     // REQUIRE(clusterFinder.get_cluster_sizeX() == 3);
     // REQUIRE(clusterFinder.get_cluster_sizeY() == 3);
     // REQUIRE(clusterFinder.get_threshold() == 1);
@@ -49,16 +48,17 @@ TEST_CASE("Construct a cluster finder"){
 //     aare::Pedestal pedestal(10, 10, 5);
 //     NDArray<double, 2> frame({10, 10});
 //     frame = 0;
-//     ClusterFinder clusterFinder(3, 3, 1, 1); // 3x3 cluster, 1 nSigma, 1 threshold
+//     ClusterFinder clusterFinder(3, 3, 1, 1); // 3x3 cluster, 1 nSigma, 1
+//     threshold

-//     auto clusters = clusterFinder.find_clusters_without_threshold(frame.span(), pedestal);
+//     auto clusters =
+//     clusterFinder.find_clusters_without_threshold(frame.span(), pedestal);

 //     REQUIRE(clusters.size() == 0);

 //     frame(5, 5) = 10;
-//     clusters = clusterFinder.find_clusters_without_threshold(frame.span(), pedestal);
-//     REQUIRE(clusters.size() == 1);
-//     REQUIRE(clusters[0].x == 5);
+//     clusters = clusterFinder.find_clusters_without_threshold(frame.span(),
+//     pedestal); REQUIRE(clusters.size() == 1); REQUIRE(clusters[0].x == 5);
 //     REQUIRE(clusters[0].y == 5);
 //     for (int i = 0; i < 3; i++) {
 //         for (int j = 0; j < 3; j++) {
src/ClusterFinderMT.test.cpp (new file, 99 lines)
@@ -0,0 +1,99 @@

#include "aare/ClusterFinderMT.hpp"
#include "aare/Cluster.hpp"
#include "aare/ClusterCollector.hpp"
#include "aare/File.hpp"

#include "test_config.hpp"

#include <catch2/catch_test_macros.hpp>
#include <filesystem>
#include <memory>

using namespace aare;

// wrapper function to access private member variables for testing
template <typename ClusterType, typename FRAME_TYPE = uint16_t,
          typename PEDESTAL_TYPE = double>
class ClusterFinderMTWrapper
    : public ClusterFinderMT<ClusterType, FRAME_TYPE, PEDESTAL_TYPE> {

  public:
    ClusterFinderMTWrapper(Shape<2> image_size, PEDESTAL_TYPE nSigma = 5.0,
                           size_t capacity = 2000, size_t n_threads = 3)
        : ClusterFinderMT<ClusterType, FRAME_TYPE, PEDESTAL_TYPE>(
              image_size, nSigma, capacity, n_threads) {}

    size_t get_m_input_queues_size() const {
        return this->m_input_queues.size();
    }

    size_t get_m_output_queues_size() const {
        return this->m_output_queues.size();
    }

    size_t get_m_cluster_finders_size() const {
        return this->m_cluster_finders.size();
    }

    bool m_output_queues_are_empty() const {
        for (auto &queue : this->m_output_queues) {
            if (!queue->isEmpty())
                return false;
        }
        return true;
    }

    bool m_input_queues_are_empty() const {
        for (auto &queue : this->m_input_queues) {
            if (!queue->isEmpty())
                return false;
        }
        return true;
    }

    bool m_sink_is_empty() const { return this->m_sink.isEmpty(); }

    size_t m_sink_size() const { return this->m_sink.sizeGuess(); }
};
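// Flow exercised by the test below: frames are pushed through find_clusters(),
// stop() drains the per-thread input/output queues, and a ClusterCollector
// attached to the sink gathers the clusters, which are then retrieved with
// steal_clusters().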

TEST_CASE("multithreaded cluster finder", "[.files][.ClusterFinder]") {
    auto fpath = "/mnt/sls_det_storage/matterhorn_data/aare_test_data/"
                 "Moench03new/cu_half_speed_master_4.json";

    File file(fpath);

    size_t n_threads = 2;
    size_t n_frames_pd = 10;

    using ClusterType = Cluster<int32_t, 3, 3>;

    ClusterFinderMTWrapper<ClusterType> cf(
        {static_cast<int64_t>(file.rows()), static_cast<int64_t>(file.cols())},
        5, 2000, n_threads); // no idea what frame type is!!! default uint16_t

    CHECK(cf.get_m_input_queues_size() == n_threads);
    CHECK(cf.get_m_output_queues_size() == n_threads);
    CHECK(cf.get_m_cluster_finders_size() == n_threads);
    CHECK(cf.m_output_queues_are_empty() == true);
    CHECK(cf.m_input_queues_are_empty() == true);

    for (size_t i = 0; i < n_frames_pd; ++i) {
        cf.find_clusters(file.read_frame().view<uint16_t>());
    }

    cf.stop();

    CHECK(cf.m_output_queues_are_empty() == true);
    CHECK(cf.m_input_queues_are_empty() == true);

    CHECK(cf.m_sink_size() == n_frames_pd);
    ClusterCollector<ClusterType> clustercollector(&cf);

    clustercollector.stop();

    CHECK(cf.m_sink_size() == 0);

    auto clustervec = clustercollector.steal_clusters();
    // CHECK(clustervec.size() == ) //dont know how many clusters to expect
}
@ -1,21 +1,52 @@
|
||||
#include <cstdint>
|
||||
#include "aare/ClusterVector.hpp"
|
||||
#include <cstdint>
|
||||
|
||||
#include <catch2/matchers/catch_matchers_floating_point.hpp>
|
||||
#include <catch2/catch_all.hpp>
|
||||
#include <catch2/catch_test_macros.hpp>
|
||||
#include <catch2/matchers/catch_matchers_floating_point.hpp>
|
||||
|
||||
using aare::Cluster;
|
||||
using aare::ClusterVector;
|
||||
|
||||
struct Cluster_i2x2 {
|
||||
int16_t x;
|
||||
int16_t y;
|
||||
int32_t data[4];
|
||||
};
|
||||
TEST_CASE("item_size return the size of the cluster stored") {
|
||||
using C1 = Cluster<int32_t, 2, 2>;
|
||||
ClusterVector<C1> cv(4);
|
||||
CHECK(cv.item_size() == sizeof(C1));
|
||||
|
||||
TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
|
||||
|
||||
// Sanity check
|
||||
// 2*2*4 = 16 bytes of data for the cluster
|
||||
// 2*2 = 4 bytes for the x and y coordinates
|
||||
REQUIRE(cv.item_size() == 20);
|
||||
|
||||
ClusterVector<int32_t> cv(2, 2, 4);
|
||||
using C2 = Cluster<int32_t, 3, 3>;
|
||||
ClusterVector<C2> cv2(4);
|
||||
CHECK(cv2.item_size() == sizeof(C2));
|
||||
|
||||
using C3 = Cluster<double, 2, 3>;
|
||||
ClusterVector<C3> cv3(4);
|
||||
CHECK(cv3.item_size() == sizeof(C3));
|
||||
|
||||
using C4 = Cluster<char, 10, 5>;
|
||||
ClusterVector<C4> cv4(4);
|
||||
CHECK(cv4.item_size() == sizeof(C4));
|
||||
|
||||
using C5 = Cluster<int32_t, 2, 3>;
|
||||
ClusterVector<C5> cv5(4);
|
||||
CHECK(cv5.item_size() == sizeof(C5));
|
||||
|
||||
using C6 = Cluster<double, 5, 5>;
|
||||
ClusterVector<C6> cv6(4);
|
||||
CHECK(cv6.item_size() == sizeof(C6));
|
||||
|
||||
using C7 = Cluster<double, 3, 3>;
|
||||
ClusterVector<C7> cv7(4);
|
||||
CHECK(cv7.item_size() == sizeof(C7));
|
||||
}
|
||||
|
||||
TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read",
|
||||
"[.ClusterVector]") {
|
||||
|
||||
ClusterVector<Cluster<int32_t, 2, 2>> cv(4);
|
||||
REQUIRE(cv.capacity() == 4);
|
||||
REQUIRE(cv.size() == 0);
|
||||
REQUIRE(cv.cluster_size_x() == 2);
|
||||
@ -23,112 +54,102 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") {
|
||||
// int16_t, int16_t, 2x2 int32_t = 20 bytes
|
||||
REQUIRE(cv.item_size() == 20);
|
||||
|
||||
//Create a cluster and push back into the vector
|
||||
Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
|
||||
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
|
||||
// Create a cluster and push back into the vector
|
||||
Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
|
||||
cv.push_back(c1);
|
||||
REQUIRE(cv.size() == 1);
|
||||
REQUIRE(cv.capacity() == 4);
|
||||
|
||||
//Read the cluster back out using copy. TODO! Can we improve the API?
|
||||
Cluster_i2x2 c2;
|
||||
std::byte *ptr = cv.element_ptr(0);
|
||||
std::copy(ptr, ptr + cv.item_size(), reinterpret_cast<std::byte*>(&c2));
|
||||
auto c2 = cv[0];
|
||||
|
||||
//Check that the data is the same
|
||||
// Check that the data is the same
|
||||
REQUIRE(c1.x == c2.x);
|
||||
REQUIRE(c1.y == c2.y);
|
||||
for(size_t i = 0; i < 4; i++) {
|
||||
for (size_t i = 0; i < 4; i++) {
|
||||
REQUIRE(c1.data[i] == c2.data[i]);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Summing 3x1 clusters of int64"){
|
||||
struct Cluster_l3x1{
|
||||
int16_t x;
|
||||
int16_t y;
|
||||
int32_t data[3];
|
||||
};
|
||||
|
||||
ClusterVector<int32_t> cv(3, 1, 2);
|
||||
TEST_CASE("Summing 3x1 clusters of int64", "[.ClusterVector]") {
|
||||
ClusterVector<Cluster<int32_t, 3, 1>> cv(2);
|
||||
REQUIRE(cv.capacity() == 2);
|
||||
REQUIRE(cv.size() == 0);
|
||||
REQUIRE(cv.cluster_size_x() == 3);
|
||||
REQUIRE(cv.cluster_size_y() == 1);
|
||||
|
||||
//Create a cluster and push back into the vector
|
||||
Cluster_l3x1 c1 = {1, 2, {3, 4, 5}};
|
||||
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
|
||||
// Create a cluster and push back into the vector
|
||||
Cluster<int32_t, 3, 1> c1 = {1, 2, {3, 4, 5}};
|
||||
cv.push_back(c1);
|
||||
REQUIRE(cv.capacity() == 2);
|
||||
REQUIRE(cv.size() == 1);
|
||||
|
||||
Cluster_l3x1 c2 = {6, 7, {8, 9, 10}};
|
||||
cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
|
||||
Cluster<int32_t, 3, 1> c2 = {6, 7, {8, 9, 10}};
|
||||
cv.push_back(c2);
|
||||
REQUIRE(cv.capacity() == 2);
|
||||
REQUIRE(cv.size() == 2);
|
||||
|
||||
Cluster_l3x1 c3 = {11, 12, {13, 14, 15}};
|
||||
cv.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
|
||||
Cluster<int32_t, 3, 1> c3 = {11, 12, {13, 14, 15}};
|
||||
cv.push_back(c3);
|
||||
REQUIRE(cv.capacity() == 4);
|
||||
REQUIRE(cv.size() == 3);
|
||||
|
||||
/*
|
||||
auto sums = cv.sum();
|
||||
REQUIRE(sums.size() == 3);
|
||||
REQUIRE(sums[0] == 12);
|
||||
REQUIRE(sums[1] == 27);
|
||||
REQUIRE(sums[2] == 42);
|
||||
*/
|
||||
}
|
||||
|
||||
TEST_CASE("Storing floats"){
|
||||
struct Cluster_f4x2{
|
||||
int16_t x;
|
||||
int16_t y;
|
||||
float data[8];
|
||||
};
|
||||
|
||||
ClusterVector<float> cv(2, 4, 10);
|
||||
TEST_CASE("Storing floats", "[.ClusterVector]") {
|
||||
ClusterVector<Cluster<float, 2, 4>> cv(10);
|
||||
REQUIRE(cv.capacity() == 10);
|
||||
REQUIRE(cv.size() == 0);
|
||||
REQUIRE(cv.cluster_size_x() == 2);
|
||||
REQUIRE(cv.cluster_size_y() == 4);
|
||||
|
||||
//Create a cluster and push back into the vector
|
||||
Cluster_f4x2 c1 = {1, 2, {3.0, 4.0, 5.0, 6.0,3.0, 4.0, 5.0, 6.0}};
|
||||
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
|
||||
// Create a cluster and push back into the vector
|
||||
Cluster<float, 2, 4> c1 = {1, 2, {3.0, 4.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0}};
|
||||
cv.push_back(c1);
|
||||
REQUIRE(cv.capacity() == 10);
|
||||
REQUIRE(cv.size() == 1);
|
||||
|
||||
|
||||
Cluster_f4x2 c2 = {6, 7, {8.0, 9.0, 10.0, 11.0,8.0, 9.0, 10.0, 11.0}};
|
||||
cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
|
||||
Cluster<float, 2, 4> c2 = {
|
||||
6, 7, {8.0, 9.0, 10.0, 11.0, 8.0, 9.0, 10.0, 11.0}};
|
||||
cv.push_back(c2);
|
||||
REQUIRE(cv.capacity() == 10);
|
||||
REQUIRE(cv.size() == 2);
|
||||
|
||||
/*
|
||||
auto sums = cv.sum();
|
||||
REQUIRE(sums.size() == 2);
|
||||
REQUIRE_THAT(sums[0], Catch::Matchers::WithinAbs(36.0, 1e-6));
|
||||
REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6));
|
||||
*/
|
||||
}
|
||||
|
||||
TEST_CASE("Push back more than initial capacity"){
|
||||
|
||||
ClusterVector<int32_t> cv(2, 2, 2);
|
||||
TEST_CASE("Push back more than initial capacity", "[.ClusterVector]") {
|
||||
|
||||
ClusterVector<Cluster<int32_t, 2, 2>> cv(2);
|
||||
auto initial_data = cv.data();
|
||||
Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
|
||||
cv.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
|
||||
Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
|
||||
cv.push_back(c1);
|
||||
REQUIRE(cv.size() == 1);
|
||||
REQUIRE(cv.capacity() == 2);
|
||||
|
||||
Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
|
||||
cv.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
|
||||
Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
|
||||
cv.push_back(c2);
|
||||
REQUIRE(cv.size() == 2);
|
||||
REQUIRE(cv.capacity() == 2);
|
||||
|
||||
Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
|
||||
cv.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
|
||||
REQUIRE(cv.size() == 3);
|
||||
Cluster<int32_t, 2, 2> c3 = {11, 12, {13, 14, 15, 16}};
|
||||
cv.push_back(c3);
|
||||
REQUIRE(cv.size() == 3);
|
||||
REQUIRE(cv.capacity() == 4);
|
||||
|
||||
Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv.data());
|
||||
Cluster<int32_t, 2, 2> *ptr =
|
||||
reinterpret_cast<Cluster<int32_t, 2, 2> *>(cv.data());
|
||||
REQUIRE(ptr[0].x == 1);
|
||||
REQUIRE(ptr[0].y == 2);
|
||||
REQUIRE(ptr[1].x == 6);
|
||||
@ -136,29 +157,31 @@ TEST_CASE("Push back more than initial capacity"){
|
||||
REQUIRE(ptr[2].x == 11);
|
||||
REQUIRE(ptr[2].y == 12);
|
||||
|
||||
//We should have allocated a new buffer, since we outgrew the initial capacity
|
||||
// We should have allocated a new buffer, since we outgrew the initial
|
||||
// capacity
|
||||
REQUIRE(initial_data != cv.data());
|
||||
|
||||
}
|
||||
|
||||
TEST_CASE("Concatenate two cluster vectors where the first has enough capacity"){
|
||||
ClusterVector<int32_t> cv1(2, 2, 12);
|
||||
Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
|
||||
cv1.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
|
||||
Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
|
||||
cv1.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
|
||||
TEST_CASE("Concatenate two cluster vectors where the first has enough capacity",
|
||||
"[.ClusterVector]") {
|
||||
ClusterVector<Cluster<int32_t, 2, 2>> cv1(12);
|
||||
Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
|
||||
cv1.push_back(c1);
|
||||
Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
|
||||
cv1.push_back(c2);
|
||||
|
||||
ClusterVector<int32_t> cv2(2, 2, 2);
|
||||
Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
|
||||
cv2.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
|
||||
Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}};
|
||||
cv2.push_back(c4.x, c4.y, reinterpret_cast<std::byte*>(&c4.data[0]));
|
||||
ClusterVector<Cluster<int32_t, 2, 2>> cv2(2);
|
||||
Cluster<int32_t, 2, 2> c3 = {11, 12, {13, 14, 15, 16}};
|
||||
cv2.push_back(c3);
|
||||
Cluster<int32_t, 2, 2> c4 = {16, 17, {18, 19, 20, 21}};
|
||||
cv2.push_back(c4);
|
||||
|
||||
cv1 += cv2;
|
||||
REQUIRE(cv1.size() == 4);
|
||||
REQUIRE(cv1.capacity() == 12);
|
||||
|
||||
Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv1.data());
|
||||
Cluster<int32_t, 2, 2> *ptr =
|
||||
reinterpret_cast<Cluster<int32_t, 2, 2> *>(cv1.data());
|
||||
REQUIRE(ptr[0].x == 1);
|
||||
REQUIRE(ptr[0].y == 2);
|
||||
REQUIRE(ptr[1].x == 6);
|
||||
@ -169,24 +192,26 @@ TEST_CASE("Concatenate two cluster vectors where the first has enough capacity")
|
||||
REQUIRE(ptr[3].y == 17);
|
||||
}
|
||||
|
||||
TEST_CASE("Concatenate two cluster vectors where we need to allocate"){
|
||||
ClusterVector<int32_t> cv1(2, 2, 2);
|
||||
Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}};
|
||||
cv1.push_back(c1.x, c1.y, reinterpret_cast<std::byte*>(&c1.data[0]));
|
||||
Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}};
|
||||
cv1.push_back(c2.x, c2.y, reinterpret_cast<std::byte*>(&c2.data[0]));
|
||||
TEST_CASE("Concatenate two cluster vectors where we need to allocate",
|
||||
"[.ClusterVector]") {
|
||||
ClusterVector<Cluster<int32_t, 2, 2>> cv1(2);
|
||||
Cluster<int32_t, 2, 2> c1 = {1, 2, {3, 4, 5, 6}};
|
||||
cv1.push_back(c1);
|
||||
Cluster<int32_t, 2, 2> c2 = {6, 7, {8, 9, 10, 11}};
|
||||
cv1.push_back(c2);
|
||||
|
||||
ClusterVector<int32_t> cv2(2, 2, 2);
|
||||
Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}};
|
||||
cv2.push_back(c3.x, c3.y, reinterpret_cast<std::byte*>(&c3.data[0]));
|
||||
Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}};
|
||||
cv2.push_back(c4.x, c4.y, reinterpret_cast<std::byte*>(&c4.data[0]));
|
||||
ClusterVector<Cluster<int32_t, 2, 2>> cv2(2);
|
||||
Cluster<int32_t, 2, 2> c3 = {11, 12, {13, 14, 15, 16}};
|
||||
cv2.push_back(c3);
|
||||
Cluster<int32_t, 2, 2> c4 = {16, 17, {18, 19, 20, 21}};
|
||||
cv2.push_back(c4);
|
||||
|
||||
cv1 += cv2;
|
||||
REQUIRE(cv1.size() == 4);
|
||||
REQUIRE(cv1.capacity() == 4);
|
||||
|
||||
Cluster_i2x2* ptr = reinterpret_cast<Cluster_i2x2*>(cv1.data());
|
||||
Cluster<int32_t, 2, 2> *ptr =
|
||||
reinterpret_cast<Cluster<int32_t, 2, 2> *>(cv1.data());
|
||||
REQUIRE(ptr[0].x == 1);
|
||||
REQUIRE(ptr[0].y == 2);
|
||||
REQUIRE(ptr[1].x == 6);
|
||||
@ -195,4 +220,49 @@ TEST_CASE("Concatenate two cluster vectors where we need to allocate"){
|
||||
REQUIRE(ptr[2].y == 12);
|
||||
REQUIRE(ptr[3].x == 16);
|
||||
REQUIRE(ptr[3].y == 17);
|
||||
}
|
||||
|
||||
struct ClusterTestData {
|
||||
uint8_t ClusterSizeX;
|
||||
uint8_t ClusterSizeY;
|
||||
std::vector<int64_t> index_map_x;
|
||||
std::vector<int64_t> index_map_y;
|
||||
};
|
||||
|
||||
TEST_CASE("Gain Map Calculation Index Map", "[.ClusterVector][.gain_map]") {
|
||||
|
||||
auto clustertestdata = GENERATE(
|
||||
ClusterTestData{3,
|
||||
3,
|
||||
{-1, 0, 1, -1, 0, 1, -1, 0, 1},
|
||||
{-1, -1, -1, 0, 0, 0, 1, 1, 1}},
|
||||
ClusterTestData{
|
||||
4,
|
||||
4,
|
||||
{-2, -1, 0, 1, -2, -1, 0, 1, -2, -1, 0, 1, -2, -1, 0, 1},
|
||||
{-2, -2, -2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1, 1, 1, 1}},
|
||||
ClusterTestData{2, 2, {-1, 0, -1, 0}, {-1, -1, 0, 0}},
|
||||
ClusterTestData{5,
|
||||
5,
|
||||
{-2, -1, 0, 1, 2, -2, -1, 0, 1, 2, -2, -1, 0,
|
||||
1, 2, -2, -1, 0, 1, 2, -2, -1, 0, 1, 2},
|
||||
{-2, -2, -2, -2, -2, -1, -1, -1, -1, -1, 0, 0, 0,
|
||||
0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2}});
|
||||
|
||||
uint8_t ClusterSizeX = clustertestdata.ClusterSizeX;
|
||||
uint8_t ClusterSizeY = clustertestdata.ClusterSizeY;
|
||||
|
||||
std::vector<int64_t> index_map_x(ClusterSizeX * ClusterSizeY);
|
||||
std::vector<int64_t> index_map_y(ClusterSizeX * ClusterSizeY);
|
||||
|
||||
int64_t index_cluster_center_x = ClusterSizeX / 2;
|
||||
int64_t index_cluster_center_y = ClusterSizeY / 2;
|
||||
|
||||
for (size_t j = 0; j < ClusterSizeX * ClusterSizeY; j++) {
|
||||
index_map_x[j] = j % ClusterSizeX - index_cluster_center_x;
|
||||
index_map_y[j] = j / ClusterSizeX - index_cluster_center_y;
|
||||
}
|
||||
|
||||
CHECK(index_map_x == clustertestdata.index_map_x);
|
||||
CHECK(index_map_y == clustertestdata.index_map_y);
|
||||
}
|
@@ -21,7 +21,7 @@ FilePtr &FilePtr::operator=(FilePtr &&other) {

 FILE *FilePtr::get() { return fp_; }

-int64_t FilePtr::tell() {
+ssize_t FilePtr::tell() {
     auto pos = ftell(fp_);
     if (pos == -1)
         throw std::runtime_error(fmt::format("Error getting file position: {}", error_msg()));
src/Fit.cpp (251 changed lines)

@@ -34,6 +34,30 @@ NDArray<double, 1> pol1(NDView<double, 1> x, NDView<double, 1> par) {
     return y;
 }

+double scurve(const double x, const double * par) {
+    return (par[0] + par[1] * x) + 0.5 * (1 + erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2]));
+}
+
+NDArray<double, 1> scurve(NDView<double, 1> x, NDView<double, 1> par) {
+    NDArray<double, 1> y({x.shape()}, 0);
+    for (ssize_t i = 0; i < x.size(); i++) {
+        y(i) = scurve(x(i), par.data());
+    }
+    return y;
+}
+
+double scurve2(const double x, const double * par) {
+    return (par[0] + par[1] * x) + 0.5 * (1 - erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2]));
+}
+
+NDArray<double, 1> scurve2(NDView<double, 1> x, NDView<double, 1> par) {
+    NDArray<double, 1> y({x.shape()}, 0);
+    for (ssize_t i = 0; i < x.size(); i++) {
+        y(i) = scurve2(x(i), par.data());
+    }
+    return y;
+}
+
 } // namespace func

 NDArray<double, 1> fit_gaus(NDView<double, 1> x, NDView<double, 1> y) {
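Both overloads of scurve and scurve2 above evaluate the same six-parameter model,
differing only in the sign in front of the error function:

    f(x) = par[0] + par[1]*x
           + 0.5 * (1 ± erf((x - par[2]) / (sqrt(2) * par[3]))) * (par[4] + par[5] * (x - par[2]))

i.e. a linear baseline plus an erf step whose plateau carries its own slope;
scurve describes a rising edge and scurve2 a falling one, matching the
scurve_init_par / scurve2_init_par helpers added further down in this diff.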
@@ -81,7 +105,7 @@ std::array<double, 3> gaus_init_par(const NDView<double, 1> x, const NDView<doub
     auto delta = x[1] - x[0];
     start_par[2] =
         std::count_if(y.begin(), y.end(),
-                      [e, delta](double val) { return val > *e / 2; }) *
+                      [e](double val) { return val > *e / 2; }) *
         delta / 2.35;

     return start_par;
@@ -273,4 +297,229 @@ NDArray<double, 3> fit_pol1(NDView<double, 1> x, NDView<double, 3> y,
     return result;
 }

+// ~~ S-CURVES ~~
+
+// SCURVE --
+std::array<double, 6> scurve_init_par(const NDView<double, 1> x, const NDView<double, 1> y){
+    // Estimate the initial parameters for the fit
+    std::array<double, 6> start_par{0, 0, 0, 0, 0, 0};
+
+    auto ymax = std::max_element(y.begin(), y.end());
+    auto ymin = std::min_element(y.begin(), y.end());
+    start_par[4] = *ymin + (*ymax - *ymin) / 2;
+
+    // Find the first x where the corresponding y value is above the threshold (start_par[4])
+    for (ssize_t i = 0; i < y.size(); ++i) {
+        if (y[i] >= start_par[4]) {
+            start_par[2] = x[i];
+            break; // Exit the loop after finding the first valid x
+        }
+    }
+
+    start_par[3] = 2 * sqrt(start_par[2]);
+    start_par[0] = 100;
+    start_par[1] = 0.25;
+    start_par[5] = 1;
+    return start_par;
+}
+
+// - No error
+NDArray<double, 1> fit_scurve(NDView<double, 1> x, NDView<double, 1> y) {
+    NDArray<double, 1> result = scurve_init_par(x, y);
+    lm_status_struct status;
+
+    lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(),
+            aare::func::scurve, &lm_control_double, &status);
+
+    return result;
+}
+
+NDArray<double, 3> fit_scurve(NDView<double, 1> x, NDView<double, 3> y, int n_threads) {
+    NDArray<double, 3> result({y.shape(0), y.shape(1), 6}, 0);
+
+    auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) {
+        for (ssize_t row = first_row; row < last_row; row++) {
+            for (ssize_t col = 0; col < y.shape(1); col++) {
+                NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
+                auto res = fit_scurve(x, values);
+                result(row, col, 0) = res(0);
+                result(row, col, 1) = res(1);
+                result(row, col, 2) = res(2);
+                result(row, col, 3) = res(3);
+                result(row, col, 4) = res(4);
+                result(row, col, 5) = res(5);
+            }
+        }
+    };
+
+    auto tasks = split_task(0, y.shape(0), n_threads);
+    RunInParallel(process, tasks);
+    return result;
+}
+
+// - Error
+void fit_scurve(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
+                NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2) {

+    // Check that we have the correct sizes
+    if (y.size() != x.size() || y.size() != y_err.size() ||
+        par_out.size() != 6 || par_err_out.size() != 6) {
+        throw std::runtime_error("Data, x, data_err must have the same size "
+                                 "and par_out, par_err_out must have size 6");
+    }
+
+    lm_status_struct status;
+    par_out = scurve_init_par(x, y);
+    std::array<double, 36> cov = {0}; // size 6x6
+    // std::array<double, 4> cov{0, 0, 0, 0};
+
+    lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(),
+             x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve,
+             &lm_control_double, &status);
+
+    // Calculate chi2
+    chi2 = 0;
+    for (ssize_t i = 0; i < y.size(); i++) {
+        chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2);
+    }
+}
+
+void fit_scurve(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
+                NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
+                int n_threads) {
+
+    auto process = [&](ssize_t first_row, ssize_t last_row) {
+        for (ssize_t row = first_row; row < last_row; row++) {
+            for (ssize_t col = 0; col < y.shape(1); col++) {
+                NDView<double, 1> y_view(&y(row, col, 0), {y.shape(2)});
+                NDView<double, 1> y_err_view(&y_err(row, col, 0),
+                                             {y_err.shape(2)});
+                NDView<double, 1> par_out_view(&par_out(row, col, 0),
+                                               {par_out.shape(2)});
+                NDView<double, 1> par_err_out_view(&par_err_out(row, col, 0),
+                                                   {par_err_out.shape(2)});
+
+                fit_scurve(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col));
+
+            }
+        }
+    };
+
+    auto tasks = split_task(0, y.shape(0), n_threads);
+    RunInParallel(process, tasks);
+
+}
+
+// SCURVE2 ---
+
+std::array<double, 6> scurve2_init_par(const NDView<double, 1> x, const NDView<double, 1> y){
+    // Estimate the initial parameters for the fit
+    std::array<double, 6> start_par{0, 0, 0, 0, 0, 0};
+
+    auto ymax = std::max_element(y.begin(), y.end());
+    auto ymin = std::min_element(y.begin(), y.end());
+    start_par[4] = *ymin + (*ymax - *ymin) / 2;
+
+    // Find the first x where the corresponding y value is above the threshold (start_par[4])
+    for (ssize_t i = 0; i < y.size(); ++i) {
+        if (y[i] <= start_par[4]) {
+            start_par[2] = x[i];
+            break; // Exit the loop after finding the first valid x
+        }
+    }
+
+    start_par[3] = 2 * sqrt(start_par[2]);
+    start_par[0] = 100;
+    start_par[1] = 0.25;
+    start_par[5] = -1;
+    return start_par;
+}
+
+// - No error
+NDArray<double, 1> fit_scurve2(NDView<double, 1> x, NDView<double, 1> y) {
+    NDArray<double, 1> result = scurve2_init_par(x, y);
+    lm_status_struct status;
+
+    lmcurve(result.size(), result.data(), x.size(), x.data(), y.data(),
+            aare::func::scurve2, &lm_control_double, &status);
+
+    return result;
+}
+
+NDArray<double, 3> fit_scurve2(NDView<double, 1> x, NDView<double, 3> y, int n_threads) {
+    NDArray<double, 3> result({y.shape(0), y.shape(1), 6}, 0);
+
+    auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) {
+        for (ssize_t row = first_row; row < last_row; row++) {
+            for (ssize_t col = 0; col < y.shape(1); col++) {
+                NDView<double, 1> values(&y(row, col, 0), {y.shape(2)});
+                auto res = fit_scurve2(x, values);
+                result(row, col, 0) = res(0);
+                result(row, col, 1) = res(1);
+                result(row, col, 2) = res(2);
+                result(row, col, 3) = res(3);
+                result(row, col, 4) = res(4);
+                result(row, col, 5) = res(5);
+            }
+        }
+    };
+
+    auto tasks = split_task(0, y.shape(0), n_threads);
+    RunInParallel(process, tasks);
+    return result;
+}
+
+// - Error
+void fit_scurve2(NDView<double, 1> x, NDView<double, 1> y, NDView<double, 1> y_err,
+                 NDView<double, 1> par_out, NDView<double, 1> par_err_out, double& chi2) {
+
+    // Check that we have the correct sizes
+    if (y.size() != x.size() || y.size() != y_err.size() ||
+        par_out.size() != 6 || par_err_out.size() != 6) {
+        throw std::runtime_error("Data, x, data_err must have the same size "
+                                 "and par_out, par_err_out must have size 6");
+    }
+
+    lm_status_struct status;
+    par_out = scurve2_init_par(x, y);
+    std::array<double, 36> cov = {0}; // size 6x6
+    // std::array<double, 4> cov{0, 0, 0, 0};
+
+    lmcurve2(par_out.size(), par_out.data(), par_err_out.data(), cov.data(),
+             x.size(), x.data(), y.data(), y_err.data(), aare::func::scurve2,
+             &lm_control_double, &status);
+
+    // Calculate chi2
+    chi2 = 0;
+    for (ssize_t i = 0; i < y.size(); i++) {
+        chi2 += std::pow((y(i) - func::pol1(x(i), par_out.data())) / y_err(i), 2);
+    }
+}
+
+void fit_scurve2(NDView<double, 1> x, NDView<double, 3> y, NDView<double, 3> y_err,
+                 NDView<double, 3> par_out, NDView<double, 3> par_err_out, NDView<double, 2> chi2_out,
+                 int n_threads) {
+
+    auto process = [&](ssize_t first_row, ssize_t last_row) {
+        for (ssize_t row = first_row; row < last_row; row++) {
+            for (ssize_t col = 0; col < y.shape(1); col++) {
+                NDView<double, 1> y_view(&y(row, col, 0), {y.shape(2)});
+                NDView<double, 1> y_err_view(&y_err(row, col, 0),
+                                             {y_err.shape(2)});
+                NDView<double, 1> par_out_view(&par_out(row, col, 0),
+                                               {par_out.shape(2)});
+                NDView<double, 1> par_err_out_view(&par_err_out(row, col, 0),
+                                                   {par_err_out.shape(2)});
+
+                fit_scurve2(x, y_view, y_err_view, par_out_view, par_err_out_view, chi2_out(row, col));
+
+            }
+        }
+    };
+
+    auto tasks = split_task(0, y.shape(0), n_threads);
+    RunInParallel(process, tasks);
+
+}
+
 } // namespace aare
@ -1,11 +1,11 @@
|
||||
#include "aare/Interpolator.hpp"
|
||||
#include "aare/algorithm.hpp"
|
||||
|
||||
namespace aare {
|
||||
|
||||
Interpolator::Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins,
|
||||
NDView<double, 1> ybins, NDView<double, 1> ebins)
|
||||
: m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins), m_energy_bins(ebins) {
|
||||
: m_ietax(etacube), m_ietay(etacube), m_etabinsx(xbins), m_etabinsy(ybins),
|
||||
m_energy_bins(ebins) {
|
||||
if (etacube.shape(0) != xbins.size() || etacube.shape(1) != ybins.size() ||
|
||||
etacube.shape(2) != ebins.size()) {
|
||||
throw std::invalid_argument(
|
||||
@ -53,87 +53,4 @@ Interpolator::Interpolator(NDView<double, 3> etacube, NDView<double, 1> xbins,
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<Photon> Interpolator::interpolate(const ClusterVector<int32_t>& clusters) {
|
||||
std::vector<Photon> photons;
|
||||
photons.reserve(clusters.size());
|
||||
|
||||
if (clusters.cluster_size_x() == 3 || clusters.cluster_size_y() == 3) {
|
||||
for (size_t i = 0; i<clusters.size(); i++){
|
||||
|
||||
auto cluster = clusters.at<Cluster3x3>(i);
|
||||
Eta2 eta= calculate_eta2(cluster);
|
||||
|
||||
Photon photon;
|
||||
photon.x = cluster.x;
|
||||
photon.y = cluster.y;
|
||||
photon.energy = eta.sum;
|
||||
|
||||
|
||||
//Finding the index of the last element that is smaller
|
||||
//should work fine as long as we have many bins
|
||||
auto ie = last_smaller(m_energy_bins, photon.energy);
|
||||
auto ix = last_smaller(m_etabinsx, eta.x);
|
||||
auto iy = last_smaller(m_etabinsy, eta.y);
|
||||
|
||||
double dX{}, dY{};
|
||||
// cBottomLeft = 0,
|
||||
// cBottomRight = 1,
|
||||
// cTopLeft = 2,
|
||||
// cTopRight = 3
|
||||
switch (eta.c) {
|
||||
case cTopLeft:
|
||||
dX = -1.;
|
||||
dY = 0.;
|
||||
break;
|
||||
case cTopRight:;
|
||||
dX = 0.;
|
||||
dY = 0.;
|
||||
break;
|
||||
case cBottomLeft:
|
||||
dX = -1.;
|
||||
dY = -1.;
|
||||
break;
|
||||
case cBottomRight:
|
||||
dX = 0.;
|
||||
dY = -1.;
|
||||
break;
|
||||
}
|
||||
photon.x += m_ietax(ix, iy, ie)*2 + dX;
|
||||
photon.y += m_ietay(ix, iy, ie)*2 + dY;
|
||||
photons.push_back(photon);
|
||||
}
|
||||
}else if(clusters.cluster_size_x() == 2 || clusters.cluster_size_y() == 2){
|
||||
for (size_t i = 0; i<clusters.size(); i++){
|
||||
auto cluster = clusters.at<Cluster2x2>(i);
|
||||
Eta2 eta= calculate_eta2(cluster);
|
||||
|
||||
Photon photon;
|
||||
photon.x = cluster.x;
|
||||
photon.y = cluster.y;
|
||||
photon.energy = eta.sum;
|
||||
|
||||
//Now do some actual interpolation.
|
||||
//Find which energy bin the cluster is in
|
||||
// auto ie = nearest_index(m_energy_bins, photon.energy)-1;
|
||||
// auto ix = nearest_index(m_etabinsx, eta.x)-1;
|
||||
// auto iy = nearest_index(m_etabinsy, eta.y)-1;
|
||||
//Finding the index of the last element that is smaller
|
||||
//should work fine as long as we have many bins
|
||||
auto ie = last_smaller(m_energy_bins, photon.energy);
|
||||
auto ix = last_smaller(m_etabinsx, eta.x);
|
||||
auto iy = last_smaller(m_etabinsy, eta.y);
|
||||
|
||||
photon.x += m_ietax(ix, iy, ie)*2; //eta goes between 0 and 1 but we could move the hit anywhere in the 2x2
|
||||
photon.y += m_ietay(ix, iy, ie)*2;
|
||||
photons.push_back(photon);
|
||||
}
|
||||
|
||||
}else{
|
||||
throw std::runtime_error("Only 3x3 and 2x2 clusters are supported for interpolation");
|
||||
}
|
||||
|
||||
|
||||
return photons;
|
||||
}
|
||||
|
||||
} // namespace aare
|
@@ -89,7 +89,7 @@ void JungfrauDataFile::seek(size_t frame_index) {
                            : frame_index;
    auto byte_offset = frame_offset * (m_bytes_per_frame + header_size);
    m_fp.seek(byte_offset);
-};
+}

size_t JungfrauDataFile::tell() { return m_current_frame_index; }
size_t JungfrauDataFile::total_frames() const { return m_total_frames; }
@@ -235,4 +235,4 @@ std::filesystem::path JungfrauDataFile::fpath(size_t file_index) const {
    return m_path / fname;
}

-} // namespace aare
+} // namespace aare
@@ -44,9 +44,9 @@ TEST_CASE("3D NDArray from NDView"){
     REQUIRE(image.size() == view.size());
     REQUIRE(image.data() != view.data());

-    for(int64_t i=0; i<image.shape(0); i++){
-        for(int64_t j=0; j<image.shape(1); j++){
-            for(int64_t k=0; k<image.shape(2); k++){
+    for(ssize_t i=0; i<image.shape(0); i++){
+        for(ssize_t j=0; j<image.shape(1); j++){
+            for(ssize_t k=0; k<image.shape(2); k++){
                 REQUIRE(image(i, j, k) == view(i, j, k));
             }
         }
@@ -54,7 +54,7 @@ TEST_CASE("3D NDArray from NDView"){
 }

 TEST_CASE("1D image") {
-    std::array<int64_t, 1> shape{{20}};
+    std::array<ssize_t, 1> shape{{20}};
     NDArray<short, 1> img(shape, 3);
     REQUIRE(img.size() == 20);
     REQUIRE(img(5) == 3);
@@ -71,7 +71,7 @@ TEST_CASE("Accessing a const object") {
 }

 TEST_CASE("Indexing of a 2D image") {
-    std::array<int64_t, 2> shape{{3, 7}};
+    std::array<ssize_t, 2> shape{{3, 7}};
     NDArray<long> img(shape, 5);
     for (uint32_t i = 0; i != img.size(); ++i) {
         REQUIRE(img(i) == 5);
@@ -114,7 +114,7 @@ TEST_CASE("Divide double by int") {
 }

 TEST_CASE("Elementwise multiplication of 3D image") {
-    std::array<int64_t, 3> shape{3, 4, 2};
+    std::array<ssize_t, 3> shape{3, 4, 2};
     NDArray<double, 3> a{shape};
     NDArray<double, 3> b{shape};
     for (uint32_t i = 0; i != a.size(); ++i) {
@@ -179,9 +179,9 @@ TEST_CASE("Compare two images") {
 }

 TEST_CASE("Size and shape matches") {
-    int64_t w = 15;
-    int64_t h = 75;
-    std::array<int64_t, 2> shape{w, h};
+    ssize_t w = 15;
+    ssize_t h = 75;
+    std::array<ssize_t, 2> shape{w, h};
     NDArray<double> a{shape};
     REQUIRE(a.size() == w * h);
     REQUIRE(a.shape() == shape);
@@ -224,7 +224,7 @@ TEST_CASE("Bitwise and on data") {


 TEST_CASE("Elementwise operations on images") {
-    std::array<int64_t, 2> shape{5, 5};
+    std::array<ssize_t, 2> shape{5, 5};
     double a_val = 3.0;
     double b_val = 8.0;

@ -142,7 +142,7 @@ TEST_CASE("iterators") {
// for (int i = 0; i != 12; ++i) {
// vec.push_back(i);
// }
// std::vector<int64_t> shape{3, 4};
// std::vector<ssize_t> shape{3, 4};
// NDView<int, 2> data(vec.data(), shape);
// }

@ -151,8 +151,8 @@ TEST_CASE("divide with another span") {
std::vector<int> vec1{3, 2, 1};
std::vector<int> result{3, 6, 3};

NDView<int, 1> data0(vec0.data(), Shape<1>{static_cast<int64_t>(vec0.size())});
NDView<int, 1> data1(vec1.data(), Shape<1>{static_cast<int64_t>(vec1.size())});
NDView<int, 1> data0(vec0.data(), Shape<1>{static_cast<ssize_t>(vec0.size())});
NDView<int, 1> data1(vec1.data(), Shape<1>{static_cast<ssize_t>(vec1.size())});

data0 /= data1;
@ -72,8 +72,8 @@ void NumpyFile::get_frame_into(size_t frame_number, std::byte *image_buf) {
}
}

size_t NumpyFile::pixels_per_frame() { return m_pixels_per_frame; };
size_t NumpyFile::bytes_per_frame() { return m_bytes_per_frame; };
size_t NumpyFile::pixels_per_frame() { return m_pixels_per_frame; }
size_t NumpyFile::bytes_per_frame() { return m_bytes_per_frame; }

std::vector<Frame> NumpyFile::read_n(size_t n_frames) {
// TODO: implement this in a more efficient way
@ -197,4 +197,4 @@ void NumpyFile::load_metadata() {
m_header = {dtype, fortran_order, shape};
}

} // namespace aare
} // namespace aare
146
src/RawFile.cpp
@ -1,6 +1,8 @@
#include "aare/RawFile.hpp"
#include "aare/algorithm.hpp"
#include "aare/PixelMap.hpp"
#include "aare/defs.hpp"
#include "aare/logger.hpp"
#include "aare/geo_helpers.hpp"

#include <fmt/format.h>
@ -14,27 +16,18 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode)
: m_master(fname) {
m_mode = mode;
if (mode == "r") {

n_subfiles = find_number_of_subfiles(); // f0,f1...fn
n_subfile_parts =
m_master.geometry().col * m_master.geometry().row; // d0,d1...dn


find_geometry();

if (m_master.roi()){
m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value());
}

open_subfiles();
} else {
throw std::runtime_error(LOCATION +
"Unsupported mode. Can only read RawFiles.");
" Unsupported mode. Can only read RawFiles.");
}
}

Frame RawFile::read_frame() { return get_frame(m_current_frame++); };
Frame RawFile::read_frame() { return get_frame(m_current_frame++); }

Frame RawFile::read_frame(size_t frame_number) {
seek(frame_number);
@ -52,13 +45,13 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames) {

void RawFile::read_into(std::byte *image_buf) {
return get_frame_into(m_current_frame++, image_buf);
};
}


void RawFile::read_into(std::byte *image_buf, DetectorHeader *header) {

return get_frame_into(m_current_frame++, image_buf, header);
};
}

void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) {
// return get_frame_into(m_current_frame++, image_buf, header);
@ -67,12 +60,12 @@ void RawFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *h
this->get_frame_into(m_current_frame++, image_buf, header);
image_buf += bytes_per_frame();
if(header)
header+=n_mod();
header+=n_modules();
}

};
}

size_t RawFile::n_mod() const { return n_subfile_parts; }
size_t RawFile::n_modules() const { return m_master.n_modules(); }


size_t RawFile::bytes_per_frame() {
@ -94,9 +87,9 @@ void RawFile::seek(size_t frame_index) {
frame_index, total_frames()));
}
m_current_frame = frame_index;
};
}

size_t RawFile::tell() { return m_current_frame; };
size_t RawFile::tell() { return m_current_frame; }

size_t RawFile::total_frames() const { return m_master.frames_in_file(); }
size_t RawFile::rows() const { return m_geometry.pixels_y; }
@ -106,17 +99,11 @@ xy RawFile::geometry() { return m_master.geometry(); }

void RawFile::open_subfiles() {
if (m_mode == "r")
for (size_t i = 0; i != n_subfiles; ++i) {
auto v = std::vector<RawSubFile *>(n_subfile_parts);
for (size_t j = 0; j != n_subfile_parts; ++j) {
auto pos = m_geometry.module_pixel_0[j];
v[j] = new RawSubFile(m_master.data_fname(j, i),
m_master.detector_type(), pos.height,
pos.width, m_master.bitdepth(),
pos.row_index, pos.col_index);

}
subfiles.push_back(v);
for (size_t i = 0; i != n_modules(); ++i) {
auto pos = m_geometry.module_pixel_0[i];
m_subfiles.emplace_back(std::make_unique<RawSubFile>(
m_master.data_fname(i, 0), m_master.detector_type(), pos.height,
pos.width, m_master.bitdepth(), pos.row_index, pos.col_index));
}
else {
throw std::runtime_error(LOCATION +
@ -141,18 +128,6 @@ DetectorHeader RawFile::read_header(const std::filesystem::path &fname) {
return h;
}

int RawFile::find_number_of_subfiles() {
int n_files = 0;
// f0,f1...fn How many files is the data split into?
while (std::filesystem::exists(m_master.data_fname(0, n_files)))
n_files++; // increment after test

#ifdef AARE_VERBOSE
fmt::print("Found: {} subfiles\n", n_files);
#endif
return n_files;

}

RawMasterFile RawFile::master() const { return m_master; }

@ -168,7 +143,7 @@ void RawFile::find_geometry() {
uint16_t c{};


for (size_t i = 0; i < n_subfile_parts; i++) {
for (size_t i = 0; i < n_modules(); i++) {
auto h = read_header(m_master.data_fname(i, 0));
r = std::max(r, h.row);
c = std::max(c, h.column);
@ -210,70 +185,58 @@ size_t RawFile::bytes_per_pixel() const {
}

void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, DetectorHeader *header) {
LOG(logDEBUG) << "RawFile::get_frame_into(" << frame_index << ")";
if (frame_index >= total_frames()) {
throw std::runtime_error(LOCATION + "Frame number out of range");
}
std::vector<size_t> frame_numbers(n_subfile_parts);
std::vector<size_t> frame_indices(n_subfile_parts, frame_index);
std::vector<size_t> frame_numbers(n_modules());
std::vector<size_t> frame_indices(n_modules(), frame_index);


// sync the frame numbers

if (n_subfile_parts != 1) {
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
auto subfile_id = frame_index / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(LOCATION +
" Subfile out of range. Possible missing data.");
}
frame_numbers[part_idx] =
subfiles[subfile_id][part_idx]->frame_number(
frame_index % m_master.max_frames_per_file());
if (n_modules() != 1) { //if we have more than one module
for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) {
frame_numbers[part_idx] = m_subfiles[part_idx]->frame_number(frame_index);
}

// 1. if frame number vector is the same break
while (std::adjacent_find(frame_numbers.begin(), frame_numbers.end(),
std::not_equal_to<>()) !=
frame_numbers.end()) {
while (!all_equal(frame_numbers)) {

// 2. find the index of the minimum frame number,
auto min_frame_idx = std::distance(
frame_numbers.begin(),
std::min_element(frame_numbers.begin(), frame_numbers.end()));

// 3. increase its index and update its respective frame number
frame_indices[min_frame_idx]++;

// 4. if we can't increase its index => throw error
if (frame_indices[min_frame_idx] >= total_frames()) {
throw std::runtime_error(LOCATION +
"Frame number out of range");
}
auto subfile_id =
frame_indices[min_frame_idx] / m_master.max_frames_per_file();

frame_numbers[min_frame_idx] =
subfiles[subfile_id][min_frame_idx]->frame_number(
frame_indices[min_frame_idx] %
m_master.max_frames_per_file());
m_subfiles[min_frame_idx]->frame_number(frame_indices[min_frame_idx]);
}
}

if (m_master.geometry().col == 1) {
// get the part from each subfile and copy it to the frame
for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) {
auto corrected_idx = frame_indices[part_idx];
auto subfile_id = corrected_idx / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(LOCATION +
" Subfile out of range. Possible missing data.");
}


// This is where we start writing
auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x +
m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8;

if (m_geometry.module_pixel_0[part_idx].origin_x!=0)
throw std::runtime_error(LOCATION + "Implementation error. x pos not 0.");
throw std::runtime_error(LOCATION + " Implementation error. x pos not 0.");

//TODO! Risk for out of range access
subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file());
subfiles[subfile_id][part_idx]->read_into(frame_buffer + offset, header);
//TODO! What if the files don't match?
m_subfiles[part_idx]->seek(corrected_idx);
m_subfiles[part_idx]->read_into(frame_buffer + offset, header);
if (header)
++header;
}
@ -282,26 +245,21 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
//TODO! should we read row by row?

// create a buffer large enough to hold a full module

auto bytes_per_part = m_master.pixels_y() * m_master.pixels_x() *
m_master.bitdepth() /
8; // TODO! replace with image_size_in_bytes

auto *part_buffer = new std::byte[bytes_per_part];

// TODO! if we have many submodules we should reorder them on the module
// level

for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) {
for (size_t part_idx = 0; part_idx != n_modules(); ++part_idx) {
auto pos = m_geometry.module_pixel_0[part_idx];
auto corrected_idx = frame_indices[part_idx];
auto subfile_id = corrected_idx / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(LOCATION +
" Subfile out of range. Possible missing data.");
}

subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file());
subfiles[subfile_id][part_idx]->read_into(part_buffer, header);
m_subfiles[part_idx]->seek(corrected_idx);
m_subfiles[part_idx]->read_into(part_buffer, header);
if(header)
++header;

@ -321,6 +279,7 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect
}
delete[] part_buffer;
}

}

std::vector<Frame> RawFile::read_n(size_t n_frames) {
@ -337,27 +296,8 @@ size_t RawFile::frame_number(size_t frame_index) {
if (frame_index >= m_master.frames_in_file()) {
throw std::runtime_error(LOCATION + " Frame number out of range");
}
size_t subfile_id = frame_index / m_master.max_frames_per_file();
if (subfile_id >= subfiles.size()) {
throw std::runtime_error(
LOCATION + " Subfile out of range. Possible missing data.");
}
return subfiles[subfile_id][0]->frame_number(
frame_index % m_master.max_frames_per_file());
}

RawFile::~RawFile() {

// TODO! Fix this, for file closing
for (auto &vec : subfiles) {
for (auto *subfile : vec) {
delete subfile;
}
}
return m_subfiles[0]->frame_number(frame_index);
}


} // namespace aare
} // namespace aare
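The rewritten get_frame_into() above keeps the numbered synchronisation loop (steps 1 to 4): when the modules report different frame numbers, the module that lags behind is advanced until all frame numbers agree. Below is a standalone sketch of that loop, not the library code; the read_frame_number callback is a stand-in for m_subfiles[i]->frame_number(idx), and all_equal_sketch mirrors the all_equal behaviour tested in this commit (false for an empty vector).

#include <algorithm>
#include <cstddef>
#include <functional>
#include <stdexcept>
#include <vector>

// true if the vector is non-empty and every element is the same
bool all_equal_sketch(const std::vector<size_t> &v) {
    return !v.empty() && std::adjacent_find(v.begin(), v.end(),
                                            std::not_equal_to<>()) == v.end();
}

// Advance lagging modules until every module points at the same frame number.
void sync_modules(std::vector<size_t> &frame_numbers,
                  std::vector<size_t> &frame_indices, size_t total_frames,
                  const std::function<size_t(size_t, size_t)> &read_frame_number) {
    while (!all_equal_sketch(frame_numbers)) {
        // find the module with the smallest frame number ...
        auto lagging = std::distance(
            frame_numbers.begin(),
            std::min_element(frame_numbers.begin(), frame_numbers.end()));
        // ... advance its local index by one and re-read its frame number
        if (++frame_indices[lagging] >= total_frames)
            throw std::runtime_error("Frame number out of range");
        frame_numbers[lagging] = read_frame_number(lagging, frame_indices[lagging]);
    }
}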
@ -99,11 +99,11 @@ TEST_CASE("Read frame numbers from a raw file", "[.integration]") {
}
}

TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]") {
auto fpath_raw = test_data_path() / "jungfrau" / "jungfrau_single_master_0.json";
TEST_CASE("Compare reading from a numpy file with a raw file", "[.files]") {
auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_master_0.json";
REQUIRE(std::filesystem::exists(fpath_raw));

auto fpath_npy = test_data_path() / "jungfrau" / "jungfrau_single_0.npy";
auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy";
REQUIRE(std::filesystem::exists(fpath_npy));

File raw(fpath_raw, "r");
@ -113,6 +113,7 @@ TEST_CASE("Compare reading from a numpy file with a raw file", "[.integration]")
CHECK(npy.total_frames() == 10);

for (size_t i = 0; i < 10; ++i) {
CHECK(raw.tell() == i);
auto raw_frame = raw.read_frame();
auto npy_frame = npy.read_frame();
CHECK((raw_frame.view<uint16_t>() == npy_frame.view<uint16_t>()));
@ -87,7 +87,7 @@ int ScanParameters::start() const { return m_start; }
int ScanParameters::stop() const { return m_stop; }
void ScanParameters::increment_stop(){
m_stop += 1;
};
}
int ScanParameters::step() const { return m_step; }
const std::string &ScanParameters::dac() const { return m_dac; }
bool ScanParameters::enabled() const { return m_enabled; }
@ -140,6 +140,10 @@ std::optional<size_t> RawMasterFile::number_of_rows() const {

xy RawMasterFile::geometry() const { return m_geometry; }

size_t RawMasterFile::n_modules() const {
return m_geometry.row * m_geometry.col;
}

std::optional<uint8_t> RawMasterFile::quad() const { return m_quad; }

// optional values, these may or may not be present in the master file
@ -417,4 +421,4 @@ void RawMasterFile::parse_raw(const std::filesystem::path &fpath) {
if(m_frames_in_file==0)
m_frames_in_file = m_total_frames_expected;
}
} // namespace aare
} // namespace aare
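The new n_modules() accessor above is just the product of the row and column counts from the master-file geometry. A trivial illustration with made-up numbers; the struct here is local to the example and not the library's xy type.

#include <cstddef>
#include <cstdio>

struct ModuleGeometry { size_t row; size_t col; };

int main() {
    ModuleGeometry geometry{2, 2};                 // e.g. a 2x2 module assembly
    size_t n_modules = geometry.row * geometry.col;
    std::printf("n_modules = %zu\n", n_modules);   // prints: n_modules = 4
}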
@ -1,9 +1,15 @@
#include "aare/RawSubFile.hpp"
#include "aare/PixelMap.hpp"
#include "aare/algorithm.hpp"
#include "aare/utils/ifstream_helpers.hpp"
#include "aare/logger.hpp"

#include <cstring> // memcpy
#include <fmt/core.h>
#include <iostream>
#include <regex>


@ -12,51 +18,51 @@ namespace aare {
RawSubFile::RawSubFile(const std::filesystem::path &fname,
DetectorType detector, size_t rows, size_t cols,
size_t bitdepth, uint32_t pos_row, uint32_t pos_col)
: m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname),
: m_detector_type(detector), m_bitdepth(bitdepth),
m_rows(rows), m_cols(cols),
m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row),
m_pos_col(pos_col) {

LOG(logDEBUG) << "RawSubFile::RawSubFile()";
if (m_detector_type == DetectorType::Moench03_old) {
m_pixel_map = GenerateMoench03PixelMap();
} else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) {
m_pixel_map = GenerateEigerFlipRowsPixelMap();
}

if (std::filesystem::exists(fname)) {
m_num_frames = std::filesystem::file_size(fname) /
(sizeof(DetectorHeader) + rows * cols * bitdepth / 8);
} else {
throw std::runtime_error(
LOCATION + fmt::format("File {} does not exist", m_fname.string()));
}

// fp = fopen(m_fname.string().c_str(), "rb");
m_file.open(m_fname, std::ios::binary);
if (!m_file.is_open()) {
throw std::runtime_error(
LOCATION + fmt::format("Could not open file {}", m_fname.string()));
}

#ifdef AARE_VERBOSE
fmt::print("Opened file: {} with {} frames\n", m_fname.string(), m_num_frames);
fmt::print("m_rows: {}, m_cols: {}, m_bitdepth: {}\n", m_rows, m_cols,
m_bitdepth);
fmt::print("file size: {}\n", std::filesystem::file_size(fname));
#endif
parse_fname(fname);
scan_files();
open_file(m_current_file_index); // open the first file
}

void RawSubFile::seek(size_t frame_index) {
if (frame_index >= m_num_frames) {
throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, m_num_frames));
LOG(logDEBUG) << "RawSubFile::seek(" << frame_index << ")";
if (frame_index >= m_total_frames) {
throw std::runtime_error(LOCATION + " Frame index out of range: " +
std::to_string(frame_index));
}
m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index);
m_current_frame_index = frame_index;
auto file_index = first_larger(m_last_frame_in_file, frame_index);

if (file_index != m_current_file_index)
open_file(file_index);

auto frame_offset = (file_index)
? frame_index - m_last_frame_in_file[file_index - 1]
: frame_index;
auto byte_offset = frame_offset * (m_bytes_per_frame + sizeof(DetectorHeader));
m_file.seekg(byte_offset);
}

size_t RawSubFile::tell() {
return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame());
LOG(logDEBUG) << "RawSubFile::tell():" << m_current_frame_index;
return m_current_frame_index;
}

void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
LOG(logDEBUG) << "RawSubFile::read_into()";

if (header) {
m_file.read(reinterpret_cast<char *>(header), sizeof(DetectorHeader));
} else {
@ -90,6 +96,13 @@ void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) {
if (m_file.fail()){
throw std::runtime_error(LOCATION + ifstream_error_msg(m_file));
}

++ m_current_frame_index;
if (m_current_frame_index >= m_last_frame_in_file[m_current_file_index] &&
(m_current_frame_index < m_total_frames)) {
++m_current_file_index;
open_file(m_current_file_index);
}
}

void RawSubFile::read_into(std::byte *image_buf, size_t n_frames, DetectorHeader *header) {
@ -130,4 +143,69 @@ size_t RawSubFile::frame_number(size_t frame_index) {
return h.frameNumber;
}

void RawSubFile::parse_fname(const std::filesystem::path &fname) {
LOG(logDEBUG) << "RawSubFile::parse_fname()";
// data has the format: /path/too/data/jungfrau_single_d0_f1_0.raw
// d0 is the module index, will not change for this file
// f1 is the file index - this is the one we need
// 0 is the measurement index, will not change
m_path = fname.parent_path();
m_base_name = fname.filename();

// Regex to extract numbers after 'd' and 'f'
std::regex pattern(R"(^(.*_d)(\d+)(_f)(\d+)(_\d+\.raw)$)");
std::smatch match;

if (std::regex_match(m_base_name, match, pattern)) {
m_offset = std::stoi(match[4].str()); // find the first file index in case of a truncated series
m_base_name = match[1].str() + match[2].str() + match[3].str() + "{}" + match[5].str();
LOG(logDEBUG) << "Base name: " << m_base_name;
LOG(logDEBUG) << "Offset: " << m_offset;
LOG(logDEBUG) << "Path: " << m_path.string();
} else {
throw std::runtime_error(
LOCATION + fmt::format("Could not parse file name {}", fname.string()));
}
}

std::filesystem::path RawSubFile::fpath(size_t file_index) const {
auto fname = fmt::format(m_base_name, file_index);
return m_path / fname;
}

void RawSubFile::open_file(size_t file_index) {
m_file.close();
auto fname = fpath(file_index+m_offset);
LOG(logDEBUG) << "RawSubFile::open_file(): " << fname.string();
m_file.open(fname, std::ios::binary);
if (!m_file.is_open()) {
throw std::runtime_error(
LOCATION + fmt::format("Could not open file {}", fpath(file_index).string()));
}
m_current_file_index = file_index;
}

void RawSubFile::scan_files() {
LOG(logDEBUG) << "RawSubFile::scan_files()";
// find how many files we have and the number of frames in each file
m_last_frame_in_file.clear();
size_t file_index = m_offset;

while (std::filesystem::exists(fpath(file_index))) {
auto n_frames = std::filesystem::file_size(fpath(file_index)) /
(m_bytes_per_frame + sizeof(DetectorHeader));
m_last_frame_in_file.push_back(n_frames);
LOG(logDEBUG) << "Found: " << n_frames << " frames in file: " << fpath(file_index).string();
++file_index;
}

// find where we need to open the next file and total number of frames
m_last_frame_in_file = cumsum(m_last_frame_in_file);
if(m_last_frame_in_file.empty()){
m_total_frames = 0;
}else{
m_total_frames = m_last_frame_in_file.back();
}
}

} // namespace aare
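scan_files() above turns the per-file frame counts into a cumulative sum, and seek() then uses first_larger() to decide which sub-file a global frame index lives in and the local offset within it. Below is a standalone sketch of that lookup, with a local first_larger stand-in consistent with the first_larger test cases in this commit (not the library implementation); the example numbers mirror the 3, 3, 3 and 1 frame layout used in the RawSubFile test below.

#include <cstddef>
#include <cstdio>
#include <vector>

// Index of the first element strictly larger than val; if none is larger,
// the last index is returned (matching the behaviour exercised in algorithm.test.cpp).
size_t first_larger_sketch(const std::vector<size_t> &v, size_t val) {
    for (size_t i = 0; i < v.size(); ++i)
        if (v[i] > val)
            return i;
    return v.empty() ? 0 : v.size() - 1;
}

int main() {
    // cumulative frame counts, e.g. four files holding 3, 3, 3 and 1 frames
    std::vector<size_t> last_frame_in_file{3, 6, 9, 10};
    size_t frame_index = 4; // global frame index
    auto file_index = first_larger_sketch(last_frame_in_file, frame_index);
    auto frame_offset = file_index ? frame_index - last_frame_in_file[file_index - 1]
                                   : frame_index;
    std::printf("file %zu, local frame %zu\n", file_index, frame_offset); // file 1, local frame 1
}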
76
src/RawSubFile.test.cpp
Normal file
@ -0,0 +1,76 @@
#include "aare/RawSubFile.hpp"
#include "aare/File.hpp"
#include "aare/NDArray.hpp"
#include <catch2/catch_test_macros.hpp>
#include "test_config.hpp"

using namespace aare;

TEST_CASE("Read frames directly from a RawSubFile", "[.files]"){
auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f0_0.raw";
REQUIRE(std::filesystem::exists(fpath_raw));

RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16);
REQUIRE(f.rows() == 512);
REQUIRE(f.cols() == 1024);
REQUIRE(f.pixels_per_frame() == 512 * 1024);
REQUIRE(f.bytes_per_frame() == 512 * 1024 * 2);
REQUIRE(f.bytes_per_pixel() == 2);

auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy";
REQUIRE(std::filesystem::exists(fpath_npy));

//Numpy file with the same data to use as reference
File npy(fpath_npy, "r");

CHECK(f.frames_in_file() == 10);
CHECK(npy.total_frames() == 10);

DetectorHeader header{};
NDArray<uint16_t, 2> image({static_cast<ssize_t>(f.rows()), static_cast<ssize_t>(f.cols())});
for (size_t i = 0; i < 10; ++i) {
CHECK(f.tell() == i);
f.read_into(image.buffer(), &header);
auto npy_frame = npy.read_frame();
CHECK((image.view() == npy_frame.view<uint16_t>()));
}
}

TEST_CASE("Read frames directly from a RawSubFile starting at the second file", "[.files]"){
// we know this file has 10 frames with frame numbers 1 to 10
// f0 1,2,3
// f1 4,5,6 <-- starting here
// f2 7,8,9
// f3 10

auto fpath_raw = test_data_path() / "raw/jungfrau" / "jungfrau_single_d0_f1_0.raw";
REQUIRE(std::filesystem::exists(fpath_raw));

RawSubFile f(fpath_raw, DetectorType::Jungfrau, 512, 1024, 16);

auto fpath_npy = test_data_path() / "raw/jungfrau" / "jungfrau_single_0.npy";
REQUIRE(std::filesystem::exists(fpath_npy));

//Numpy file with the same data to use as reference
File npy(fpath_npy, "r");
npy.seek(3);

CHECK(f.frames_in_file() == 7);
CHECK(npy.total_frames() == 10);

DetectorHeader header{};
NDArray<uint16_t, 2> image({static_cast<ssize_t>(f.rows()), static_cast<ssize_t>(f.cols())});
for (size_t i = 0; i < 7; ++i) {
CHECK(f.tell() == i);
f.read_into(image.buffer(), &header);
// frame numbers start at 1 frame index at 0
// adding 3 + 1 to verify the frame number
CHECK(header.frameNumber == i + 4);
auto npy_frame = npy.read_frame();
CHECK((image.view() == npy_frame.view<uint16_t>()));
}
}
@ -1,8 +1,7 @@


#include <catch2/catch_test_macros.hpp>
#include <aare/algorithm.hpp>

#include <catch2/catch_test_macros.hpp>

TEST_CASE("Find the closed index in a 1D array", "[algorithm]") {
aare::NDArray<double, 1> arr({5});
@ -17,7 +16,7 @@ TEST_CASE("Find the closed index in a 1D array", "[algorithm]") {
REQUIRE(aare::nearest_index(arr, -1.0) == 0);
}

TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){
TEST_CASE("Passing integers to nearest_index works", "[algorithm]") {
aare::NDArray<int, 1> arr({5});
for (ssize_t i = 0; i < arr.size(); i++) {
arr[i] = i;
@ -30,8 +29,7 @@ TEST_CASE("Passing integers to nearest_index works", "[algorithm]"){
REQUIRE(aare::nearest_index(arr, -1) == 0);
}


TEST_CASE("nearest_index works with std::vector", "[algorithm]"){
TEST_CASE("nearest_index works with std::vector", "[algorithm]") {
std::vector<double> vec = {0, 1, 2, 3, 4};
REQUIRE(aare::nearest_index(vec, 2.123) == 2);
REQUIRE(aare::nearest_index(vec, 2.66) == 3);
@ -40,7 +38,7 @@ TEST_CASE("nearest_index works with std::vector", "[algorithm]"){
REQUIRE(aare::nearest_index(vec, -10.0) == 0);
}

TEST_CASE("nearest index works with std::array", "[algorithm]"){
TEST_CASE("nearest index works with std::array", "[algorithm]") {
std::array<double, 5> arr = {0, 1, 2, 3, 4};
REQUIRE(aare::nearest_index(arr, 2.123) == 2);
REQUIRE(aare::nearest_index(arr, 2.501) == 3);
@ -49,18 +47,20 @@ TEST_CASE("nearest index works with std::array", "[algorithm]"){
REQUIRE(aare::nearest_index(arr, -10.0) == 0);
}

TEST_CASE("nearest index when there is no different uses the first element", "[algorithm]"){
TEST_CASE("nearest index when there is no different uses the first element",
"[algorithm]") {
std::vector<int> vec = {5, 5, 5, 5, 5};
REQUIRE(aare::nearest_index(vec, 5) == 0);
}

TEST_CASE("nearest index when there is no different uses the first element also when all smaller", "[algorithm]"){
TEST_CASE("nearest index when there is no different uses the first element "
"also when all smaller",
"[algorithm]") {
std::vector<int> vec = {5, 5, 5, 5, 5};
REQUIRE(aare::nearest_index(vec, 10) == 0);
}


TEST_CASE("last smaller", "[algorithm]"){
TEST_CASE("last smaller", "[algorithm]") {
aare::NDArray<double, 1> arr({5});
for (ssize_t i = 0; i < arr.size(); i++) {
arr[i] = i;
@ -72,17 +72,17 @@ TEST_CASE("last smaller", "[algorithm]"){
REQUIRE(aare::last_smaller(arr, 253.) == 4);
}

TEST_CASE("returns last bin strictly smaller", "[algorithm]"){
TEST_CASE("returns last bin strictly smaller", "[algorithm]") {
aare::NDArray<double, 1> arr({5});
for (ssize_t i = 0; i < arr.size(); i++) {
arr[i] = i;
}
// arr 0, 1, 2, 3, 4
REQUIRE(aare::last_smaller(arr, 2.0) == 1);

}

TEST_CASE("last_smaller with all elements smaller returns last element", "[algorithm]"){
TEST_CASE("last_smaller with all elements smaller returns last element",
"[algorithm]") {
aare::NDArray<double, 1> arr({5});
for (ssize_t i = 0; i < arr.size(); i++) {
arr[i] = i;
@ -91,7 +91,8 @@ TEST_CASE("last_smaller with all elements smaller returns last element", "[algor
REQUIRE(aare::last_smaller(arr, 50.) == 4);
}

TEST_CASE("last_smaller with all elements bigger returns first element", "[algorithm]"){
TEST_CASE("last_smaller with all elements bigger returns first element",
"[algorithm]") {
aare::NDArray<double, 1> arr({5});
for (ssize_t i = 0; i < arr.size(); i++) {
arr[i] = i;
@ -100,38 +101,41 @@ TEST_CASE("last_smaller with all elements bigger returns first element", "[algor
REQUIRE(aare::last_smaller(arr, -50.) == 0);
}

TEST_CASE("last smaller with all elements equal returns the first element", "[algorithm]"){
std::vector<int> vec = {5,5,5,5,5,5,5};
TEST_CASE("last smaller with all elements equal returns the first element",
"[algorithm]") {
std::vector<int> vec = {5, 5, 5, 5, 5, 5, 5};
REQUIRE(aare::last_smaller(vec, 5) == 0);
}


TEST_CASE("first_lager with vector", "[algorithm]"){
TEST_CASE("first_lager with vector", "[algorithm]") {
std::vector<double> vec = {0, 1, 2, 3, 4};
REQUIRE(aare::first_larger(vec, 2.5) == 3);
}

TEST_CASE("first_lager with all elements smaller returns last element", "[algorithm]"){
TEST_CASE("first_lager with all elements smaller returns last element",
"[algorithm]") {
std::vector<double> vec = {0, 1, 2, 3, 4};
REQUIRE(aare::first_larger(vec, 50.) == 4);
}

TEST_CASE("first_lager with all elements bigger returns first element", "[algorithm]"){
TEST_CASE("first_lager with all elements bigger returns first element",
"[algorithm]") {
std::vector<double> vec = {0, 1, 2, 3, 4};
REQUIRE(aare::first_larger(vec, -50.) == 0);
}

TEST_CASE("first_lager with all elements the same as the check returns last", "[algorithm]"){
TEST_CASE("first_lager with all elements the same as the check returns last",
"[algorithm]") {
std::vector<int> vec = {14, 14, 14, 14, 14};
REQUIRE(aare::first_larger(vec, 14) == 4);
}

TEST_CASE("first larger with the same element", "[algorithm]"){
std::vector<int> vec = {7,8,9,10,11};
TEST_CASE("first larger with the same element", "[algorithm]") {
std::vector<int> vec = {7, 8, 9, 10, 11};
REQUIRE(aare::first_larger(vec, 9) == 3);
}

TEST_CASE("cumsum works", "[algorithm]"){
TEST_CASE("cumsum works", "[algorithm]") {
std::vector<double> vec = {0, 1, 2, 3, 4};
auto result = aare::cumsum(vec);
REQUIRE(result.size() == vec.size());
@ -141,12 +145,12 @@ TEST_CASE("cumsum works", "[algorithm]"){
REQUIRE(result[3] == 6);
REQUIRE(result[4] == 10);
}
TEST_CASE("cumsum works with empty vector", "[algorithm]"){
TEST_CASE("cumsum works with empty vector", "[algorithm]") {
std::vector<double> vec = {};
auto result = aare::cumsum(vec);
REQUIRE(result.size() == 0);
}
TEST_CASE("cumsum works with negative numbers", "[algorithm]"){
TEST_CASE("cumsum works with negative numbers", "[algorithm]") {
std::vector<double> vec = {0, -1, -2, -3, -4};
auto result = aare::cumsum(vec);
REQUIRE(result.size() == vec.size());
@ -157,3 +161,35 @@ TEST_CASE("cumsum works with negative numbers", "[algorithm]"){
REQUIRE(result[4] == -10);
}


TEST_CASE("cumsum on an empty vector", "[algorithm]") {
std::vector<double> vec = {};
auto result = aare::cumsum(vec);
REQUIRE(result.size() == 0);

}

TEST_CASE("All equal on an empty vector is false", "[algorithm]") {
std::vector<int> vec = {};
REQUIRE(aare::all_equal(vec) == false);
}

TEST_CASE("All equal on a vector with 1 element is true", "[algorithm]") {
std::vector<int> vec = {1};
REQUIRE(aare::all_equal(vec) == true);
}

TEST_CASE("All equal on a vector with 2 elements is true", "[algorithm]") {
std::vector<int> vec = {1, 1};
REQUIRE(aare::all_equal(vec) == true);
}

TEST_CASE("All equal on a vector with two different elements is false", "[algorithm]") {
std::vector<int> vec = {1, 2};
REQUIRE(aare::all_equal(vec) == false);
}

TEST_CASE("Last element is different", "[algorithm]") {
std::vector<int> vec = {1, 1, 1, 1, 2};
REQUIRE(aare::all_equal(vec) == false);
}
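For reference while reading the tests above, here are minimal local sketches of nearest_index and cumsum that satisfy the expectations they state; the real implementations live in aare/algorithm.hpp and may differ in detail.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <numeric>
#include <vector>

// Index of the element closest to val; on ties the first occurrence wins.
template <typename T>
size_t nearest_index_sketch(const std::vector<T> &v, T val) {
    auto it = std::min_element(v.begin(), v.end(), [val](T a, T b) {
        return std::abs(a - val) < std::abs(b - val);
    });
    return static_cast<size_t>(std::distance(v.begin(), it));
}

// Running sum with the same length as the input, {0,1,2,3,4} -> {0,1,3,6,10}.
template <typename T>
std::vector<T> cumsum_sketch(const std::vector<T> &v) {
    std::vector<T> out(v.size());
    std::partial_sum(v.begin(), v.end(), out.begin());
    return out;
}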
@ -26,8 +26,8 @@ void adc_sar_05_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> outpu
throw std::invalid_argument(LOCATION + " input and output shapes must match");
}

for(int64_t i = 0; i < input.shape(0); i++){
for(int64_t j = 0; j < input.shape(1); j++){
for(ssize_t i = 0; i < input.shape(0); i++){
for(ssize_t j = 0; j < input.shape(1); j++){
output(i,j) = adc_sar_05_decode64to16(input(i,j));
}
}
@ -56,8 +56,8 @@ void adc_sar_04_decode64to16(NDView<uint64_t, 2> input, NDView<uint16_t,2> outpu
if(input.shape() != output.shape()){
throw std::invalid_argument(LOCATION + " input and output shapes must match");
}
for(int64_t i = 0; i < input.shape(0); i++){
for(int64_t j = 0; j < input.shape(1); j++){
for(ssize_t i = 0; i < input.shape(0); i++){
for(ssize_t j = 0; j < input.shape(1); j++){
output(i,j) = adc_sar_04_decode64to16(input(i,j));
}
}