mirror of https://github.com/bec-project/bec_widgets.git
synced 2025-07-14 11:41:49 +02:00
fix: test_bec_monitor.py config loaded fresh in the test function to avoid parameter leak
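The issue this addresses: the test module used to parse each YAML config once at import time and hand the same dict objects to every parametrized test, so any in-place mutation during one test could leak into the next. A minimal sketch of that failure mode, with made-up names purely for illustration (not code from the repository):

import copy

shared_config = {"plot_settings": {"num_columns": 1}}  # parsed once at module level

def fake_test(config):
    config["plot_settings"]["num_columns"] = 3  # the widget/test mutates its input in place

fake_test(shared_config)
# the next test that receives shared_config now starts from the mutated state:
assert shared_config["plot_settings"]["num_columns"] == 3

# re-loading (or deep-copying) the config inside each test restores isolation:
fake_test(copy.deepcopy(shared_config))

Loading the file again inside each test function, as this commit does, gives every test its own object and makes such mutations harmless.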
@@ -101,8 +101,8 @@ class BECMonitor(pg.GraphicsLayoutWidget):
 
         if self.scan_types is False:  # Device tracking mode
             self.plot_data = self.plot_data_config  # TODO logic has to be improved
-        else:  # setup first line scan as default, then changed with different scan type
-            self.plot_data = self.plot_data_config[list(self.plot_data_config.keys())[0]]
+        else:  # without incoming data setup the first configuration to the first scan type sorted alphabetically by name
+            self.plot_data = self.plot_data_config[min(list(self.plot_data_config.keys()))]
 
         # TODO init plot background -> so far not used, I don't like how it is done in extreme.py
 
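A side note on the min(...) change above: for string keys, min() picks the lexicographically smallest one, so the default plot configuration is now the scan type whose name sorts first alphabetically, independent of insertion order in the dict. A tiny illustration with made-up scan names:

plot_data_config = {"line_scan": {}, "grid_scan": {}, "arc_scan": {}}
list(plot_data_config.keys())[0]   # 'line_scan' -> whichever key was inserted first
min(plot_data_config.keys())       # 'arc_scan'  -> alphabetically first, deterministic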
@@ -6,23 +6,17 @@ from unittest.mock import MagicMock
 
 from bec_widgets.widgets import BECMonitor
 
-current_path = os.path.dirname(__file__)
+# current_path = os.path.dirname(__file__)
 
 
-def load_config(config_path):
+def load_test_config(config_name):
     """Helper function to load config from yaml file."""
+    config_path = os.path.join(os.path.dirname(__file__), "test_configs", f"{config_name}.yaml")
     with open(config_path, "r") as f:
         config = yaml.safe_load(f)
     return config
 
 
-config_device = load_config(os.path.join(current_path, "test_configs/config_device.yaml"))
-config_device_no_entry = load_config(
-    os.path.join(current_path, "test_configs/config_device_no_entry.yaml")
-)
-config_scan = load_config(os.path.join(current_path, "test_configs/config_scan.yaml"))
-
-
 @pytest.fixture(scope="function")
 def monitor(qtbot):
     client = MagicMock()
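Because load_test_config re-reads and re-parses the YAML on every call, two calls return equal but independent objects, which is what prevents one test's changes from reaching another. A quick check of that property, assuming the helper as defined above:

config_a = load_test_config("config_device")
config_b = load_test_config("config_device")
assert config_a == config_b      # same content
assert config_a is not config_b  # separate objects, so mutating one cannot affect the other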
@@ -33,15 +27,15 @@ def monitor(qtbot):
 
 
 @pytest.mark.parametrize(
-    "config, scan_type, number_of_plots",
+    "config_name, scan_type, number_of_plots",
     [
-        (config_device, False, 2),
-        (config_scan, True, 4),
-        (config_device_no_entry, False, 2),
+        ("config_device", False, 2),
+        ("config_device_no_entry", False, 2),
+        ("config_scan", True, 4),
     ],
 )
-def test_initialization_with_device_config(monitor, config, scan_type, number_of_plots):
-    # monitor = setup_monitor(qtbot, config)
+def test_initialization_with_device_config(monitor, config_name, scan_type, number_of_plots):
+    config = load_test_config(config_name)
     monitor.update_config(config)
     assert isinstance(monitor, BECMonitor)
     assert monitor.config == config
@@ -51,9 +45,12 @@ def test_initialization_with_device_config(monitor, config, scan_type, number_of
 
 
 @pytest.mark.parametrize(
-    "config_initial,config_update", [(config_device, config_scan), (config_scan, config_device)]
+    "config_initial,config_update",
+    [("config_device", "config_scan"), ("config_scan", "config_device")],
 )
 def test_update_config(monitor, config_initial, config_update):
+    config_initial = load_test_config(config_initial)
+    config_update = load_test_config(config_update)
     monitor.update_config(config_initial)
     assert monitor.config == config_initial
     monitor.update_config(config_update)
@@ -61,16 +58,16 @@ def test_update_config(monitor, config_initial, config_update):
 
 
 @pytest.mark.parametrize(
-    "config, expected_num_columns, expected_plot_names, expected_coordinates",
+    "config_name, expected_num_columns, expected_plot_names, expected_coordinates",
     [
         (
-            config_device,
+            "config_device",
            1,
            ["BPM4i plots vs samx", "Gauss plots vs samx"],
            [(0, 0), (1, 0)],
        ),
        (
-            config_scan,
+            "config_scan",
            3,
            ["Grid plot 1", "Grid plot 2", "Grid plot 3", "Grid plot 4"],
            [(0, 0), (0, 1), (0, 2), (1, 0)],
@@ -78,10 +75,12 @@ def test_update_config(monitor, config_initial, config_update):
     ],
 )
 def test_render_initial_plots(
-    monitor, config, expected_num_columns, expected_plot_names, expected_coordinates
+    monitor, config_name, expected_num_columns, expected_plot_names, expected_coordinates
 ):
+    config = load_test_config(config_name)
     monitor.update_config(config)
 
+    assert monitor.config == config
     # Validate number of columns
     assert monitor.plot_settings["num_columns"] == expected_num_columns
 
@@ -122,13 +121,13 @@ metadata_line = {"scan_name": "line_scan"}
 
 
 @pytest.mark.parametrize(
-    "config, msg, metadata, expected_data",
+    "config_name, msg, metadata, expected_data",
     [
         # case: msg does not have 'scanid'
-        (config_device, {"data": {}}, {}, {}),
+        ("config_device", {"data": {}}, {}, {}),
         # case: scan_types is false, msg contains all valid fields, and entry is present in config
         (
-            config_device,
+            "config_device",
            msg_1,
            {},
            {
@@ -139,7 +138,7 @@ metadata_line = {"scan_name": "line_scan"}
         ),
         # case: scan_types is false, msg contains all valid fields and entry is missing in config, should use hints
         (
-            config_device_no_entry,
+            "config_device_no_entry",
            msg_1,
            {},
            {
@@ -149,7 +148,7 @@ metadata_line = {"scan_name": "line_scan"}
         ),
         # case: scan_types is true, msg contains all valid fields, metadata contains scan "line_scan:"
         (
-            config_scan,
+            "config_scan",
            msg_1,
            metadata_line,
            {
@@ -160,7 +159,7 @@ metadata_line = {"scan_name": "line_scan"}
             },
         ),
         (
-            config_scan,
+            "config_scan",
            msg_1,
            metadata_grid,
            {
@@ -172,7 +171,8 @@ metadata_line = {"scan_name": "line_scan"}
         ),
     ],
 )
-def test_on_scan_segment(monitor, config, msg, metadata, expected_data):
+def test_on_scan_segment(monitor, config_name, msg, metadata, expected_data):
+    config = load_test_config(config_name)
     monitor.update_config(config)
     # Get hints
     monitor.dev.__getitem__.side_effect = mock_getitem
@@ -13,7 +13,7 @@ def eiger_plot_instance(qtbot):
     widget = EigerPlot()
     qtbot.addWidget(widget)
     qtbot.waitExposed(widget)
-    return widget
+    yield widget
 
 
 @pytest.mark.parametrize(
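On the return-to-yield change in the fixture above and in the StreamPlot fixture below: a generator fixture lets pytest resume the function after the test finishes, so teardown can be added after the yield later on, whereas with return there is nowhere to put cleanup. A hedged sketch of where such teardown would go (the widget.close() call is illustrative only, not part of this commit):

@pytest.fixture
def eiger_plot_instance(qtbot):
    widget = EigerPlot()
    qtbot.addWidget(widget)
    qtbot.waitExposed(widget)
    yield widget
    widget.close()  # hypothetical teardown; runs after the test that used the fixture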
@@ -10,14 +10,14 @@ from bec_lib.core import RedisConnector
 from bec_widgets.examples.stream_plot.stream_plot import StreamPlot
 
 
-@pytest.fixture
+@pytest.fixture(scope="function")
 def stream_app(qtbot):
     """Helper function to set up the StreamPlot widget."""
     client = mock.MagicMock()
     widget = StreamPlot(client=client)
     qtbot.addWidget(widget)
     qtbot.waitExposed(widget)
-    return widget
+    yield widget
 
 
 def test_roi_signals_emitted(qtbot, stream_app):
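Note that scope="function" is already the pytest default, so spelling it out on stream_app does not change behavior; it only makes explicit that a fresh StreamPlot and MagicMock client are constructed for every test, in line with the isolation goal of this commit. The two decorators are equivalent:

@pytest.fixture                      # implicit function scope
@pytest.fixture(scope="function")    # same behavior, stated explicitly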
@@ -126,6 +126,10 @@ def test_on_dap_update(qtbot, stream_app):
     np.testing.assert_array_equal(stream_app.img.image, expected_data)
 
 
+####################
+# Until Here
+####################
+
 # def test_new_proj(qtbot, stream_app): #TODO this test is not working, does it make sense testing even?
 #     # Create some mock content to be "received" by the slot
 #     content_dict = {"signals": {"proj_nr": 1}}