import os
import sys

import numpy as np
import pytest

from creader import ClusterFileReader
from fixtures import data_path


def test_open_file_using_Path(data_path):
    # The C extension cannot convert a pathlib.Path directly to a char *,
    # so check that the wrapper handles Path objects correctly.
    try:
        fname = data_path / 'beam_En700eV_-40deg_300V_10us_d0_f0_100.clust'
        r = ClusterFileReader(fname)
        assert r.read(10).size == 10  # Read to make sure the file is really open
    except Exception:
        pytest.fail("Could not open file using Path object")


def test_open_file_using_str(data_path):
    # Check that we didn't mess up the string passing
    try:
        fname = f'{data_path}/beam_En700eV_-40deg_300V_10us_d0_f0_100.clust'
        r = ClusterFileReader(fname)
        assert r.read(10).size == 10  # Read to make sure the file is really open
    except Exception:
        pytest.fail("Could not open file using string")


def test_references_on_read(data_path):
    fname = (data_path / 'beam_En700eV_-40deg_300V_10us_d0_f0_100.clust').as_posix()
    r = ClusterFileReader(fname)
    clusters = r.read(10)
    # getrefcount over-counts by one since the argument is passed by reference
    assert sys.getrefcount(clusters) == 2


def test_size_on_read(data_path):
    fname = (data_path / 'beam_En700eV_-40deg_300V_10us_d0_f0_100.clust').as_posix()
    r = ClusterFileReader(fname)
    for i in range(10):
        clusters = r.read(10)
        assert clusters.size == 10


def test_resize_on_read(data_path):
    # File contains 481603 clusters, the output should be resized to the correct size
    fname = (data_path / 'beam_En700eV_-40deg_300V_10us_d0_f0_100.clust').as_posix()
    r = ClusterFileReader(fname)
    max_clusters = 10000000  # 400 MB initial allocation
    clusters = r.read(max_clusters)
    assert clusters.size == 481603
    assert sys.getrefcount(clusters) == 2


def test_read_file_with_single_frame(data_path):
    # File should contain one frame (135) with 97 clusters
    fname = (data_path / 'single_frame_97_clustrers.clust').as_posix()
    r = ClusterFileReader(fname)
    clusters = r.read(100)
    assert clusters.size == 97
    for i, c in enumerate(clusters):
        assert c['x'] == i + 1
        assert c['y'] == i + 200
        assert (c['data'] == np.arange(i * 9, (i + 1) * 9, dtype=np.int32)).all()


def test_read_file_with_single_frame_in_chunks(data_path):
    # File should contain one frame (135) with 97 clusters
    fname = (data_path / 'single_frame_97_clustrers.clust').as_posix()
    r = ClusterFileReader(fname)
    total_clusters = 0
    while (clusters := r.read(5)).size:
        total_clusters += clusters.size
    assert total_clusters == 97


def test_read_file_with_37_frames(data_path):
    # File should contain 37 frames with 5 clusters each
    # Full spec in utils/write_test_data.py
    fname = (data_path / '37frames_with_5_clusters.clust').as_posix()
    r = ClusterFileReader(fname)
    clusters = r.read(200)
    assert clusters.size == 185
    for i, c in enumerate(clusters):
        assert c['x'] == i % 5 + 1
        assert c['y'] == i % 5 + 1
        assert (c['data'] == np.arange(i % 5, (i % 5) + 9, dtype=np.int32)).all()


def test_read_file_with_37_frames_in_chunks(data_path):
    # File should contain 37 frames with 5 clusters each
    # Full spec in utils/write_test_data.py
    fname = data_path / '37frames_with_5_clusters.clust'
    r = ClusterFileReader(fname)
    total_clusters = 0
    while (clusters := r.read(7)).size:
        total_clusters += clusters.size
    assert total_clusters == 185


@pytest.mark.skip(reason="Need to check how the selection is made")
def test_read_file_with_noise_mask(data_path):
    # No mask
    fname = data_path / 'noise_test.clust'
    r = ClusterFileReader(fname)
    cl = r.read(85)  # File contains 70 clusters
    assert cl.size == 70

    # Noise mask with all zeros, nothing should be cut
    noise_cut = np.zeros((400, 400))
    r = ClusterFileReader(fname)
    cl = r.read(85, noise_cut)
    assert cl.size == 70

    # Only pixel (80, 133) above noise
    noise_cut[:] = 100
    # TODO! Agree on orientation of noise mask!
    # noise_cut[80, 133] = 0
    noise_cut[133, 80] = 0
    r = ClusterFileReader(fname)
    cl = r.read(85, noise_cut)
    assert cl.size == 1


def test_chunk_config(data_path):
    # File contains 70 clusters in total
    fname = data_path / 'noise_test.clust'
    # chunk sets the default number of clusters returned by read() without arguments
    r = ClusterFileReader(fname, chunk=5)
    assert r.read().size == 5
    # An explicit size overrides the configured chunk size
    assert r.read(10).size == 10
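

# A hedged sketch of an additional test (not part of the original suite): it
# combines the chunk default from test_chunk_config with the walrus-operator
# reading pattern used above. It assumes that read() without an argument keeps
# returning batches of at most `chunk` clusters and an empty array at EOF,
# which the behaviour exercised in the tests above suggests but does not
# guarantee.
def test_chunked_read_of_whole_file(data_path):
    # noise_test.clust contains 70 clusters in total (see test_chunk_config)
    fname = data_path / 'noise_test.clust'
    r = ClusterFileReader(fname, chunk=7)
    total_clusters = 0
    while (clusters := r.read()).size:
        total_clusters += clusters.size
    assert total_clusters == 70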