Compare commits

528 Commits

SHA1
108c1aae2f
b82184b9e7
b6a43c3f3b
18b692a62e
c2d6f6b259
60b90ec9e5
1fc30ae3e1
dfa6bfe926
ed3f58436b
68f7b429f7
e5030902c7
60f01d9dd8
bd429393a5
9e3ffd6230
bdc71f15c1
c3398ef4e5
9b33f1152b
dc1f2a92cc
e9ae52bb60
982887ab85
19e934e873
8604d695c6
a55295829f
4181d597a8
c4869fb0cd
80e75d9ef9
eb2177215b
89fb4f054f
019a36bbb7
58a704764a
144b37ba09
48114a0dd9
0fee06f2d6
31b4b0bb5f
a6611976e1
9b48fb7a24
14d122b947
bff44a7461
eae8a1bde4
a1c1de4adf
9e6fc04d63
779426f4bb
1a5d61a9f7
b41ab102b1
bc791b1028
3ab4420912
90552cee2c
6164be16f0
07f03a2a04
f89267b5ec
8e6cef32b5
e318055304
015eb095a4
d145f9107d
d1a0ba6fec
2e2677d856
a165167902
0b88ab0c7f
59d392c9ec
774c1008f5
054bae5b27
f27cffa4f8
237b168cc0
60f565e2b9
eee9337c72
3293a74017
f83aad6b76
1570e17b6f
b72033c6b1
e1be4276cd
4dd7644596
f0488f5648
06f89842d6
bb8c158591
2a27ef7de1
cb8fc3f4ff
6b9555c33c
f6f4f64891
9f2585139b
afea511b2e
008b3b13d7
bc66c593f3
dc397062ad
3458a6c755
32aba1e75d
24fbb8b397
661aecc3ee
1860bce4c2
64baeb0373
d57160a712
de9ebc419b
30d08733a7
4b6994a9f3
cda8896599
1e3abce5fc
dd262ab6cd
dfef1eadd9
4d993280c7
18d5bec842
e563666373
32a591136e
831a42ecc0
ef22d77a5c
9419b6928a
4e34776f97
17c647b60d
200e5d0a13
0b340d0bb9
5f37e9a57c
7a7baadc8f
41f5157ba8
7503076a1b
21562ee85b
23ba256b6e
f5814ddd5e
b57348b369
165c06bc22
3c127b7f00
45b091f90b
20fc969d60
bce8cf3120
56d626e312
19daa16de7
4bcfaf8c80
b8cf76220c
20e43ecce9
bcb6f7bd3b
c3b198f63a
f044e739a0
6f83292d69
7075e0f42b
1d43c86cff
cde2aa1182
df9607ceb9
5c5be065be
279dee3935
4ebb343afb
2810cf3d2b
585d4b3f54
72fd54d268
181c359ef9
00607d94c7
a192648cc4
42adda235b
e30035350b
89fffed5d1
fae90cbeae
7b70d30578
b26d7005c5
90021428ee
bf97af1949
098314e30d
49ff319230
9b1e396bdb
d72c1d478e
fa73271076
4d2a8ecad2
6f1fe1a3d8
8d4f4a9b50
c94f784373
de2a10b507
e2220760f0
39b2af60ca
94df4869d4
4131eb31e7
4ee1f6fb70
fbce5de7b9
636badfba8
77d6f42e0d
166259d669
14d65c9590
48b001180f
21332b3edc
421fc726dd
eb3cc99aeb
d36686177d
7e2c6ad21f
03ed97e063
234fb1f3b6
e8ce57b56b
ac6f67cc53
3234a544de
4bd6c6760e
b401d2f459
6b8d15234b
5cfa5c176d
bbe7b7d305
4ba08366df
51c78ad06b
bcd594fa7e
10bcabd7f9
bee8263184
98a76ebf63
12ba0b291b
a719f10f4f
aaff6032c8
3fb3fe573b
1687337f26
53ceac21aa
741a09819c
3c619713d5
3d5a4ed6aa
b2129805dc
92765b5665
328b71e058
11ab8485bc
4734b3e50f
dfeeed284b
9adf83ec74
a299449209
45a81aa632
3926e8de39
d2e2a2c7fd
3934dcdd07
4c8037af5c
e29b4e7da8
7189ee8196
be8417856a
8ba062064a
6557b2f3a4
7dcd20198f
13a6ff285a
09b6e4fdcf
e7780a2405
e8b85bcea3
2482746f14
3986b8173f
16966b6e3e
e9d3fcc41a
506d70a913
fc4e9c12cf
c5faa0a55a
c9922bb0cb
813270d6f8
cf2f8435e7
380abfb102
c8502a3b93
b84fc632aa
3acd57adb9
960ce0a534
1d43a952e6
9f7a7b8bbf
8129b5e683
eaa6c4a2ad
c2be907113
4dae756b3e
a77a40618d
a73c34b06f
4b9f0a8c36
9f56921072
49a6bd22ae
5b502b31eb
20e99c35ba
abf4750030
5de09d16ca
5c4362d984
8d065b85a4
c86466b470
b8968192ca
4745f0f401
9f6e7230fa
089a0cf5ac
639dc070c3
fec463398d
b6d7a52b06
d6e599d4f9
d6b27fb33a
bae15ee2ef
c2bd6c25f5
cf6527af13
57e503fc3d
c10efeb9cc
137f20cc20
531463a637
e3368c1817
313bd8bc62
fe61d3c4cb
f6d9f63863
620f32446a
4b4d5c16ce
91b9e01441
18ea894f35
9141ac49c7
2adbcc6bcd
b39d970960
b11004bf0f
6c2e221595
502a4b8096
3fe4fca96a
a3e3e6768f
0b6a58e160
09d22e7674
90387174e5
e99edbaf72
a2fceffc1b
415d68b4dc
00ff4117ea
67853b8db4
60787bccb7
880d86d750
7a88e5e254
20f2a8ada4
42c092fc14
8153db9f67
62c969d6ad
085620abae
9ebe290966
c9cd96c521
d745cda4a5
1b5f70afa0
a034065a09
2a60c86b48
ccc075975f
4982b05de0
2b0c392a3e
099842b2bd
bd3efd698a
24f083e585
f43488af34
1b90d53466
c1b3a28351
5b45685257
e7b28a4e75
83a7d607a5
5eedd14b3f
3db7dca7ba
b2d1a0be02
69d22dd067
242da76c59
0c812a5dd5
4cfcb3d396
8018783eb5
fdb1609a41
e7dda3cda8
f788d74f15
0c620d4a08
5d65afa194
d312c99f96
526821e073
d606230feb
ce5cff05a7
a738f74f88
4785c97f33
e68fedc7e5
2581996625
d014849abd
aa6e668f97
85c74c39f0
0e97f44cc7
9c5f7e6284
167e5136b1
21d642b7d7
fe68b1de7e
3291a67e7d
041c5c0e8b
566cebb01a
a70b4fae57
97836b2906
7ee8eba007
5169e08777
ed8e6d262f
426bb16792
554716fc9a
69767da850
3e174f22e5
7c1e2fdf0c
b61327b5f2
0e3aea142d
c7b05d252f
a65708004b
af1336df78
966d6349df
5dbb208e92
af70302bf3
8727e25a2d
fac3592ab2
599e2e1e74
11fae5d47a
7ee4b1c86a
e76623e55e
794d5c49d4
0b4f4b1ce9
15bd970a72
89ff0b15f5
9e3466fcaa
55f3198f9d
747b008d5e
1a7cd2a4b5
eb4e30b7e0
82f3c53380
ac76d4b2c3
dab52d9508
b63ef90e11
d5ac2c6d56
36faad1d9d
9140798c74
96eaa27e55
b418fb8300
699c5f3d11
6a822c4c85
c2e1f1def1
2b9775faed
ea7d611819
9ef07cff19
12077dd5f3
3b1a2b1a0b
8eee968344
97936d6c2a
91d3a0ac9e
4f1f03224c
16a47cf3b3
e3de0f7217
983e0dab42
a6f97f59e8
bf3b44405d
93a557fea9
b31c0b413c
20527e8d2b
e09538eaeb
239949b7c0
7e6df95c49
e38993e69d
6bf401aba8
0c4fffff0d
2c727d34bd
5e979eb9e3
ba6a99b912
315b025341
d0b67b8565
1761003d8a
d42a53ed47
cd1e5c42c0
c2215e9b6d
c5ec09a5e3
b0a4e35d3d
db85eee329
67d0af292b
241c2a9779
8c265027bf
f5c405bee8
ae33ee825e
f694249298
b7ac6d4359
e27ba7c711
ddaeda35c7
dac1237009
999ceba3b7
0a634ca8da
1f1762063d
fdb2027fd4
559e18ed5d
d6202d4a6b
4b3b14e4ac
b4db41d672
16174f5cc4
b50783f6be
f830acf70d
ea41301605
6a341e5001
9bbed3b55d
6bd2398f5e
768dd77ef5
cf6f7a8506
654d281c49
950f76d4be
238f3e4fbc
826363a0f5
4822121b3b
56609ad5ff
a9b0a8a01d
aa6bcb6c6b
216de442a5
9507339c2a
0c158db48f
9bd959e656
65b28fffc6
0d8a30b995
c602a3df2e
b19b70caae
b08f3c27db
e15f9c9c3e
b62573fa09
f7f016cf1c
8c8715b041
008761e661
4343d6e2b6
11e1a6b60c
8be637a7f3
4fbfe21e99
b5b77d165a
0e176cb2f3
2ba0964e07
b31f359ee7
63150a4b19
8d779b11f6
b2d603b3c5
2ddb0a668a
de81f2fd9f
b2fc2d604a
78096efcef
5b0f97959e
8fb1c5f247
58641ab94f
a6bcb8ffa1
7b6e6bf396
45f295fcf8
3c58fd2102
abbaded278
fbe992c901
dec282d1b7
4429823629
80fddb514a
cfe9832c1e
fd942672df
60cb733ca7
7c2ecef56d
468f33e606
dbc643aba9
0856705024
ce608f1b49
3eaf54eda3
a496267a9d
1a3ebfbcbd
7bcb23c1bd
b28fe39bbb
42c6e6b921
dba2dc6149
a0c9b0162b
.gitea/workflows/deploy.yaml (new file, 53 lines)

@@ -0,0 +1,53 @@
```yaml
name: pyzebra CI/CD pipeline

on:
  push:
    branches:
      - main
    tags:
      - '*'

env:
  CONDA: /opt/miniforge3

jobs:
  prepare:
    runs-on: pyzebra
    steps:
      - run: $CONDA/bin/conda config --add channels conda-forge
      - run: $CONDA/bin/conda config --set solver libmamba

  test-env:
    runs-on: pyzebra
    needs: prepare
    if: github.ref == 'refs/heads/main'
    env:
      BUILD_DIR: ${{ runner.temp }}/conda_build
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - run: $CONDA/bin/conda build --no-anaconda-upload --output-folder $BUILD_DIR ./conda-recipe
      - run: $CONDA/bin/conda remove --name test --all --keep-env -y
      - run: $CONDA/bin/conda install --name test --channel $BUILD_DIR python=3.8 pyzebra -y
      - run: sudo systemctl restart pyzebra-test.service

  prod-env:
    runs-on: pyzebra
    needs: prepare
    if: startsWith(github.ref, 'refs/tags/')
    env:
      BUILD_DIR: ${{ runner.temp }}/conda_build
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - run: $CONDA/bin/conda build --token ${{ secrets.ANACONDA_TOKEN }} --output-folder $BUILD_DIR ./conda-recipe
      - run: $CONDA/bin/conda remove --name prod --all --keep-env -y
      - run: $CONDA/bin/conda install --name prod --channel $BUILD_DIR python=3.8 pyzebra -y
      - run: sudo systemctl restart pyzebra-prod.service

  cleanup:
    runs-on: pyzebra
    needs: [test-env, prod-env]
    if: always()
    steps:
      - run: $CONDA/bin/conda build purge-all
```
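The three deploy jobs are gated purely on the pushed Git ref: `test-env` fires for pushes to `main`, `prod-env` for any tag, and `cleanup` always runs afterwards. A minimal Python sketch of that gate (the ref strings are the standard Git formats; the function name is illustrative only, not part of the repo):

```python
def deploy_target(github_ref: str):
    """Mirror the workflow's `if:` conditions (illustrative helper)."""
    if github_ref == "refs/heads/main":
        return "test"  # test-env: build without upload, reinstall into the `test` conda env
    if github_ref.startswith("refs/tags/"):
        return "prod"  # prod-env: build with the Anaconda token, reinstall into `prod`
    return None  # no deployment; cleanup still runs because of `if: always()`


assert deploy_target("refs/heads/main") == "test"
assert deploy_target("refs/tags/0.7.11") == "prod"
assert deploy_target("refs/heads/feature-x") is None
```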
.travis.yml (deleted, 33 lines)

@@ -1,33 +0,0 @@
```yaml
language: python
python:
  - 3.6
  - 3.7
  - 3.8

# Build only tagged commits
if: tag IS present

before_install:
  - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
  - bash miniconda.sh -b -p $HOME/miniconda
  - export PATH="$HOME/miniconda/bin:$PATH"
  - conda config --append channels conda-forge
  - conda config --set always_yes yes
  - conda config --set anaconda_upload no

install:
  - conda update -q conda
  - conda install -q python=$TRAVIS_PYTHON_VERSION conda-build anaconda-client

script:
  - conda build conda-recipe

deploy:
  provider: script
  script: anaconda -t $ANACONDA_TOKEN upload $HOME/miniconda/conda-bld/**/pyzebra-*.tar.bz2
  on:
    branch: master
    tags: true

notifications:
  email: false
```
.vscode/launch.json (vendored, 3 changed lines)

```diff
@@ -5,9 +5,10 @@
         "name": "pyzebra",
         "type": "python",
         "request": "launch",
-        "program": "${workspaceFolder}/pyzebra/cli.py",
+        "program": "${workspaceFolder}/pyzebra/app/cli.py",
         "console": "internalConsole",
         "env": {},
+        "justMyCode": false,
       },
     ]
 }
```
conda-recipe/bld.bat (new file, 2 lines)

@@ -0,0 +1,2 @@
```bat
"%PYTHON%" setup.py install --single-version-externally-managed --record=record.txt
if errorlevel 1 exit 1
```
conda-recipe/meta.yaml

```diff
@@ -8,27 +8,27 @@ source:
   path: ..
 
 build:
   noarch: python
   number: 0
   entry_points:
-    - pyzebra = pyzebra.cli:main
+    - pyzebra = pyzebra.app.cli:main
 
 requirements:
   build:
-    - python
+    - python >=3.8
     - setuptools
   run:
-    - python
+    - python >=3.8
     - numpy
     - scipy
     - h5py
-    - bokeh
+    - bokeh =2.4
     - numba
-    - lmfit
-    - uncertainties
+    - lmfit >=1.0.2
 
 
 about:
-  home: https://github.com/paulscherrerinstitute/pyzebra
+  home: https://gitlab.psi.ch/zebra/pyzebra
   summary: {{ data['description'] }}
   license: GNU GPLv3
   license_file: LICENSE
```
make_release.py (19 changed lines, Normal file → Executable file)

```diff
@@ -3,17 +3,23 @@
 import argparse
 import os
 import re
+import subprocess
 
 
 def main():
-    filepath = "pyzebra/__init__.py"
+    default_branch = "main"
+    branch = subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True).decode().strip()
+    if branch != default_branch:
+        print(f"Aborting, not on '{default_branch}' branch.")
+        return
+
+    version_filepath = os.path.join(os.path.basename(os.path.dirname(__file__)), "__init__.py")
 
     parser = argparse.ArgumentParser()
     parser.add_argument("level", type=str, choices=["patch", "minor", "major"])
-    parser.add_argument("tag_msg", type=str, help="tag message")
     args = parser.parse_args()
 
-    with open(filepath) as f:
+    with open(version_filepath) as f:
         file_content = f.read()
 
     version = re.search(r'__version__ = "(.*?)"', file_content).group(1)
@@ -31,11 +37,12 @@ def main():
 
     new_version = f"{major}.{minor}.{patch}"
 
-    with open(filepath, "w") as f:
+    with open(version_filepath, "w") as f:
         f.write(re.sub(r'__version__ = "(.*?)"', f'__version__ = "{new_version}"', file_content))
 
-    os.system(f"git commit {filepath} -m 'Updating for version {new_version}'")
-    os.system(f"git tag -a {new_version} -m '{args.tag_msg}'")
+    os.system(f"git commit {version_filepath} -m 'Updating for version {new_version}'")
+    os.system(f"git tag -a {new_version} -m 'Release {new_version}'")
     os.system("git push --follow-tags")
 
 
 if __name__ == "__main__":
```
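The release script keeps relying on the same `__version__` regex; a quick illustration of the search-and-substitute round trip it performs (standalone sketch, not repo code):

```python
import re

file_content = '__version__ = "0.7.10"\n'

# extract the current version, exactly as make_release.py does
version = re.search(r'__version__ = "(.*?)"', file_content).group(1)
assert version == "0.7.10"

# bump the patch level and write it back with the same pattern
major, minor, patch = (int(x) for x in version.split("."))
new_version = f"{major}.{minor}.{patch + 1}"
assert re.sub(
    r'__version__ = "(.*?)"', f'__version__ = "{new_version}"', file_content
) == '__version__ = "0.7.11"\n'
```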
pyzebra/__init__.py

```diff
@@ -1,10 +1,9 @@
-import pyzebra.ccl_dict_operation
 from pyzebra.anatric import *
-from pyzebra.ccl_findpeaks import ccl_findpeaks
-from pyzebra.comm_export import export_comm
-from pyzebra.fit2 import fitccl
+from pyzebra.ccl_io import *
+from pyzebra.ccl_process import *
 from pyzebra.h5 import *
-from pyzebra.load_1D import load_1D, parse_1D
+from pyzebra.sxtal_refgen import *
+from pyzebra.utils import *
 from pyzebra.xtal import *
 
-__version__ = "0.1.1"
+__version__ = "0.7.11"
```
pyzebra/anatric.py

```diff
@@ -1,13 +1,11 @@
+import logging
 import subprocess
 import xml.etree.ElementTree as ET
 
+logger = logging.getLogger(__name__)
+
-DATA_FACTORY_IMPLEMENTATION = ["trics", "morph", "d10"]
-
-ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/anatric"
+DATA_FACTORY_IMPLEMENTATION = [
+    "trics",
+    "morph",
+    "d10",
+]
 REFLECTION_PRINTER_FORMATS = [
     "rafin",
     "rafinf",
@@ -21,11 +19,21 @@ REFLECTION_PRINTER_FORMATS = [
     "oksana",
 ]
 
+ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel8/bin/anatric"
 ALGORITHMS = ["adaptivemaxcog", "adaptivedynamic"]
 
 
-def anatric(config_file):
-    subprocess.run([ANATRIC_PATH, config_file], check=True)
+def anatric(config_file, anatric_path=ANATRIC_PATH, cwd=None, log=logger):
+    comp_proc = subprocess.run(
+        [anatric_path, config_file],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        cwd=cwd,
+        check=True,
+        text=True,
+    )
+    log.info(" ".join(comp_proc.args))
+    log.info(comp_proc.stdout)
 
 
 class AnatricConfig:
@@ -52,10 +60,13 @@
     def save_as(self, filename):
         self._tree.write(filename)
 
+    def tostring(self):
+        return ET.tostring(self._tree.getroot(), encoding="unicode")
+
     def _get_attr(self, name, tag, attr):
         elem = self._tree.find(name).find(tag)
         if elem is None:
-            return None
+            return ""
         return elem.attrib[attr]
 
     def _set_attr(self, name, tag, attr, value):
@@ -218,7 +229,7 @@
         elem = self._tree.find("crystal").find("UB")
         if elem is not None:
             return elem.text
-        return None
+        return ""
 
     @crystal_UB.setter
     def crystal_UB(self, value):
@@ -237,12 +248,37 @@
 
     @property
     def dataFactory_dist1(self):
-        return self._tree.find("DataFactory").find("dist1").attrib["value"]
+        elem = self._tree.find("DataFactory").find("dist1")
+        if elem is not None:
+            return elem.attrib["value"]
+        return ""
 
     @dataFactory_dist1.setter
     def dataFactory_dist1(self, value):
         self._tree.find("DataFactory").find("dist1").attrib["value"] = value
 
     @property
+    def dataFactory_dist2(self):
+        elem = self._tree.find("DataFactory").find("dist2")
+        if elem is not None:
+            return elem.attrib["value"]
+        return ""
+
+    @dataFactory_dist2.setter
+    def dataFactory_dist2(self, value):
+        self._tree.find("DataFactory").find("dist2").attrib["value"] = value
+
+    @property
+    def dataFactory_dist3(self):
+        elem = self._tree.find("DataFactory").find("dist3")
+        if elem is not None:
+            return elem.attrib["value"]
+        return ""
+
+    @dataFactory_dist3.setter
+    def dataFactory_dist3(self, value):
+        self._tree.find("DataFactory").find("dist3").attrib["value"] = value
+
+    @property
     def reflectionPrinter_format(self):
         return self._tree.find("ReflectionPrinter").attrib["format"]
@@ -254,6 +290,14 @@
 
         self._tree.find("ReflectionPrinter").attrib["format"] = value
 
+    @property
+    def reflectionPrinter_file(self):
+        return self._tree.find("ReflectionPrinter").attrib["file"]
+
+    @reflectionPrinter_file.setter
+    def reflectionPrinter_file(self, value):
+        self._tree.find("ReflectionPrinter").attrib["file"] = value
+
     @property
     def algorithm(self):
         return self._tree.find("Algorithm").attrib["implementation"]
@@ -270,7 +314,7 @@
     def _get_alg_attr(self, alg, tag, attr):
         param_elem = self._alg_elems[alg].find(tag)
         if param_elem is None:
-            return None
+            return ""
         return param_elem.attrib[attr]
 
     def _set_alg_attr(self, alg, tag, attr, value):
```
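The rewritten `anatric()` wrapper now captures the tool's combined stdout/stderr and reports it through a logger instead of inheriting the parent process's streams. A hedged usage sketch (the config path and logger name are placeholders):

```python
import logging
import tempfile

from pyzebra.anatric import anatric

logger = logging.getLogger("my_session")  # placeholder logger name

# Run anatric on a config file inside a scratch directory; the command line
# and the tool's merged stdout/stderr end up in the logger, and a non-zero
# exit status raises subprocess.CalledProcessError because of check=True.
with tempfile.TemporaryDirectory() as workdir:
    anatric("/path/to/config.xml", cwd=workdir, log=logger)  # placeholder config path
```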
pyzebra/app/__init__.py (new file, 4 lines)

@@ -0,0 +1,4 @@
```python
from pyzebra.app.download_files import DownloadFiles
from pyzebra.app.fit_controls import FitControls
from pyzebra.app.input_controls import InputControls
from pyzebra.app.plot_hkl import PlotHKL
```
pyzebra/app/app.py (deleted, 51 lines)

@@ -1,51 +0,0 @@
```python
import argparse
import logging
import sys
from io import StringIO

from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import Tabs, TextAreaInput

import panel_ccl_integrate
import panel_hdf_anatric
import panel_hdf_viewer

parser = argparse.ArgumentParser(
    prog="pyzebra", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)

args = parser.parse_args()

doc = curdoc()
doc.title = "pyzebra"

sys.stdout = StringIO()
stdout_textareainput = TextAreaInput(title="print output:", height=150)

bokeh_stream = StringIO()
bokeh_handler = logging.StreamHandler(bokeh_stream)
bokeh_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
bokeh_logger = logging.getLogger('bokeh')
bokeh_logger.addHandler(bokeh_handler)
bokeh_log_textareainput = TextAreaInput(title="server output:", height=150)

# Final layout
tab_hdf_viewer = panel_hdf_viewer.create()
tab_hdf_anatric = panel_hdf_anatric.create()
tab_ccl_integrate = panel_ccl_integrate.create()

doc.add_root(
    column(
        Tabs(tabs=[tab_hdf_viewer, tab_hdf_anatric, tab_ccl_integrate]),
        row(stdout_textareainput, bokeh_log_textareainput, sizing_mode="scale_both"),
    )
)


def update_stdout():
    stdout_textareainput.value = sys.stdout.getvalue()
    bokeh_log_textareainput.value = bokeh_stream.getvalue()


doc.add_periodic_callback(update_stdout, 1000)
```
pyzebra/app/cli.py (new file, 12 lines)

@@ -0,0 +1,12 @@
```python
import os
import subprocess
import sys


def main():
    app_path = os.path.dirname(os.path.abspath(__file__))
    subprocess.run(["bokeh", "serve", app_path, *sys.argv[1:]], check=True)


if __name__ == "__main__":
    main()
```
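Combined with the recipe's new entry point (`pyzebra = pyzebra.app.cli:main`), the installed `pyzebra` command becomes a thin launcher: it resolves the `pyzebra/app` directory and hands everything after the program name straight to `bokeh serve`. A sketch of that expansion (illustrative only; the `--port` flag just demonstrates argument pass-through):

```python
# illustrative only: what `pyzebra <args...>` expands to
import os
import sys

import pyzebra.app as app_pkg

app_path = os.path.dirname(os.path.abspath(app_pkg.__file__))
print(["bokeh", "serve", app_path, *sys.argv[1:]])
# e.g. invoked as `pyzebra --port 5007` this prints
# ['bokeh', 'serve', '/.../pyzebra/app', '--port', '5007']
```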
pyzebra/app/download_files.py (new file, 45 lines)

@@ -0,0 +1,45 @@
```python
from bokeh.models import Button, ColumnDataSource, CustomJS

js_code = """
let j = 0;
for (let i = 0; i < source.data['name'].length; i++) {
    if (source.data['content'][i] === "") continue;

    setTimeout(function() {
        const blob = new Blob([source.data['content'][i]], {type: 'text/plain'})
        const link = document.createElement('a');
        document.body.appendChild(link);
        const url = window.URL.createObjectURL(blob);
        link.href = url;
        link.download = source.data['name'][i] + source.data['ext'][i];
        link.click();
        window.URL.revokeObjectURL(url);
        document.body.removeChild(link);
    }, 100 * j)

    j++;
}
"""


class DownloadFiles:
    def __init__(self, n_files):
        self.n_files = n_files
        source = ColumnDataSource(
            data=dict(content=[""] * n_files, name=[""] * n_files, ext=[""] * n_files)
        )
        self._source = source

        label = "Download File" if n_files == 1 else "Download Files"
        button = Button(label=label, button_type="success", width=200)
        button.js_on_click(CustomJS(args={"source": source}, code=js_code))
        self.button = button

    def set_contents(self, contents):
        self._source.data.update(content=contents)

    def set_names(self, names):
        self._source.data.update(name=names)

    def set_extensions(self, extensions):
        self._source.data.update(ext=extensions)
```
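Typical wiring, as seen in the panels below: create the helper with the expected number of files, register its extensions once, then refresh names and contents whenever an export preview is rebuilt (a minimal sketch; the extension and content values are placeholders):

```python
from pyzebra.app.download_files import DownloadFiles

dlfiles = DownloadFiles(n_files=2)
dlfiles.set_extensions([".comm", ".incomm"])  # placeholder extensions
dlfiles.set_names(["mydata", "mydata"])       # downloads as mydata.comm / mydata.incomm
dlfiles.set_contents(["h k l ...", ""])       # empty content is skipped by the JS loop

layout_item = dlfiles.button  # add to a bokeh layout; clicking triggers browser downloads
```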
pyzebra/app/fit_controls.py (new file, 175 lines)

@@ -0,0 +1,175 @@
```python
import types

from bokeh.io import curdoc
from bokeh.models import (
    Button,
    CellEditor,
    CheckboxEditor,
    CheckboxGroup,
    ColumnDataSource,
    DataTable,
    Dropdown,
    MultiSelect,
    NumberEditor,
    RadioGroup,
    Spinner,
    TableColumn,
    TextAreaInput,
)

import pyzebra


def _params_factory(function):
    if function == "linear":
        param_names = ["slope", "intercept"]
    elif function == "gaussian":
        param_names = ["amplitude", "center", "sigma"]
    elif function == "voigt":
        param_names = ["amplitude", "center", "sigma", "gamma"]
    elif function == "pvoigt":
        param_names = ["amplitude", "center", "sigma", "fraction"]
    elif function == "pseudovoigt1":
        param_names = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
    else:
        raise ValueError("Unknown fit function")

    n = len(param_names)
    params = dict(
        param=param_names, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n
    )

    if function == "linear":
        params["value"] = [0, 1]
        params["vary"] = [False, True]
        params["min"] = [None, 0]

    elif function == "gaussian":
        params["min"] = [0, None, None]

    return params


class FitControls:
    def __init__(self):
        self.log = curdoc().logger
        self.params = {}

        def add_function_button_callback(click):
            # bokeh requires (str, str) for MultiSelect options
            new_tag = f"{click.item}-{function_select.tags[0]}"
            function_select.options.append((new_tag, click.item))
            self.params[new_tag] = _params_factory(click.item)
            function_select.tags[0] += 1

        add_function_button = Dropdown(
            label="Add fit function",
            menu=[
                ("Linear", "linear"),
                ("Gaussian", "gaussian"),
                ("Voigt", "voigt"),
                ("Pseudo Voigt", "pvoigt"),
                # ("Pseudo Voigt1", "pseudovoigt1"),
            ],
            width=145,
        )
        add_function_button.on_click(add_function_button_callback)
        self.add_function_button = add_function_button

        def function_list_callback(_attr, old, new):
            # Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
            if len(new) > 1:
                # drop selection to the previous one
                function_select.value = old
                return

            if len(old) > 1:
                # skip unnecessary update caused by selection drop
                return

            if new:
                params_table_source.data.update(self.params[new[0]])
            else:
                params_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))

        function_select = MultiSelect(options=[], height=120, width=145)
        function_select.tags = [0]
        function_select.on_change("value", function_list_callback)
        self.function_select = function_select

        def remove_function_button_callback():
            if function_select.value:
                sel_tag = function_select.value[0]
                del self.params[sel_tag]
                for elem in function_select.options:
                    if elem[0] == sel_tag:
                        function_select.options.remove(elem)
                        break

                function_select.value = []

        remove_function_button = Button(label="Remove fit function", width=145)
        remove_function_button.on_click(remove_function_button_callback)
        self.remove_function_button = remove_function_button

        params_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
        self.params_table = DataTable(
            source=params_table_source,
            columns=[
                TableColumn(field="param", title="Parameter", editor=CellEditor()),
                TableColumn(field="value", title="Value", editor=NumberEditor()),
                TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
                TableColumn(field="min", title="Min", editor=NumberEditor()),
                TableColumn(field="max", title="Max", editor=NumberEditor()),
            ],
            height=200,
            width=350,
            index_position=None,
            editable=True,
            auto_edit=True,
        )

        # start with `background` and `gauss` fit functions added
        add_function_button_callback(types.SimpleNamespace(item="linear"))
        add_function_button_callback(types.SimpleNamespace(item="gaussian"))
        function_select.value = ["gaussian-1"]  # put selection on gauss

        self.from_spinner = Spinner(title="Fit from:", width=145)
        self.to_spinner = Spinner(title="to:", width=145)

        self.area_method_radiogroup = RadioGroup(labels=["Function", "Area"], active=0, width=145)

        self.lorentz_checkbox = CheckboxGroup(
            labels=["Lorentz Correction"], width=145, margin=(13, 5, 5, 5)
        )

        self.result_textarea = TextAreaInput(title="Fit results:", width=750, height=200)

    def _process_scan(self, scan):
        pyzebra.fit_scan(
            scan,
            self.params,
            fit_from=self.from_spinner.value,
            fit_to=self.to_spinner.value,
            log=self.log,
        )
        pyzebra.get_area(
            scan,
            area_method=pyzebra.AREA_METHODS[self.area_method_radiogroup.active],
            lorentz=self.lorentz_checkbox.active,
        )

    def fit_scan(self, scan):
        self._process_scan(scan)

    def fit_dataset(self, dataset):
        for scan in dataset:
            if scan["export"]:
                self._process_scan(scan)

    def update_result_textarea(self, scan):
        fit = scan.get("fit")
        if fit is None:
            self.result_textarea.value = ""
        else:
            self.result_textarea.value = fit.fit_report()
```
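`_params_factory` is the single source of truth for the editable parameter table; for instance, the `"gaussian"` branch yields three parameters with only the amplitude bounded from below (importing the private helper here purely for illustration):

```python
from pyzebra.app.fit_controls import _params_factory  # private helper, shown above

params = _params_factory("gaussian")
assert params["param"] == ["amplitude", "center", "sigma"]
assert params["min"] == [0, None, None]   # only the amplitude is kept non-negative
assert params["vary"] == [True, True, True]
```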
pyzebra/app/input_controls.py (new file, 159 lines)

@@ -0,0 +1,159 @@
```python
import base64
import io
import os

from bokeh.io import curdoc
from bokeh.models import Button, FileInput, MultiSelect, Spinner

import pyzebra


class InputControls:
    def __init__(self, dataset, dlfiles, on_file_open=lambda: None, on_monitor_change=lambda: None):
        doc = curdoc()
        log = doc.logger

        def filelist_select_update_for_proposal():
            proposal_path = proposal_textinput.name
            if proposal_path:
                file_list = []
                for file in os.listdir(proposal_path):
                    if file.endswith((".ccl", ".dat")):
                        file_list.append((os.path.join(proposal_path, file), file))
                filelist_select.options = file_list
                open_button.disabled = False
                append_button.disabled = False
            else:
                filelist_select.options = []
                open_button.disabled = True
                append_button.disabled = True

        doc.add_periodic_callback(filelist_select_update_for_proposal, 5000)

        def proposal_textinput_callback(_attr, _old, _new):
            filelist_select_update_for_proposal()

        proposal_textinput = doc.proposal_textinput
        proposal_textinput.on_change("name", proposal_textinput_callback)

        filelist_select = MultiSelect(title="Available .ccl/.dat files:", width=210, height=250)
        self.filelist_select = filelist_select

        def open_button_callback():
            new_data = []
            for f_path in self.filelist_select.value:
                with open(f_path) as file:
                    f_name = os.path.basename(f_path)
                    base, ext = os.path.splitext(f_name)
                    try:
                        file_data = pyzebra.parse_1D(file, ext, log=log)
                    except Exception as e:
                        log.exception(e)
                        continue

                    pyzebra.normalize_dataset(file_data, monitor_spinner.value)

                    if not new_data:  # first file
                        new_data = file_data
                        pyzebra.merge_duplicates(new_data, log=log)
                        dlfiles.set_names([base] * dlfiles.n_files)
                    else:
                        pyzebra.merge_datasets(new_data, file_data, log=log)

            if new_data:
                dataset.clear()
                dataset.extend(new_data)
                on_file_open()
                append_upload_button.disabled = False

        open_button = Button(label="Open New", width=100, disabled=True)
        open_button.on_click(open_button_callback)
        self.open_button = open_button

        def append_button_callback():
            file_data = []
            for f_path in self.filelist_select.value:
                with open(f_path) as file:
                    f_name = os.path.basename(f_path)
                    _, ext = os.path.splitext(f_name)
                    try:
                        file_data = pyzebra.parse_1D(file, ext, log=log)
                    except Exception as e:
                        log.exception(e)
                        continue

                    pyzebra.normalize_dataset(file_data, monitor_spinner.value)
                    pyzebra.merge_datasets(dataset, file_data, log=log)

            if file_data:
                on_file_open()

        append_button = Button(label="Append", width=100, disabled=True)
        append_button.on_click(append_button_callback)
        self.append_button = append_button

        def upload_button_callback(_attr, _old, _new):
            new_data = []
            for f_str, f_name in zip(upload_button.value, upload_button.filename):
                with io.StringIO(base64.b64decode(f_str).decode()) as file:
                    base, ext = os.path.splitext(f_name)
                    try:
                        file_data = pyzebra.parse_1D(file, ext, log=log)
                    except Exception as e:
                        log.exception(e)
                        continue

                    pyzebra.normalize_dataset(file_data, monitor_spinner.value)

                    if not new_data:  # first file
                        new_data = file_data
                        pyzebra.merge_duplicates(new_data, log=log)
                        dlfiles.set_names([base] * dlfiles.n_files)
                    else:
                        pyzebra.merge_datasets(new_data, file_data, log=log)

            if new_data:
                dataset.clear()
                dataset.extend(new_data)
                on_file_open()
                append_upload_button.disabled = False

        upload_button = FileInput(accept=".ccl,.dat", multiple=True, width=200)
        # for on_change("value", ...) or on_change("filename", ...),
        # see https://github.com/bokeh/bokeh/issues/11461
        upload_button.on_change("filename", upload_button_callback)
        self.upload_button = upload_button

        def append_upload_button_callback(_attr, _old, _new):
            file_data = []
            for f_str, f_name in zip(append_upload_button.value, append_upload_button.filename):
                with io.StringIO(base64.b64decode(f_str).decode()) as file:
                    _, ext = os.path.splitext(f_name)
                    try:
                        file_data = pyzebra.parse_1D(file, ext, log=log)
                    except Exception as e:
                        log.exception(e)
                        continue

                    pyzebra.normalize_dataset(file_data, monitor_spinner.value)
                    pyzebra.merge_datasets(dataset, file_data, log=log)

            if file_data:
                on_file_open()

        append_upload_button = FileInput(
            accept=".ccl,.dat", multiple=True, width=200, disabled=True
        )
        # for on_change("value", ...) or on_change("filename", ...),
        # see https://github.com/bokeh/bokeh/issues/11461
        append_upload_button.on_change("filename", append_upload_button_callback)
        self.append_upload_button = append_upload_button

        def monitor_spinner_callback(_attr, _old, new):
            if dataset:
                pyzebra.normalize_dataset(dataset, new)
                on_monitor_change()

        monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
        monitor_spinner.on_change("value", monitor_spinner_callback)
        self.monitor_spinner = monitor_spinner
```
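bokeh's `FileInput` delivers uploads as base64 strings, which is why every upload callback funnels through `base64.b64decode(...).decode()` before parsing. A standalone round trip of that decode step (the file body is a fake two-line .dat payload):

```python
import base64
import io

raw = "#data\n1 2 3\n"                         # pretend .dat content
b64 = base64.b64encode(raw.encode()).decode()  # what FileInput.value holds per file

with io.StringIO(base64.b64decode(b64).decode()) as file:
    assert file.read() == raw                  # parse_1D receives a normal text stream
```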
pyzebra/app/main.py (new file, 112 lines)

@@ -0,0 +1,112 @@
```python
import argparse
import logging
from io import StringIO

from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import Button, Panel, Tabs, TextAreaInput, TextInput

import pyzebra
from pyzebra.app import (
    panel_ccl_compare,
    panel_ccl_integrate,
    panel_ccl_prepare,
    panel_hdf_anatric,
    panel_hdf_param_study,
    panel_hdf_viewer,
    panel_param_study,
    panel_plot_data,
    panel_spind,
)

doc = curdoc()
doc.title = "pyzebra"

parser = argparse.ArgumentParser()

parser.add_argument(
    "--anatric-path", type=str, default=pyzebra.ANATRIC_PATH, help="path to anatric executable"
)

parser.add_argument(
    "--sxtal-refgen-path",
    type=str,
    default=pyzebra.SXTAL_REFGEN_PATH,
    help="path to Sxtal_Refgen executable",
)

parser.add_argument("--spind-path", type=str, default=None, help="path to spind scripts folder")

args = parser.parse_args()

doc.anatric_path = args.anatric_path
doc.spind_path = args.spind_path
doc.sxtal_refgen_path = args.sxtal_refgen_path

stream = StringIO()
handler = logging.StreamHandler(stream)
handler.setFormatter(
    logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
)
logger = logging.getLogger(str(id(doc)))
logger.setLevel(logging.INFO)
logger.addHandler(handler)
doc.logger = logger

log_textareainput = TextAreaInput(title="Logging output:")


def proposal_textinput_callback(_attr, _old, _new):
    apply_button.disabled = False


proposal_textinput = TextInput(title="Proposal number:", name="")
proposal_textinput.on_change("value_input", proposal_textinput_callback)
doc.proposal_textinput = proposal_textinput


def apply_button_callback():
    proposal = proposal_textinput.value.strip()
    if proposal:
        try:
            proposal_path = pyzebra.find_proposal_path(proposal)
        except ValueError as e:
            logger.exception(e)
            return
        apply_button.disabled = True
    else:
        proposal_path = ""

    proposal_textinput.name = proposal_path


apply_button = Button(label="Apply", button_type="primary")
apply_button.on_click(apply_button_callback)

# Final layout
doc.add_root(
    column(
        Tabs(
            tabs=[
                Panel(child=column(proposal_textinput, apply_button), title="user config"),
                panel_hdf_viewer.create(),
                panel_hdf_anatric.create(),
                panel_ccl_prepare.create(),
                panel_plot_data.create(),
                panel_ccl_integrate.create(),
                panel_ccl_compare.create(),
                panel_param_study.create(),
                panel_hdf_param_study.create(),
                panel_spind.create(),
            ]
        ),
        row(log_textareainput, sizing_mode="scale_both"),
    )
)


def update_stdout():
    log_textareainput.value = stream.getvalue()


doc.add_periodic_callback(update_stdout, 1000)
```
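Naming the logger after `id(doc)` gives each bokeh session its own logger object, so the "Logging output" box only shows messages from that session (unlike the deleted app.py, which redirected the process-wide `sys.stdout`). A condensed sketch of why distinct names isolate the streams:

```python
import logging
from io import StringIO


def make_session_logger(name):
    stream = StringIO()
    logger = logging.getLogger(name)  # distinct name -> distinct logger object
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler(stream))
    return logger, stream


log_a, stream_a = make_session_logger("session-a")
log_b, stream_b = make_session_logger("session-b")
log_a.info("only session a sees this")

assert "session a" in stream_a.getvalue()
assert stream_b.getvalue() == ""
```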
pyzebra/app/panel_ccl_compare.py (new file, 538 lines)

@@ -0,0 +1,538 @@
```python
import base64
import io
import os
import tempfile

import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
    Button,
    CellEditor,
    CheckboxEditor,
    ColumnDataSource,
    DataTable,
    Div,
    FileInput,
    MultiSelect,
    Panel,
    RadioGroup,
    Select,
    Spacer,
    Span,
    Spinner,
    TableColumn,
    TextAreaInput,
    Whisker,
)
from bokeh.plotting import figure

import pyzebra
from pyzebra import EXPORT_TARGETS, app


def create():
    doc = curdoc()
    log = doc.logger
    dataset1 = []
    dataset2 = []
    app_dlfiles = app.DownloadFiles(n_files=2)

    def file_select_update_for_proposal():
        proposal_path = proposal_textinput.name
        if proposal_path:
            file_list = []
            for file in os.listdir(proposal_path):
                if file.endswith((".ccl")):
                    file_list.append((os.path.join(proposal_path, file), file))
            file_select.options = file_list
            file_open_button.disabled = False
        else:
            file_select.options = []
            file_open_button.disabled = True

    doc.add_periodic_callback(file_select_update_for_proposal, 5000)

    def proposal_textinput_callback(_attr, _old, _new):
        file_select_update_for_proposal()

    proposal_textinput = doc.proposal_textinput
    proposal_textinput.on_change("name", proposal_textinput_callback)

    def _init_datatable():
        # dataset2 should have the same metadata as dataset1
        scan_list = [s["idx"] for s in dataset1]
        hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in dataset1]
        export = [s["export"] for s in dataset1]

        twotheta = [np.median(s["twotheta"]) if "twotheta" in s else None for s in dataset1]
        gamma = [np.median(s["gamma"]) if "gamma" in s else None for s in dataset1]
        omega = [np.median(s["omega"]) if "omega" in s else None for s in dataset1]
        chi = [np.median(s["chi"]) if "chi" in s else None for s in dataset1]
        phi = [np.median(s["phi"]) if "phi" in s else None for s in dataset1]
        nu = [np.median(s["nu"]) if "nu" in s else None for s in dataset1]

        scan_table_source.data.update(
            scan=scan_list,
            hkl=hkl,
            fit=[0] * len(scan_list),
            export=export,
            twotheta=twotheta,
            gamma=gamma,
            omega=omega,
            chi=chi,
            phi=phi,
            nu=nu,
        )
        scan_table_source.selected.indices = []
        scan_table_source.selected.indices = [0]

        merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
        merge_from_select.options = merge_options
        merge_from_select.value = merge_options[0][0]

    file_select = MultiSelect(title="Select 2 .ccl files:", width=210, height=250)

    def file_open_button_callback():
        if len(file_select.value) != 2:
            log.warning("Select exactly 2 .ccl files.")
            return

        new_data1 = []
        new_data2 = []
        for ind, f_path in enumerate(file_select.value):
            with open(f_path) as file:
                f_name = os.path.basename(f_path)
                base, ext = os.path.splitext(f_name)
                try:
                    file_data = pyzebra.parse_1D(file, ext, log=log)
                except Exception as e:
                    log.exception(e)
                    return

                pyzebra.normalize_dataset(file_data, monitor_spinner.value)
                pyzebra.merge_duplicates(file_data, log=log)

                if ind == 0:
                    app_dlfiles.set_names([base, base])
                    new_data1 = file_data
                else:  # ind = 1
                    new_data2 = file_data

        # ignore extra scans at the end of the longest of the two files
        min_len = min(len(new_data1), len(new_data2))
        new_data1 = new_data1[:min_len]
        new_data2 = new_data2[:min_len]

        nonlocal dataset1, dataset2
        dataset1 = new_data1
        dataset2 = new_data2
        _init_datatable()

    file_open_button = Button(label="Open New", width=100, disabled=True)
    file_open_button.on_click(file_open_button_callback)

    def upload_button_callback(_attr, _old, _new):
        if len(upload_button.filename) != 2:
            log.warning("Upload exactly 2 .ccl files.")
            return

        new_data1 = []
        new_data2 = []
        for ind, (f_str, f_name) in enumerate(zip(upload_button.value, upload_button.filename)):
            with io.StringIO(base64.b64decode(f_str).decode()) as file:
                base, ext = os.path.splitext(f_name)
                try:
                    file_data = pyzebra.parse_1D(file, ext, log=log)
                except Exception as e:
                    log.exception(e)
                    return

                pyzebra.normalize_dataset(file_data, monitor_spinner.value)
                pyzebra.merge_duplicates(file_data, log=log)

                if ind == 0:
                    app_dlfiles.set_names([base, base])
                    new_data1 = file_data
                else:  # ind = 1
                    new_data2 = file_data

        # ignore extra scans at the end of the longest of the two files
        min_len = min(len(new_data1), len(new_data2))
        new_data1 = new_data1[:min_len]
        new_data2 = new_data2[:min_len]

        nonlocal dataset1, dataset2
        dataset1 = new_data1
        dataset2 = new_data2
        _init_datatable()

    upload_div = Div(text="or upload 2 .ccl files:", margin=(5, 5, 0, 5))
    upload_button = FileInput(accept=".ccl", multiple=True, width=200)
    # for on_change("value", ...) or on_change("filename", ...),
    # see https://github.com/bokeh/bokeh/issues/11461
    upload_button.on_change("filename", upload_button_callback)

    def monitor_spinner_callback(_attr, old, new):
        if dataset1 and dataset2:
            pyzebra.normalize_dataset(dataset1, new)
            pyzebra.normalize_dataset(dataset2, new)
            _update_plot()

    monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
    monitor_spinner.on_change("value", monitor_spinner_callback)

    def _update_table():
        fit_ok = [(1 if "fit" in scan else 0) for scan in dataset1]
        export = [scan["export"] for scan in dataset1]
        scan_table_source.data.update(fit=fit_ok, export=export)

    def _update_plot():
        scatter_sources = [scatter1_source, scatter2_source]
        fit_sources = [fit1_source, fit2_source]
        bkg_sources = [bkg1_source, bkg2_source]
        peak_sources = [peak1_source, peak2_source]
        fit_output = ""

        for ind, scan in enumerate(_get_selected_scan()):
            scatter_source = scatter_sources[ind]
            fit_source = fit_sources[ind]
            bkg_source = bkg_sources[ind]
            peak_source = peak_sources[ind]
            scan_motor = scan["scan_motor"]

            y = scan["counts"]
            y_err = scan["counts_err"]
            x = scan[scan_motor]

            plot.axis[0].axis_label = scan_motor
            scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)

            fit = scan.get("fit")
            if fit is not None:
                x_fit = np.linspace(x[0], x[-1], 100)
                fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))

                x_bkg = []
                y_bkg = []
                xs_peak = []
                ys_peak = []
                comps = fit.eval_components(x=x_fit)
                for i, model in enumerate(app_fitctrl.params):
                    if "linear" in model:
                        x_bkg = x_fit
                        y_bkg = comps[f"f{i}_"]

                    elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
                        xs_peak.append(x_fit)
                        ys_peak.append(comps[f"f{i}_"])

                bkg_source.data.update(x=x_bkg, y=y_bkg)
                peak_source.data.update(xs=xs_peak, ys=ys_peak)
                if fit_output:
                    fit_output = fit_output + "\n\n"
                fit_output = fit_output + fit.fit_report()

            else:
                fit_source.data.update(x=[], y=[])
                bkg_source.data.update(x=[], y=[])
                peak_source.data.update(xs=[], ys=[])

        app_fitctrl.result_textarea.value = fit_output

    # Main plot
    plot = figure(
        x_axis_label="Scan motor",
        y_axis_label="Counts",
        height=470,
        width=700,
        tools="pan,wheel_zoom,reset",
    )

    scatter1_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
    plot.circle(
        source=scatter1_source,
        line_color="steelblue",
        fill_color="steelblue",
        legend_label="data 1",
    )
    plot.add_layout(Whisker(source=scatter1_source, base="x", upper="y_upper", lower="y_lower"))

    scatter2_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
    plot.circle(
        source=scatter2_source,
        line_color="firebrick",
        fill_color="firebrick",
        legend_label="data 2",
    )
    plot.add_layout(Whisker(source=scatter2_source, base="x", upper="y_upper", lower="y_lower"))

    fit1_source = ColumnDataSource(dict(x=[0], y=[0]))
    plot.line(source=fit1_source, legend_label="best fit 1")

    fit2_source = ColumnDataSource(dict(x=[0], y=[0]))
    plot.line(source=fit2_source, line_color="firebrick", legend_label="best fit 2")

    bkg1_source = ColumnDataSource(dict(x=[0], y=[0]))
    plot.line(
        source=bkg1_source, line_color="steelblue", line_dash="dashed", legend_label="linear 1"
    )

    bkg2_source = ColumnDataSource(dict(x=[0], y=[0]))
    plot.line(
        source=bkg2_source, line_color="firebrick", line_dash="dashed", legend_label="linear 2"
    )

    peak1_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
    plot.multi_line(
        source=peak1_source, line_color="steelblue", line_dash="dashed", legend_label="peak 1"
    )

    peak2_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
    plot.multi_line(
        source=peak2_source, line_color="firebrick", line_dash="dashed", legend_label="peak 2"
    )

    fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
    plot.add_layout(fit_from_span)

    fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
    plot.add_layout(fit_to_span)

    plot.y_range.only_visible = True
    plot.toolbar.logo = None
    plot.legend.click_policy = "hide"

    # Scan select
    def scan_table_select_callback(_attr, old, new):
        if not new:
            # skip empty selections
            return

        # Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
        if len(new) > 1:
            # drop selection to the previous one
            scan_table_source.selected.indices = old
            return

        if len(old) > 1:
            # skip unnecessary update caused by selection drop
            return

        _update_plot()

    def scan_table_source_callback(_attr, _old, new):
        # unfortunately, we don't know if the change comes from data update or user input
        # also `old` and `new` are the same for non-scalars
        for scan1, scan2, export in zip(dataset1, dataset2, new["export"]):
            scan1["export"] = export
            scan2["export"] = export
        _update_preview()

    scan_table_source = ColumnDataSource(
        dict(
            scan=[],
            hkl=[],
            fit=[],
            export=[],
            twotheta=[],
            gamma=[],
            omega=[],
            chi=[],
            phi=[],
            nu=[],
        )
    )
    scan_table_source.on_change("data", scan_table_source_callback)
    scan_table_source.selected.on_change("indices", scan_table_select_callback)

    scan_table = DataTable(
        source=scan_table_source,
        columns=[
            TableColumn(field="scan", title="Scan", editor=CellEditor(), width=50),
            TableColumn(field="hkl", title="hkl", editor=CellEditor(), width=100),
            TableColumn(field="fit", title="Fit", editor=CellEditor(), width=50),
            TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
            TableColumn(field="twotheta", title="2theta", editor=CellEditor(), width=50),
            TableColumn(field="gamma", title="gamma", editor=CellEditor(), width=50),
            TableColumn(field="omega", title="omega", editor=CellEditor(), width=50),
            TableColumn(field="chi", title="chi", editor=CellEditor(), width=50),
            TableColumn(field="phi", title="phi", editor=CellEditor(), width=50),
            TableColumn(field="nu", title="nu", editor=CellEditor(), width=50),
        ],
        width=310,  # +60 because of the index column, but excluding twotheta onwards
        height=350,
        autosize_mode="none",
        editable=True,
    )

    def _get_selected_scan():
        ind = scan_table_source.selected.indices[0]
        return dataset1[ind], dataset2[ind]

    merge_from_select = Select(title="scan:", width=145)

    def merge_button_callback():
        scan_into1, scan_into2 = _get_selected_scan()
        scan_from1 = dataset1[int(merge_from_select.value)]
        scan_from2 = dataset2[int(merge_from_select.value)]

        if scan_into1 is scan_from1:
            log.warning("Selected scans for merging are identical")
            return

        pyzebra.merge_scans(scan_into1, scan_from1, log=log)
        pyzebra.merge_scans(scan_into2, scan_from2, log=log)
        _update_table()
        _update_plot()

    merge_button = Button(label="Merge into current", width=145)
    merge_button.on_click(merge_button_callback)

    def restore_button_callback():
        scan1, scan2 = _get_selected_scan()
        pyzebra.restore_scan(scan1)
        pyzebra.restore_scan(scan2)
        _update_table()
        _update_plot()

    restore_button = Button(label="Restore scan", width=145)
    restore_button.on_click(restore_button_callback)

    app_fitctrl = app.FitControls()

    def fit_from_spinner_callback(_attr, _old, new):
        fit_from_span.location = new

    app_fitctrl.from_spinner.on_change("value", fit_from_spinner_callback)

    def fit_to_spinner_callback(_attr, _old, new):
        fit_to_span.location = new

    app_fitctrl.to_spinner.on_change("value", fit_to_spinner_callback)

    def proc_all_button_callback():
        app_fitctrl.fit_dataset(dataset1)
        app_fitctrl.fit_dataset(dataset2)

        _update_plot()
        _update_table()

    proc_all_button = Button(label="Process All", button_type="primary", width=145)
    proc_all_button.on_click(proc_all_button_callback)

    def proc_button_callback():
        scan1, scan2 = _get_selected_scan()
        app_fitctrl.fit_scan(scan1)
        app_fitctrl.fit_scan(scan2)

        _update_plot()
        _update_table()

    proc_button = Button(label="Process Current", width=145)
    proc_button.on_click(proc_button_callback)

    intensity_diff_div = Div(text="Intensity difference:", margin=(5, 5, 0, 5))
    intensity_diff_radiobutton = RadioGroup(
        labels=["file1 - file2", "file2 - file1"], active=0, width=145
    )

    export_preview_textinput = TextAreaInput(title="Export file(s) preview:", width=500, height=400)

    def _update_preview():
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_file = temp_dir + "/temp"
            export_data1 = []
            export_data2 = []
            for scan1, scan2 in zip(dataset1, dataset2):
                if scan1["export"]:
                    export_data1.append(scan1)
                    export_data2.append(scan2)

            if intensity_diff_radiobutton.active:
                export_data1, export_data2 = export_data2, export_data1

            pyzebra.export_ccl_compare(
                export_data1,
                export_data2,
                temp_file,
                export_target_select.value,
                hkl_precision=int(hkl_precision_select.value),
            )

            exported_content = ""
            file_content = []
            for ext in EXPORT_TARGETS[export_target_select.value]:
                fname = temp_file + ext
                if os.path.isfile(fname):
                    with open(fname) as f:
                        content = f.read()
                    exported_content += f"{ext} file:\n" + content
                else:
                    content = ""
                file_content.append(content)

            app_dlfiles.set_contents(file_content)
            export_preview_textinput.value = exported_content

    def export_target_select_callback(_attr, _old, new):
        app_dlfiles.set_extensions(EXPORT_TARGETS[new])
        _update_preview()

    export_target_select = Select(
        title="Export target:", options=list(EXPORT_TARGETS.keys()), value="fullprof", width=80
    )
    export_target_select.on_change("value", export_target_select_callback)
    app_dlfiles.set_extensions(EXPORT_TARGETS[export_target_select.value])

    def hkl_precision_select_callback(_attr, _old, _new):
        _update_preview()

    hkl_precision_select = Select(
        title="hkl precision:", options=["2", "3", "4"], value="2", width=80
    )
    hkl_precision_select.on_change("value", hkl_precision_select_callback)

    area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
    fitpeak_controls = row(
        column(
            app_fitctrl.add_function_button,
            app_fitctrl.function_select,
            app_fitctrl.remove_function_button,
        ),
        app_fitctrl.params_table,
        Spacer(width=20),
        column(
            app_fitctrl.from_spinner,
            app_fitctrl.lorentz_checkbox,
            area_method_div,
            app_fitctrl.area_method_radiogroup,
            intensity_diff_div,
            intensity_diff_radiobutton,
        ),
        column(app_fitctrl.to_spinner, proc_button, proc_all_button),
    )

    scan_layout = column(
        scan_table,
        row(monitor_spinner, column(Spacer(height=19), restore_button)),
        row(column(Spacer(height=19), merge_button), merge_from_select),
    )

    import_layout = column(file_select, file_open_button, upload_div, upload_button)

    export_layout = column(
        export_preview_textinput,
        row(
            export_target_select,
            hkl_precision_select,
            column(Spacer(height=19), row(app_dlfiles.button)),
        ),
    )

    tab_layout = column(
        row(import_layout, scan_layout, plot, Spacer(width=30), export_layout),
        row(fitpeak_controls, app_fitctrl.result_textarea),
    )

    return Panel(child=tab_layout, title="ccl compare")
```
pyzebra/app/panel_ccl_integrate.py
@@ -1,548 +1,367 @@
|
||||
import base64
|
||||
import io
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
import numpy as np
|
||||
from bokeh.io import curdoc
|
||||
from bokeh.layouts import column, row
|
||||
from bokeh.models import (
|
||||
Asterisk,
|
||||
BasicTicker,
|
||||
Button,
|
||||
CellEditor,
|
||||
CheckboxEditor,
|
||||
ColumnDataSource,
|
||||
CustomJS,
|
||||
DataRange1d,
|
||||
DataTable,
|
||||
Div,
|
||||
FileInput,
|
||||
Grid,
|
||||
Line,
|
||||
LinearAxis,
|
||||
Panel,
|
||||
Plot,
|
||||
RadioButtonGroup,
|
||||
Scatter,
|
||||
Select,
|
||||
Spacer,
|
||||
Span,
|
||||
Spinner,
|
||||
TableColumn,
|
||||
TextAreaInput,
|
||||
TextInput,
|
||||
Toggle,
|
||||
Whisker,
|
||||
)
|
||||
from bokeh.plotting import figure
|
||||
|
||||
import pyzebra
|
||||
|
||||
|
||||
javaScript = """
|
||||
setTimeout(function() {
|
||||
const filename = 'output' + js_data.data['ext']
|
||||
const blob = new Blob([js_data.data['cont']], {type: 'text/plain'})
|
||||
const link = document.createElement('a');
|
||||
document.body.appendChild(link);
|
||||
const url = window.URL.createObjectURL(blob);
|
||||
link.href = url;
|
||||
link.download = filename;
|
||||
link.click();
|
||||
window.URL.revokeObjectURL(url);
|
||||
document.body.removeChild(link);
|
||||
}, 500);
|
||||
"""
|
||||
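# Note: this CustomJS snippet runs in the browser and turns the text stored in
# `js_data` into a client-side file download (Blob + temporary <a> element).
# A minimal sketch of the server-side wiring it expects (Bokeh widget API;
# button name is illustrative):
#
#   save_button = Button(label="Download file")
#   save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
#   # a Python click callback then fills js_data.data with cont/ext strings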
|
||||
PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
|
||||
from pyzebra import EXPORT_TARGETS, app
|
||||
|
||||
|
||||
def create():
|
||||
det_data = {}
|
||||
peak_pos_textinput_lock = False
|
||||
js_data = ColumnDataSource(data=dict(cont=[], ext=[]))
|
||||
doc = curdoc()
|
||||
log = doc.logger
|
||||
dataset = []
|
||||
app_dlfiles = app.DownloadFiles(n_files=2)
|
||||
|
||||
def proposal_textinput_callback(_attr, _old, new):
|
||||
ccl_path = os.path.join(PROPOSAL_PATH, new)
|
||||
ccl_file_list = []
|
||||
for file in os.listdir(ccl_path):
|
||||
if file.endswith(".ccl"):
|
||||
ccl_file_list.append((os.path.join(ccl_path, file), file))
|
||||
ccl_file_select.options = ccl_file_list
|
||||
ccl_file_select.value = ccl_file_list[0][0]
|
||||
def _init_datatable():
|
||||
scan_list = [s["idx"] for s in dataset]
|
||||
hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in dataset]
|
||||
export = [s["export"] for s in dataset]
|
||||
|
||||
proposal_textinput = TextInput(title="Enter proposal number:", default_size=145)
|
||||
proposal_textinput.on_change("value", proposal_textinput_callback)
|
||||
twotheta = [np.median(s["twotheta"]) if "twotheta" in s else None for s in dataset]
|
||||
gamma = [np.median(s["gamma"]) if "gamma" in s else None for s in dataset]
|
||||
omega = [np.median(s["omega"]) if "omega" in s else None for s in dataset]
|
||||
chi = [np.median(s["chi"]) if "chi" in s else None for s in dataset]
|
||||
phi = [np.median(s["phi"]) if "phi" in s else None for s in dataset]
|
||||
nu = [np.median(s["nu"]) if "nu" in s else None for s in dataset]
|
||||
|
||||
def ccl_file_select_callback(_attr, _old, new):
|
||||
nonlocal det_data
|
||||
with open(new) as file:
|
||||
_, ext = os.path.splitext(new)
|
||||
det_data = pyzebra.parse_1D(file, ext)
|
||||
|
||||
scan_list = list(det_data["scan"].keys())
|
||||
hkl = [
|
||||
f'{int(m["h_index"])} {int(m["k_index"])} {int(m["l_index"])}'
|
||||
for m in det_data["scan"].values()
|
||||
]
|
||||
scan_table_source.data.update(
    scan=scan_list,
    hkl=hkl,
    fit=[0] * len(scan_list),
    export=export,
    twotheta=twotheta,
    gamma=gamma,
    omega=omega,
    chi=chi,
    phi=phi,
    nu=nu,
)
|
||||
scan_table_source.selected.indices = []
|
||||
scan_table_source.selected.indices = [0]
|
||||
|
||||
ccl_file_select = Select(title="Available .ccl files")
|
||||
ccl_file_select.on_change("value", ccl_file_select_callback)
|
||||
|
||||
def upload_button_callback(_attr, _old, new):
|
||||
nonlocal det_data
|
||||
with io.StringIO(base64.b64decode(new).decode()) as file:
|
||||
_, ext = os.path.splitext(upload_button.filename)
|
||||
det_data = pyzebra.parse_1D(file, ext)
|
||||
|
||||
scan_list = list(det_data["scan"].keys())
|
||||
hkl = [
|
||||
f'{int(m["h_index"])} {int(m["k_index"])} {int(m["l_index"])}'
|
||||
for m in det_data["scan"].values()
|
||||
]
|
||||
scan_table_source.data.update(
|
||||
scan=scan_list, hkl=hkl, peaks=[0] * len(scan_list), fit=[0] * len(scan_list)
|
||||
)
|
||||
scan_table_source.selected.indices = []
|
||||
scan_table_source.selected.indices = [0]
|
||||
|
||||
upload_button = FileInput(accept=".ccl")
|
||||
upload_button.on_change("value", upload_button_callback)
|
||||
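# FileInput hands the uploaded file over as a base64-encoded string; the
# callback above uses the standard decode pattern:
#
#   with io.StringIO(base64.b64decode(new).decode()) as file:
#       det_data = pyzebra.parse_1D(file, ext)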
merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
|
||||
merge_from_select.options = merge_options
|
||||
merge_from_select.value = merge_options[0][0]
|
||||
|
||||
def _update_table():
|
||||
num_of_peaks = [scan.get("num_of_peaks", 0) for scan in det_data["scan"].values()]
|
||||
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data["scan"].values()]
|
||||
scan_table_source.data.update(peaks=num_of_peaks, fit=fit_ok)
|
||||
fit_ok = [(1 if "fit" in scan else 0) for scan in dataset]
|
||||
export = [scan["export"] for scan in dataset]
|
||||
scan_table_source.data.update(fit=fit_ok, export=export)
|
||||
|
||||
def _update_plot(ind):
|
||||
nonlocal peak_pos_textinput_lock
|
||||
peak_pos_textinput_lock = True
|
||||
def _update_plot():
|
||||
scan = _get_selected_scan()
|
||||
scan_motor = scan["scan_motor"]
|
||||
|
||||
scan = det_data["scan"][ind]
|
||||
y = scan["Counts"]
|
||||
x = scan["om"]
|
||||
y = scan["counts"]
|
||||
y_err = scan["counts_err"]
|
||||
x = scan[scan_motor]
|
||||
|
||||
plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))
|
||||
|
||||
num_of_peaks = scan.get("num_of_peaks")
|
||||
if num_of_peaks is not None and num_of_peaks > 0:
|
||||
peak_indexes = scan["peak_indexes"]
|
||||
if len(peak_indexes) == 1:
|
||||
peak_pos_textinput.value = str(scan["om"][peak_indexes[0]])
|
||||
else:
|
||||
peak_pos_textinput.value = str([scan["om"][ind] for ind in peak_indexes])
|
||||
|
||||
plot_peak_source.data.update(x=scan["om"][peak_indexes], y=scan["peak_heights"])
|
||||
plot_line_smooth_source.data.update(x=x, y=scan["smooth_peaks"])
|
||||
else:
|
||||
peak_pos_textinput.value = None
|
||||
plot_peak_source.data.update(x=[], y=[])
|
||||
plot_line_smooth_source.data.update(x=[], y=[])
|
||||
|
||||
peak_pos_textinput_lock = False
|
||||
plot.axis[0].axis_label = scan_motor
|
||||
scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)
|
||||
|
||||
fit = scan.get("fit")
|
||||
if fit is not None:
|
||||
plot_gauss_source.data.update(x=x, y=scan["fit"]["comps"]["gaussian"])
|
||||
plot_bkg_source.data.update(x=x, y=scan["fit"]["comps"]["background"])
|
||||
params = fit["result"].params
|
||||
fit_output_textinput.value = (
|
||||
"%s \n"
|
||||
"Gaussian: centre = %9.4f, sigma = %9.4f, area = %9.4f \n"
|
||||
"background: slope = %9.4f, intercept = %9.4f \n"
|
||||
"Int. area = %9.4f +/- %9.4f \n"
|
||||
"fit area = %9.4f +/- %9.4f \n"
|
||||
"ratio((fit-int)/fit) = %9.4f"
|
||||
% (
|
||||
ind,
|
||||
params["g_cen"].value,
|
||||
params["g_width"].value,
|
||||
params["g_amp"].value,
|
||||
params["slope"].value,
|
||||
params["intercept"].value,
|
||||
fit["int_area"].n,
|
||||
fit["int_area"].s,
|
||||
params["g_amp"].value,
|
||||
params["g_amp"].stderr,
|
||||
(params["g_amp"].value - fit["int_area"].n) / params["g_amp"].value,
|
||||
)
|
||||
)
|
||||
numfit_min, numfit_max = fit["numfit"]
|
||||
if numfit_min is None:
|
||||
numfit_min_span.location = None
|
||||
else:
|
||||
numfit_min_span.location = x[numfit_min]
|
||||
x_fit = np.linspace(x[0], x[-1], 100)
|
||||
fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
|
||||
|
||||
if numfit_max is None:
|
||||
numfit_max_span.location = None
|
||||
else:
|
||||
numfit_max_span.location = x[numfit_max]
|
||||
x_bkg = []
|
||||
y_bkg = []
|
||||
xs_peak = []
|
||||
ys_peak = []
|
||||
comps = fit.eval_components(x=x_fit)
|
||||
for i, model in enumerate(app_fitctrl.params):
|
||||
if "linear" in model:
|
||||
x_bkg = x_fit
|
||||
y_bkg = comps[f"f{i}_"]
|
||||
|
||||
elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
|
||||
xs_peak.append(x_fit)
|
||||
ys_peak.append(comps[f"f{i}_"])
|
||||
|
||||
bkg_source.data.update(x=x_bkg, y=y_bkg)
|
||||
peak_source.data.update(xs=xs_peak, ys=ys_peak)
|
||||
|
||||
else:
|
||||
plot_gauss_source.data.update(x=[], y=[])
|
||||
plot_bkg_source.data.update(x=[], y=[])
|
||||
fit_output_textinput.value = ""
|
||||
numfit_min_span.location = None
|
||||
numfit_max_span.location = None
|
||||
fit_source.data.update(x=[], y=[])
|
||||
bkg_source.data.update(x=[], y=[])
|
||||
peak_source.data.update(xs=[], ys=[])
|
||||
|
||||
app_fitctrl.update_result_textarea(scan)
|
||||
|
||||
app_inputctrl = app.InputControls(
|
||||
dataset, app_dlfiles, on_file_open=_init_datatable, on_monitor_change=_update_plot
|
||||
)
|
||||
|
||||
# Main plot
|
||||
plot = figure(
    x_axis_label="Scan motor",
    y_axis_label="Counts",
    height=470,
    width=700,
    tools="pan,wheel_zoom,reset",
)
|
||||
|
||||
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
|
||||
plot.add_layout(LinearAxis(axis_label="Omega"), place="below")
|
||||
|
||||
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
|
||||
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
|
||||
|
||||
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
|
||||
plot.add_glyph(plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue"))
|
||||
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
|
||||
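# The Whisker annotation draws the counting-statistics error bars from the
# y_upper/y_lower columns that _update_plot fills in (y plus/minus its error).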
|
||||
plot_line_smooth_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||
scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
plot.circle(
    source=scatter_source, line_color="steelblue", fill_color="steelblue", legend_label="data"
)
|
||||
plot.add_layout(Whisker(source=scatter_source, base="x", upper="y_upper", lower="y_lower"))
|
||||
|
||||
plot_gauss_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||
plot.add_glyph(plot_gauss_source, Line(x="x", y="y", line_color="red", line_dash="dashed"))
|
||||
fit_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||
plot.line(source=fit_source, legend_label="best fit")
|
||||
|
||||
plot_bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||
plot.add_glyph(plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed"))
|
||||
bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||
plot.line(source=bkg_source, line_color="green", line_dash="dashed", legend_label="linear")
|
||||
|
||||
plot_peak_source = ColumnDataSource(dict(x=[], y=[]))
|
||||
plot.add_glyph(plot_peak_source, Asterisk(x="x", y="y", size=10, line_color="red"))
|
||||
peak_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
|
||||
plot.multi_line(source=peak_source, line_color="red", line_dash="dashed", legend_label="peak")
|
||||
|
||||
numfit_min_span = Span(location=None, dimension="height", line_dash="dashed")
|
||||
plot.add_layout(numfit_min_span)
|
||||
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
|
||||
plot.add_layout(fit_from_span)
|
||||
|
||||
numfit_max_span = Span(location=None, dimension="height", line_dash="dashed")
|
||||
plot.add_layout(numfit_max_span)
|
||||
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
|
||||
plot.add_layout(fit_to_span)
|
||||
|
||||
plot.y_range.only_visible = True
|
||||
plot.toolbar.logo = None
|
||||
plot.legend.click_policy = "hide"
|
||||
|
||||
# Scan select
|
||||
def scan_table_callback(_attr, _old, new):
|
||||
if new:
|
||||
_update_plot(scan_table_source.data["scan"][new[-1]])
|
||||
def scan_table_select_callback(_attr, old, new):
|
||||
if not new:
|
||||
# skip empty selections
|
||||
return
|
||||
|
||||
# Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
|
||||
if len(new) > 1:
|
||||
# drop selection to the previous one
|
||||
scan_table_source.selected.indices = old
|
||||
return
|
||||
|
||||
if len(old) > 1:
|
||||
# skip unnecessary update caused by selection drop
|
||||
return
|
||||
|
||||
_update_plot()
|
||||
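# The two guards above work together: collapsing a multi-selection re-fires
# this callback with len(old) > 1, which is then skipped, so _update_plot()
# runs exactly once per effective selection change.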
|
||||
def scan_table_source_callback(_attr, _old, new):
|
||||
# unfortunately, we don't know if the change comes from data update or user input
|
||||
# also `old` and `new` are the same for non-scalars
|
||||
for scan, export in zip(dataset, new["export"]):
|
||||
scan["export"] = export
|
||||
_update_preview()
|
||||
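# Bokeh fires this "data" callback for programmatic updates as well as user
# edits, and for non-scalar columns `old` and `new` reference the same data,
# so the export flags are unconditionally re-synced into the dataset here.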
|
||||
scan_table_source = ColumnDataSource(
|
||||
dict(
|
||||
scan=[],
|
||||
hkl=[],
|
||||
fit=[],
|
||||
export=[],
|
||||
twotheta=[],
|
||||
gamma=[],
|
||||
omega=[],
|
||||
chi=[],
|
||||
phi=[],
|
||||
nu=[],
|
||||
)
|
||||
)
|
||||
scan_table_source.on_change("data", scan_table_source_callback)
|
||||
scan_table_source.selected.on_change("indices", scan_table_select_callback)
|
||||
|
||||
scan_table = DataTable(
    source=scan_table_source,
    columns=[
        TableColumn(field="scan", title="Scan", editor=CellEditor(), width=50),
        TableColumn(field="hkl", title="hkl", editor=CellEditor(), width=100),
        TableColumn(field="fit", title="Fit", editor=CellEditor(), width=50),
        TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
        TableColumn(field="twotheta", title="2theta", editor=CellEditor(), width=50),
        TableColumn(field="gamma", title="gamma", editor=CellEditor(), width=50),
        TableColumn(field="omega", title="omega", editor=CellEditor(), width=50),
        TableColumn(field="chi", title="chi", editor=CellEditor(), width=50),
        TableColumn(field="phi", title="phi", editor=CellEditor(), width=50),
        TableColumn(field="nu", title="nu", editor=CellEditor(), width=50),
    ],
    index_position=None,
    width=310,  # +60 because of the index column, but excluding twotheta onwards
    height=350,
    autosize_mode="none",
    editable=True,
)
|
||||
|
||||
scan_table_source.selected.on_change("indices", scan_table_callback)
|
||||
def _get_selected_scan():
|
||||
return dataset[scan_table_source.selected.indices[0]]
|
||||
|
||||
def peak_pos_textinput_callback(_attr, _old, new):
|
||||
if new is not None and not peak_pos_textinput_lock:
|
||||
sel_ind = scan_table_source.selected.indices[-1]
|
||||
scan_name = scan_table_source.data["scan"][sel_ind]
|
||||
scan = det_data["scan"][scan_name]
|
||||
merge_from_select = Select(title="scan:", width=145)
|
||||
|
||||
scan["num_of_peaks"] = 1
|
||||
peak_ind = (np.abs(scan["om"] - float(new))).argmin()
|
||||
scan["peak_indexes"] = np.array([peak_ind], dtype=np.int64)
|
||||
scan["peak_heights"] = np.array([scan["smooth_peaks"][peak_ind]])
|
||||
_update_table()
|
||||
_update_plot(scan_name)
|
||||
def merge_button_callback():
    scan_into = _get_selected_scan()
    scan_from = dataset[int(merge_from_select.value)]

    if scan_into is scan_from:
        log.warning("Selected scans for merging are identical")
        return

    pyzebra.merge_scans(scan_into, scan_from, log=log)
    _update_table()
    _update_plot()
|
||||
|
||||
centre_guess = Spinner(default_size=100)
|
||||
centre_vary = Toggle(default_size=100, active=True)
|
||||
centre_min = Spinner(default_size=100)
|
||||
centre_max = Spinner(default_size=100)
|
||||
sigma_guess = Spinner(default_size=100)
|
||||
sigma_vary = Toggle(default_size=100, active=True)
|
||||
sigma_min = Spinner(default_size=100)
|
||||
sigma_max = Spinner(default_size=100)
|
||||
ampl_guess = Spinner(default_size=100)
|
||||
ampl_vary = Toggle(default_size=100, active=True)
|
||||
ampl_min = Spinner(default_size=100)
|
||||
ampl_max = Spinner(default_size=100)
|
||||
slope_guess = Spinner(default_size=100)
|
||||
slope_vary = Toggle(default_size=100, active=True)
|
||||
slope_min = Spinner(default_size=100)
|
||||
slope_max = Spinner(default_size=100)
|
||||
offset_guess = Spinner(default_size=100)
|
||||
offset_vary = Toggle(default_size=100, active=True)
|
||||
offset_min = Spinner(default_size=100)
|
||||
offset_max = Spinner(default_size=100)
|
||||
integ_from = Spinner(title="Integrate from:", default_size=145)
|
||||
integ_to = Spinner(title="to:", default_size=145)
|
||||
merge_button = Button(label="Merge into current", width=145)
|
||||
merge_button.on_click(merge_button_callback)
|
||||
|
||||
def fitparam_reset_button_callback():
|
||||
centre_guess.value = None
|
||||
centre_vary.active = True
|
||||
centre_min.value = None
|
||||
centre_max.value = None
|
||||
sigma_guess.value = None
|
||||
sigma_vary.active = True
|
||||
sigma_min.value = None
|
||||
sigma_max.value = None
|
||||
ampl_guess.value = None
|
||||
ampl_vary.active = True
|
||||
ampl_min.value = None
|
||||
ampl_max.value = None
|
||||
slope_guess.value = None
|
||||
slope_vary.active = True
|
||||
slope_min.value = None
|
||||
slope_max.value = None
|
||||
offset_guess.value = None
|
||||
offset_vary.active = True
|
||||
offset_min.value = None
|
||||
offset_max.value = None
|
||||
integ_from.value = None
|
||||
integ_to.value = None
|
||||
def restore_button_callback():
|
||||
pyzebra.restore_scan(_get_selected_scan())
|
||||
_update_table()
|
||||
_update_plot()
|
||||
|
||||
fitparam_reset_button = Button(label="Reset to defaults", default_size=145)
|
||||
fitparam_reset_button.on_click(fitparam_reset_button_callback)
|
||||
restore_button = Button(label="Restore scan", width=145)
|
||||
restore_button.on_click(restore_button_callback)
|
||||
|
||||
fit_output_textinput = TextAreaInput(title="Fit results:", width=450, height=400)
|
||||
app_fitctrl = app.FitControls()
|
||||
|
||||
def peakfind_all_button_callback():
|
||||
for scan in det_data["scan"].values():
|
||||
pyzebra.ccl_findpeaks(
|
||||
scan,
|
||||
int_threshold=peak_int_ratio_spinner.value,
|
||||
prominence=peak_prominence_spinner.value,
|
||||
smooth=smooth_toggle.active,
|
||||
window_size=window_size_spinner.value,
|
||||
poly_order=poly_order_spinner.value,
|
||||
)
|
||||
def fit_from_spinner_callback(_attr, _old, new):
|
||||
fit_from_span.location = new
|
||||
|
||||
app_fitctrl.from_spinner.on_change("value", fit_from_spinner_callback)
|
||||
|
||||
def fit_to_spinner_callback(_attr, _old, new):
|
||||
fit_to_span.location = new
|
||||
|
||||
app_fitctrl.to_spinner.on_change("value", fit_to_spinner_callback)
|
||||
|
||||
def proc_all_button_callback():
|
||||
app_fitctrl.fit_dataset(dataset)
|
||||
|
||||
_update_plot()
|
||||
_update_table()
|
||||
|
||||
sel_ind = scan_table_source.selected.indices[-1]
|
||||
_update_plot(scan_table_source.data["scan"][sel_ind])
|
||||
proc_all_button = Button(label="Process All", button_type="primary", width=145)
|
||||
proc_all_button.on_click(proc_all_button_callback)
|
||||
|
||||
peakfind_all_button = Button(label="Peak Find All", button_type="primary", default_size=145)
|
||||
peakfind_all_button.on_click(peakfind_all_button_callback)
|
||||
def proc_button_callback():
|
||||
app_fitctrl.fit_scan(_get_selected_scan())
|
||||
|
||||
def peakfind_button_callback():
|
||||
sel_ind = scan_table_source.selected.indices[-1]
|
||||
scan = scan_table_source.data["scan"][sel_ind]
|
||||
pyzebra.ccl_findpeaks(
|
||||
det_data["scan"][scan],
|
||||
int_threshold=peak_int_ratio_spinner.value,
|
||||
prominence=peak_prominence_spinner.value,
|
||||
smooth=smooth_toggle.active,
|
||||
window_size=window_size_spinner.value,
|
||||
poly_order=poly_order_spinner.value,
|
||||
)
|
||||
|
||||
_update_table()
|
||||
_update_plot(scan)
|
||||
|
||||
peakfind_button = Button(label="Peak Find Current", default_size=145)
|
||||
peakfind_button.on_click(peakfind_button_callback)
|
||||
|
||||
def fit_all_button_callback():
|
||||
for scan in det_data["scan"].values():
|
||||
pyzebra.fitccl(
|
||||
scan,
|
||||
guess=[
|
||||
centre_guess.value,
|
||||
sigma_guess.value,
|
||||
ampl_guess.value,
|
||||
slope_guess.value,
|
||||
offset_guess.value,
|
||||
],
|
||||
vary=[
|
||||
centre_vary.active,
|
||||
sigma_vary.active,
|
||||
ampl_vary.active,
|
||||
slope_vary.active,
|
||||
offset_vary.active,
|
||||
],
|
||||
constraints_min=[
|
||||
centre_min.value,
|
||||
sigma_min.value,
|
||||
ampl_min.value,
|
||||
slope_min.value,
|
||||
offset_min.value,
|
||||
],
|
||||
constraints_max=[
|
||||
centre_max.value,
|
||||
sigma_max.value,
|
||||
ampl_max.value,
|
||||
slope_max.value,
|
||||
offset_max.value,
|
||||
],
|
||||
numfit_min=integ_from.value,
|
||||
numfit_max=integ_to.value,
|
||||
)
|
||||
|
||||
sel_ind = scan_table_source.selected.indices[-1]
|
||||
_update_plot(scan_table_source.data["scan"][sel_ind])
|
||||
_update_plot()
|
||||
_update_table()
|
||||
|
||||
fit_all_button = Button(label="Fit All", button_type="primary", default_size=145)
|
||||
fit_all_button.on_click(fit_all_button_callback)
|
||||
proc_button = Button(label="Process Current", width=145)
|
||||
proc_button.on_click(proc_button_callback)
|
||||
|
||||
def fit_button_callback():
|
||||
sel_ind = scan_table_source.selected.indices[-1]
|
||||
scan = scan_table_source.data["scan"][sel_ind]
|
||||
|
||||
pyzebra.fitccl(
|
||||
det_data["scan"][scan],
|
||||
guess=[
|
||||
centre_guess.value,
|
||||
sigma_guess.value,
|
||||
ampl_guess.value,
|
||||
slope_guess.value,
|
||||
offset_guess.value,
|
||||
],
|
||||
vary=[
|
||||
centre_vary.active,
|
||||
sigma_vary.active,
|
||||
ampl_vary.active,
|
||||
slope_vary.active,
|
||||
offset_vary.active,
|
||||
],
|
||||
constraints_min=[
|
||||
centre_min.value,
|
||||
sigma_min.value,
|
||||
ampl_min.value,
|
||||
slope_min.value,
|
||||
offset_min.value,
|
||||
],
|
||||
constraints_max=[
|
||||
centre_max.value,
|
||||
sigma_max.value,
|
||||
ampl_max.value,
|
||||
slope_max.value,
|
||||
offset_max.value,
|
||||
],
|
||||
numfit_min=integ_from.value,
|
||||
numfit_max=integ_to.value,
|
||||
)
|
||||
|
||||
_update_plot(scan)
|
||||
_update_table()
|
||||
|
||||
fit_button = Button(label="Fit Current", default_size=145)
|
||||
fit_button.on_click(fit_button_callback)
|
||||
|
||||
def area_method_radiobutton_callback(_attr, _old, new):
|
||||
det_data["meta"]["area_method"] = ("fit", "integ")[new]
|
||||
|
||||
area_method_radiobutton = RadioButtonGroup(
|
||||
labels=["Fit", "Integral"], active=0, default_size=145
|
||||
)
|
||||
area_method_radiobutton.on_change("active", area_method_radiobutton_callback)
|
||||
|
||||
preview_output_textinput = TextAreaInput(title="Export file preview:", width=450, height=400)
|
||||
|
||||
export_preview_textinput = TextAreaInput(title="Export file(s) preview:", width=500, height=400)

def _update_preview():
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_file = temp_dir + "/temp"
        export_data = []
        for scan in dataset:
            if scan["export"]:
                export_data.append(scan)

        pyzebra.export_1D(
            export_data,
            temp_file,
            export_target_select.value,
            hkl_precision=int(hkl_precision_select.value),
        )

        exported_content = ""
        file_content = []
        for ext in EXPORT_TARGETS[export_target_select.value]:
            fname = temp_file + ext
            if os.path.isfile(fname):
                with open(fname) as f:
                    content = f.read()
                exported_content += f"{ext} file:\n" + content
            else:
                content = ""
            file_content.append(content)

        app_dlfiles.set_contents(file_content)
        export_preview_textinput.value = exported_content

def export_target_select_callback(_attr, _old, new):
    app_dlfiles.set_extensions(EXPORT_TARGETS[new])
    _update_preview()

export_target_select = Select(
    title="Export target:", options=list(EXPORT_TARGETS.keys()), value="fullprof", width=80
)
export_target_select.on_change("value", export_target_select_callback)
app_dlfiles.set_extensions(EXPORT_TARGETS[export_target_select.value])

def hkl_precision_select_callback(_attr, _old, _new):
    _update_preview()

hkl_precision_select = Select(
    title="hkl precision:", options=["2", "3", "4"], value="2", width=80
)
hkl_precision_select.on_change("value", hkl_precision_select_callback)

area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
fitpeak_controls = row(
    column(
        app_fitctrl.add_function_button,
        app_fitctrl.function_select,
        app_fitctrl.remove_function_button,
    ),
    app_fitctrl.params_table,
    Spacer(width=20),
    column(
        app_fitctrl.from_spinner,
        app_fitctrl.lorentz_checkbox,
        area_method_div,
        app_fitctrl.area_method_radiogroup,
    ),
    column(app_fitctrl.to_spinner, proc_button, proc_all_button),
)

scan_layout = column(
    scan_table,
    row(app_inputctrl.monitor_spinner, column(Spacer(height=19), restore_button)),
    row(column(Spacer(height=19), merge_button), merge_from_select),
)

upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
import_layout = column(
    app_inputctrl.filelist_select,
    row(app_inputctrl.open_button, app_inputctrl.append_button),
    upload_div,
    app_inputctrl.upload_button,
    append_upload_div,
    app_inputctrl.append_upload_button,
)

export_layout = column(
    export_preview_textinput,
    row(
        export_target_select,
        hkl_precision_select,
        column(Spacer(height=19), row(app_dlfiles.button)),
    ),
)

tab_layout = column(
    row(import_layout, scan_layout, plot, Spacer(width=30), export_layout),
    row(fitpeak_controls, app_fitctrl.result_textarea),
)

return Panel(child=tab_layout, title="ccl integrate")
pyzebra/app/panel_ccl_prepare.py (new file, +724 lines)
@@ -0,0 +1,724 @@
|
||||
import base64
|
||||
import io
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
import numpy as np
|
||||
from bokeh.io import curdoc
|
||||
from bokeh.layouts import column, row
|
||||
from bokeh.models import (
|
||||
Arrow,
|
||||
Button,
|
||||
CheckboxGroup,
|
||||
ColumnDataSource,
|
||||
Div,
|
||||
FileInput,
|
||||
HoverTool,
|
||||
Legend,
|
||||
LegendItem,
|
||||
MultiSelect,
|
||||
NormalHead,
|
||||
NumericInput,
|
||||
Panel,
|
||||
RadioGroup,
|
||||
Select,
|
||||
Spacer,
|
||||
Spinner,
|
||||
TextAreaInput,
|
||||
TextInput,
|
||||
)
|
||||
from bokeh.palettes import Dark2
|
||||
from bokeh.plotting import figure
|
||||
|
||||
import pyzebra
|
||||
from pyzebra import app
|
||||
|
||||
ANG_CHUNK_DEFAULTS = {"2theta": 30, "gamma": 30, "omega": 30, "chi": 35, "phi": 35, "nu": 10}
|
||||
SORT_OPT_BI = ["2theta", "chi", "phi", "omega"]
|
||||
SORT_OPT_NB = ["gamma", "nu", "omega"]
|
||||
|
||||
|
||||
def create():
|
||||
doc = curdoc()
|
||||
log = doc.logger
|
||||
ang_lims = {}
|
||||
cif_data = {}
|
||||
params = {}
|
||||
res_files = {}
|
||||
_update_slice = None
|
||||
app_dlfiles = app.DownloadFiles(n_files=1)
|
||||
|
||||
anglim_div = Div(text="Angular min/max limits:", margin=(5, 5, 0, 5))
|
||||
sttgamma_ti = TextInput(title="stt/gamma", width=100)
|
||||
omega_ti = TextInput(title="omega", width=100)
|
||||
chinu_ti = TextInput(title="chi/nu", width=100)
|
||||
phi_ti = TextInput(title="phi", width=100)
|
||||
|
||||
def _update_ang_lims(ang_lims):
|
||||
sttgamma_ti.value = " ".join(ang_lims["gamma"][:2])
|
||||
omega_ti.value = " ".join(ang_lims["omega"][:2])
|
||||
if ang_lims["geom"] == "nb":
|
||||
chinu_ti.value = " ".join(ang_lims["nu"][:2])
|
||||
phi_ti.value = ""
|
||||
else: # ang_lims["geom"] == "bi"
|
||||
chinu_ti.value = " ".join(ang_lims["chi"][:2])
|
||||
phi_ti.value = " ".join(ang_lims["phi"][:2])
|
||||
|
||||
def _update_params(params):
|
||||
if "WAVE" in params:
|
||||
wavelen_input.value = params["WAVE"]
|
||||
if "SPGR" in params:
|
||||
cryst_space_group.value = params["SPGR"]
|
||||
if "CELL" in params:
|
||||
cryst_cell.value = params["CELL"]
|
||||
if "UBMAT" in params:
|
||||
ub_matrix.value = " ".join(params["UBMAT"])
|
||||
if "HLIM" in params:
|
||||
ranges_hkl.value = params["HLIM"]
|
||||
if "SRANG" in params:
|
||||
ranges_srang.value = params["SRANG"]
|
||||
if "lattiCE" in params:
|
||||
magstruct_lattice.value = params["lattiCE"]
|
||||
if "kvect" in params:
|
||||
magstruct_kvec.value = params["kvect"]
|
||||
|
||||
def open_geom_callback(_attr, _old, new):
|
||||
nonlocal ang_lims
|
||||
with io.StringIO(base64.b64decode(new).decode()) as fileobj:
|
||||
ang_lims = pyzebra.read_geom_file(fileobj)
|
||||
_update_ang_lims(ang_lims)
|
||||
|
||||
open_geom_div = Div(text="Open GEOM:")
|
||||
open_geom = FileInput(accept=".geom", width=200)
|
||||
open_geom.on_change("value", open_geom_callback)
|
||||
|
||||
def open_cfl_callback(_attr, _old, new):
|
||||
nonlocal params
|
||||
with io.StringIO(base64.b64decode(new).decode()) as fileobj:
|
||||
params = pyzebra.read_cfl_file(fileobj)
|
||||
_update_params(params)
|
||||
|
||||
open_cfl_div = Div(text="Open CFL:")
|
||||
open_cfl = FileInput(accept=".cfl", width=200)
|
||||
open_cfl.on_change("value", open_cfl_callback)
|
||||
|
||||
def open_cif_callback(_attr, _old, new):
|
||||
nonlocal cif_data
|
||||
with io.StringIO(base64.b64decode(new).decode()) as fileobj:
|
||||
cif_data = pyzebra.read_cif_file(fileobj)
|
||||
_update_params(cif_data)
|
||||
|
||||
open_cif_div = Div(text="Open CIF:")
|
||||
open_cif = FileInput(accept=".cif", width=200)
|
||||
open_cif.on_change("value", open_cif_callback)
|
||||
|
||||
wavelen_div = Div(text="Wavelength:", margin=(5, 5, 0, 5))
|
||||
wavelen_input = TextInput(title="value", width=70)
|
||||
|
||||
def wavelen_select_callback(_attr, _old, new):
|
||||
if new:
|
||||
wavelen_input.value = new
|
||||
else:
|
||||
wavelen_input.value = ""
|
||||
|
||||
wavelen_select = Select(
|
||||
title="preset", options=["", "0.788", "1.178", "1.383", "2.305"], width=70
|
||||
)
|
||||
wavelen_select.on_change("value", wavelen_select_callback)
|
||||
|
||||
cryst_div = Div(text="Crystal structure:", margin=(5, 5, 0, 5))
|
||||
cryst_space_group = TextInput(title="space group", width=100)
|
||||
cryst_cell = TextInput(title="cell", width=250)
|
||||
|
||||
def ub_matrix_calc_callback():
|
||||
params = dict()
|
||||
params["SPGR"] = cryst_space_group.value
|
||||
params["CELL"] = cryst_cell.value
|
||||
try:
|
||||
ub = pyzebra.calc_ub_matrix(params, log=log)
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
return
|
||||
ub_matrix.value = " ".join(ub)
|
||||
|
||||
ub_matrix_calc = Button(label="UB matrix:", button_type="primary", width=100)
|
||||
ub_matrix_calc.on_click(ub_matrix_calc_callback)
|
||||
|
||||
ub_matrix = TextInput(title="\u200B", width=600)
|
||||
|
||||
ranges_div = Div(text="Ranges:", margin=(5, 5, 0, 5))
|
||||
ranges_hkl = TextInput(title="HKL", value="-25 25 -25 25 -25 25", width=250)
|
||||
ranges_srang = TextInput(title="sin(θ)/λ", value="0.0 0.7", width=100)
|
||||
|
||||
magstruct_div = Div(text="Magnetic structure:", margin=(5, 5, 0, 5))
|
||||
magstruct_lattice = TextInput(title="lattice", width=100)
|
||||
magstruct_kvec = TextAreaInput(title="k vector", width=150)
|
||||
|
||||
def sorting0_callback(_attr, _old, new):
|
||||
sorting_0_dt.value = ANG_CHUNK_DEFAULTS[new]
|
||||
|
||||
def sorting1_callback(_attr, _old, new):
|
||||
sorting_1_dt.value = ANG_CHUNK_DEFAULTS[new]
|
||||
|
||||
def sorting2_callback(_attr, _old, new):
|
||||
sorting_2_dt.value = ANG_CHUNK_DEFAULTS[new]
|
||||
|
||||
sorting_0 = Select(title="1st", width=100)
|
||||
sorting_0.on_change("value", sorting0_callback)
|
||||
sorting_0_dt = NumericInput(title="Δ", width=70)
|
||||
sorting_1 = Select(title="2nd", width=100)
|
||||
sorting_1.on_change("value", sorting1_callback)
|
||||
sorting_1_dt = NumericInput(title="Δ", width=70)
|
||||
sorting_2 = Select(title="3rd", width=100)
|
||||
sorting_2.on_change("value", sorting2_callback)
|
||||
sorting_2_dt = NumericInput(title="Δ", width=70)
|
||||
|
||||
def geom_radiogroup_callback(_attr, _old, new):
|
||||
nonlocal ang_lims, params
|
||||
if new == 0:
|
||||
geom_file = pyzebra.get_zebraBI_default_geom_file()
|
||||
sort_opt = SORT_OPT_BI
|
||||
else:
|
||||
geom_file = pyzebra.get_zebraNB_default_geom_file()
|
||||
sort_opt = SORT_OPT_NB
|
||||
cfl_file = pyzebra.get_zebra_default_cfl_file()
|
||||
|
||||
ang_lims = pyzebra.read_geom_file(geom_file)
|
||||
_update_ang_lims(ang_lims)
|
||||
params = pyzebra.read_cfl_file(cfl_file)
|
||||
_update_params(params)
|
||||
|
||||
sorting_0.options = sorting_1.options = sorting_2.options = sort_opt
|
||||
sorting_0.value = sort_opt[0]
|
||||
sorting_1.value = sort_opt[1]
|
||||
sorting_2.value = sort_opt[2]
|
||||
|
||||
geom_radiogroup_div = Div(text="Geometry:", margin=(5, 5, 0, 5))
|
||||
geom_radiogroup = RadioGroup(labels=["bisecting", "normal beam"], width=150)
|
||||
geom_radiogroup.on_change("active", geom_radiogroup_callback)
|
||||
geom_radiogroup.active = 0
|
||||
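# Assigning .active here triggers geom_radiogroup_callback once at startup,
# pre-loading the default bisecting-geometry .geom and .cfl templates.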
|
||||
def go_button_callback():
|
||||
ang_lims["gamma"][0], ang_lims["gamma"][1] = sttgamma_ti.value.strip().split()
|
||||
ang_lims["omega"][0], ang_lims["omega"][1] = omega_ti.value.strip().split()
|
||||
if ang_lims["geom"] == "nb":
|
||||
ang_lims["nu"][0], ang_lims["nu"][1] = chinu_ti.value.strip().split()
|
||||
else: # ang_lims["geom"] == "bi"
|
||||
ang_lims["chi"][0], ang_lims["chi"][1] = chinu_ti.value.strip().split()
|
||||
ang_lims["phi"][0], ang_lims["phi"][1] = phi_ti.value.strip().split()
|
||||
|
||||
if cif_data:
|
||||
params.update(cif_data)
|
||||
|
||||
params["WAVE"] = wavelen_input.value
|
||||
params["SPGR"] = cryst_space_group.value
|
||||
params["CELL"] = cryst_cell.value
|
||||
params["UBMAT"] = ub_matrix.value.split()
|
||||
params["HLIM"] = ranges_hkl.value
|
||||
params["SRANG"] = ranges_srang.value
|
||||
params["lattiCE"] = magstruct_lattice.value
|
||||
kvects = magstruct_kvec.value.split("\n")
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
geom_path = os.path.join(temp_dir, "zebra.geom")
|
||||
if open_geom.value:
|
||||
geom_template = io.StringIO(base64.b64decode(open_geom.value).decode())
|
||||
else:
|
||||
geom_template = None
|
||||
pyzebra.export_geom_file(geom_path, ang_lims, geom_template)
|
||||
|
||||
log.info(f"Content of {geom_path}:")
|
||||
with open(geom_path) as f:
|
||||
log.info(f.read())
|
||||
|
||||
priority = [sorting_0.value, sorting_1.value, sorting_2.value]
|
||||
chunks = [sorting_0_dt.value, sorting_1_dt.value, sorting_2_dt.value]
|
||||
if geom_radiogroup.active == 0:
|
||||
sort_hkl_file = pyzebra.sort_hkl_file_bi
|
||||
priority.extend(set(SORT_OPT_BI) - set(priority))
|
||||
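# Bisecting geometry sorts on all four angles, so whichever of SORT_OPT_BI was
# not picked in the three Select widgets is appended here; note that the set
# difference carries no defined order.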
else:
|
||||
sort_hkl_file = pyzebra.sort_hkl_file_nb
|
||||
|
||||
# run sxtal_refgen for each kvect provided
|
||||
for i, kvect in enumerate(kvects, start=1):
|
||||
params["kvect"] = kvect
|
||||
if open_cfl.filename:
|
||||
base_fname = f"{os.path.splitext(open_cfl.filename)[0]}_{i}"
|
||||
else:
|
||||
base_fname = f"zebra_{i}"
|
||||
|
||||
cfl_path = os.path.join(temp_dir, base_fname + ".cfl")
|
||||
if open_cfl.value:
|
||||
cfl_template = io.StringIO(base64.b64decode(open_cfl.value).decode())
|
||||
else:
|
||||
cfl_template = None
|
||||
pyzebra.export_cfl_file(cfl_path, params, cfl_template)
|
||||
|
||||
log.info(f"Content of {cfl_path}:")
|
||||
with open(cfl_path) as f:
|
||||
log.info(f.read())
|
||||
|
||||
comp_proc = subprocess.run(
|
||||
[pyzebra.SXTAL_REFGEN_PATH, cfl_path],
|
||||
cwd=temp_dir,
|
||||
check=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
text=True,
|
||||
)
|
||||
log.info(" ".join(comp_proc.args))
|
||||
log.info(comp_proc.stdout)
|
||||
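# With check=True, subprocess.run raises CalledProcessError on a non-zero
# exit status, so a failed Sxtal_refgen run aborts this callback before any
# of its output files are read back.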
|
||||
if i == 1: # all hkl files are identical, so keep only one
|
||||
hkl_fname = base_fname + ".hkl"
|
||||
hkl_fpath = os.path.join(temp_dir, hkl_fname)
|
||||
with open(hkl_fpath) as f:
|
||||
res_files[hkl_fname] = f.read()
|
||||
|
||||
hkl_fname_sorted = base_fname + "_sorted.hkl"
|
||||
hkl_fpath_sorted = os.path.join(temp_dir, hkl_fname_sorted)
|
||||
sort_hkl_file(hkl_fpath, hkl_fpath_sorted, priority, chunks)
|
||||
with open(hkl_fpath_sorted) as f:
|
||||
res_files[hkl_fname_sorted] = f.read()
|
||||
|
||||
mhkl_fname = base_fname + ".mhkl"
|
||||
mhkl_fpath = os.path.join(temp_dir, mhkl_fname)
|
||||
with open(mhkl_fpath) as f:
|
||||
res_files[mhkl_fname] = f.read()
|
||||
|
||||
mhkl_fname_sorted = base_fname + "_sorted.mhkl"
|
||||
mhkl_fpath_sorted = os.path.join(temp_dir, hkl_fname_sorted)
|
||||
sort_hkl_file(mhkl_fpath, mhkl_fpath_sorted, priority, chunks)
|
||||
with open(mhkl_fpath_sorted) as f:
|
||||
res_files[mhkl_fname_sorted] = f.read()
|
||||
|
||||
created_lists.options = list(res_files)
|
||||
|
||||
go_button = Button(label="GO", button_type="primary", width=50)
|
||||
go_button.on_click(go_button_callback)
|
||||
|
||||
def created_lists_callback(_attr, _old, new):
|
||||
sel_file = new[0]
|
||||
file_text = res_files[sel_file]
|
||||
preview_lists.value = file_text
|
||||
app_dlfiles.set_contents([file_text])
|
||||
app_dlfiles.set_names([sel_file])
|
||||
|
||||
created_lists = MultiSelect(title="Created lists:", width=200, height=150)
|
||||
created_lists.on_change("value", created_lists_callback)
|
||||
preview_lists = TextAreaInput(title="Preview selected list:", width=600, height=150)
|
||||
|
||||
def plot_list_callback():
|
||||
nonlocal _update_slice
|
||||
fname = created_lists.value
|
||||
with io.StringIO(preview_lists.value) as fileobj:
|
||||
fdata = pyzebra.parse_hkl(fileobj, fname)
|
||||
_update_slice = _prepare_plotting(fname, [fdata])
|
||||
_update_slice()
|
||||
|
||||
plot_list = Button(label="Plot selected list", button_type="primary", width=200)
|
||||
plot_list.on_click(plot_list_callback)
|
||||
|
||||
# Plot
|
||||
upload_data_div = Div(text="Open hkl/mhkl data:")
|
||||
upload_data = FileInput(accept=".hkl,.mhkl", multiple=True, width=200)
|
||||
|
||||
min_grid_x = -10
|
||||
max_grid_x = 10
|
||||
min_grid_y = -10
|
||||
max_grid_y = 10
|
||||
cmap = Dark2[8]
|
||||
syms = ["circle", "inverted_triangle", "square", "diamond", "star", "triangle"]
|
||||
|
||||
def _prepare_plotting(filenames, filedata):
|
||||
orth_dir = list(map(float, hkl_normal.value.split()))
|
||||
x_dir = list(map(float, hkl_in_plane_x.value.split()))
|
||||
|
||||
k = np.array(k_vectors.value.split()).astype(float).reshape(-1, 3)
|
||||
tol_k = tol_k_ni.value
|
||||
|
||||
lattice = list(map(float, cryst_cell.value.strip().split()))
|
||||
alpha = lattice[3] * np.pi / 180.0
|
||||
beta = lattice[4] * np.pi / 180.0
|
||||
gamma = lattice[5] * np.pi / 180.0
|
||||
|
||||
# reciprocal angle parameters
|
||||
beta_star = np.arccos(
|
||||
(np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
|
||||
)
|
||||
gamma_star = np.arccos(
|
||||
(np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
|
||||
)
|
||||
|
||||
# conversion matrix
|
||||
M = np.array(
|
||||
[
|
||||
[1, np.cos(gamma_star), np.cos(beta_star)],
|
||||
[0, np.sin(gamma_star), -np.sin(beta_star) * np.cos(alpha)],
|
||||
[0, 0, np.sin(beta_star) * np.sin(alpha)],
|
||||
]
|
||||
)
|
||||
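# M converts fractional hkl indices to Cartesian coordinates using the
# reciprocal-cell angles. Quick sanity check (hypothetical cubic cell,
# alpha = beta = gamma = 90 deg): both starred angles come out at 90 deg
# and M reduces to the identity:
#
#   a = b = g = np.pi / 2
#   bs = np.arccos((np.cos(a) * np.cos(g) - np.cos(b)) / (np.sin(a) * np.sin(g)))
#   gs = np.arccos((np.cos(a) * np.cos(b) - np.cos(g)) / (np.sin(a) * np.sin(b)))
#   M_cubic = np.array([
#       [1, np.cos(gs), np.cos(bs)],
#       [0, np.sin(gs), -np.sin(bs) * np.cos(a)],
#       [0, 0, np.sin(bs) * np.sin(a)],
#   ])
#   assert np.allclose(M_cubic, np.eye(3))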
|
||||
# Get last lattice vector
|
||||
y_dir = np.cross(x_dir, orth_dir) # Second axes of plotting plane
|
||||
|
||||
# Rescale such that smallest element of y-dir vector is 1
|
||||
y_dir2 = y_dir[y_dir != 0]
|
||||
min_val = np.min(np.abs(y_dir2))
|
||||
y_dir = y_dir / min_val
|
||||
|
||||
# Possibly flip direction of ydir:
|
||||
if y_dir[np.argmax(abs(y_dir))] < 0:
|
||||
y_dir = -y_dir
|
||||
|
||||
# Display the resulting y_dir
|
||||
hkl_in_plane_y.value = " ".join([f"{val:.1f}" for val in y_dir])
|
||||
|
||||
# Save length of lattice vectors
|
||||
x_length = np.linalg.norm(x_dir)
|
||||
y_length = np.linalg.norm(y_dir)
|
||||
|
||||
# Save str for labels
|
||||
xlabel_str = " ".join(map(str, x_dir))
|
||||
ylabel_str = " ".join(map(str, y_dir))
|
||||
|
||||
# Normalize lattice vectors
|
||||
y_dir = y_dir / np.linalg.norm(y_dir)
|
||||
x_dir = x_dir / np.linalg.norm(x_dir)
|
||||
orth_dir = orth_dir / np.linalg.norm(orth_dir)
|
||||
|
||||
# Calculate cartesian equivalents of lattice vectors
|
||||
x_c = np.matmul(M, x_dir)
|
||||
y_c = np.matmul(M, y_dir)
|
||||
o_c = np.matmul(M, orth_dir)
|
||||
|
||||
# Calculate vertical direction in plotting plane
y_vert = np.cross(x_c, o_c)  # vertical direction in plotting plane
|
||||
if y_vert[np.argmax(abs(y_vert))] < 0:
|
||||
y_vert = -y_vert
|
||||
y_vert = y_vert / np.linalg.norm(y_vert)
|
||||
|
||||
# Normalize all directions
|
||||
y_c = y_c / np.linalg.norm(y_c)
|
||||
x_c = x_c / np.linalg.norm(x_c)
|
||||
o_c = o_c / np.linalg.norm(o_c)
|
||||
|
||||
# Read all data
|
||||
hkl_coord = []
|
||||
intensity_vec = []
|
||||
k_flag_vec = []
|
||||
file_flag_vec = []
|
||||
|
||||
for j, fdata in enumerate(filedata):
|
||||
for ind in range(len(fdata["counts"])):
|
||||
# Recognize k_flag_vec
|
||||
hkl = np.array([fdata["h"][ind], fdata["k"][ind], fdata["l"][ind]])
|
||||
reduced_hkl_m = np.minimum(1 - hkl % 1, hkl % 1)
|
||||
for k_ind, _k in enumerate(k):
|
||||
if all(np.abs(reduced_hkl_m - _k) < tol_k):
|
||||
k_flag_vec.append(k_ind)
|
||||
break
|
||||
else:
|
||||
# not required
|
||||
continue
|
||||
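# for/else: the else branch runs only if no propagation vector matched (no
# break), and the reflection is then skipped. reduced_hkl_m folds every hkl
# component into [0, 0.5], so e.g. hkl = (1.5, 0, 0) matches k = (0.5, 0, 0)
# within tol_k.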
|
||||
# Save data
|
||||
hkl_coord.append(hkl)
|
||||
intensity_vec.append(fdata["counts"][ind])
|
||||
file_flag_vec.append(j)
|
||||
|
||||
x_spacing = np.dot(M @ x_dir, x_c) * x_length
|
||||
y_spacing = np.dot(M @ y_dir, y_vert) * y_length
|
||||
y_spacingx = np.dot(M @ y_dir, x_c) * y_length
|
||||
|
||||
# Plot coordinate system
|
||||
arrow1.x_end = x_spacing
|
||||
arrow1.y_end = 0
|
||||
arrow2.x_end = y_spacingx
|
||||
arrow2.y_end = y_spacing
|
||||
|
||||
# Add labels
|
||||
kvect_source.data.update(
|
||||
x=[x_spacing / 4, -0.1],
|
||||
y=[x_spacing / 4 - 0.5, y_spacing / 2],
|
||||
text=[xlabel_str, ylabel_str],
|
||||
)
|
||||
|
||||
# Plot grid lines
|
||||
xs, ys = [], []
|
||||
xs_minor, ys_minor = [], []
|
||||
for yy in np.arange(min_grid_y, max_grid_y, 1):
|
||||
# Calculate end and start point
|
||||
hkl1 = min_grid_x * x_dir + yy * y_dir
|
||||
hkl2 = max_grid_x * x_dir + yy * y_dir
|
||||
hkl1 = M @ hkl1
|
||||
hkl2 = M @ hkl2
|
||||
|
||||
# Project points onto axes
|
||||
x1 = np.dot(x_c, hkl1) * x_length
|
||||
y1 = np.dot(y_vert, hkl1) * y_length
|
||||
x2 = np.dot(x_c, hkl2) * x_length
|
||||
y2 = np.dot(y_vert, hkl2) * y_length
|
||||
|
||||
xs.append([x1, x2])
|
||||
ys.append([y1, y2])
|
||||
|
||||
for xx in np.arange(min_grid_x, max_grid_x, 1):
|
||||
# Calculate end and start point
|
||||
hkl1 = xx * x_dir + min_grid_y * y_dir
|
||||
hkl2 = xx * x_dir + max_grid_y * y_dir
|
||||
hkl1 = M @ hkl1
|
||||
hkl2 = M @ hkl2
|
||||
|
||||
# Project points onto axes
|
||||
x1 = np.dot(x_c, hkl1) * x_length
|
||||
y1 = np.dot(y_vert, hkl1) * y_length
|
||||
x2 = np.dot(x_c, hkl2) * x_length
|
||||
y2 = np.dot(y_vert, hkl2) * y_length
|
||||
|
||||
xs.append([x1, x2])
|
||||
ys.append([y1, y2])
|
||||
|
||||
for yy in np.arange(min_grid_y, max_grid_y, 0.5):
|
||||
# Calculate end and start point
|
||||
hkl1 = min_grid_x * x_dir + yy * y_dir
|
||||
hkl2 = max_grid_x * x_dir + yy * y_dir
|
||||
hkl1 = M @ hkl1
|
||||
hkl2 = M @ hkl2
|
||||
|
||||
# Project points onto axes
|
||||
x1 = np.dot(x_c, hkl1) * x_length
|
||||
y1 = np.dot(y_vert, hkl1) * y_length
|
||||
x2 = np.dot(x_c, hkl2) * x_length
|
||||
y2 = np.dot(y_vert, hkl2) * y_length
|
||||
|
||||
xs_minor.append([x1, x2])
|
||||
ys_minor.append([y1, y2])
|
||||
|
||||
for xx in np.arange(min_grid_x, max_grid_x, 0.5):
|
||||
# Calculate end and start point
|
||||
hkl1 = xx * x_dir + min_grid_y * y_dir
|
||||
hkl2 = xx * x_dir + max_grid_y * y_dir
|
||||
hkl1 = M @ hkl1
|
||||
hkl2 = M @ hkl2
|
||||
|
||||
# Project points onto axes
|
||||
x1 = np.dot(x_c, hkl1) * x_length
|
||||
y1 = np.dot(y_vert, hkl1) * y_length
|
||||
x2 = np.dot(x_c, hkl2) * x_length
|
||||
y2 = np.dot(y_vert, hkl2) * y_length
|
||||
|
||||
xs_minor.append([x1, x2])
|
||||
ys_minor.append([y1, y2])
|
||||
|
||||
grid_source.data.update(xs=xs, ys=ys)
|
||||
minor_grid_source.data.update(xs=xs_minor, ys=ys_minor)
|
||||
|
||||
def _update_slice():
|
||||
cut_tol = hkl_delta.value
|
||||
cut_or = hkl_cut.value
|
||||
|
||||
# different symbols based on file number
|
||||
file_flag = 0 in disting_opt_cb.active
|
||||
# scale marker size according to intensity
|
||||
intensity_flag = 1 in disting_opt_cb.active
|
||||
# use color to mark different propagation vectors
|
||||
prop_legend_flag = 2 in disting_opt_cb.active
|
||||
|
||||
scan_x, scan_y = [], []
|
||||
scan_m, scan_s, scan_c, scan_l, scan_hkl = [], [], [], [], []
|
||||
for j in range(len(hkl_coord)):
|
||||
# Get middle hkl from list
|
||||
hklm = M @ hkl_coord[j]
|
||||
|
||||
# Decide if point is in the cut
|
||||
proj = np.dot(hklm, o_c)
|
||||
if abs(proj - cut_or) >= cut_tol:
|
||||
continue
|
||||
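# A reflection belongs to the slice when its projection onto the plane
# normal o_c lies within hkl_delta of the requested cut, i.e.
# |dot(M @ hkl, o_c) - cut| < delta.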
|
||||
# Project onto axes
|
||||
hklmx = np.dot(hklm, x_c)
|
||||
hklmy = np.dot(hklm, y_vert)
|
||||
|
||||
if intensity_flag and max(intensity_vec) != 0:
|
||||
markersize = max(6, int(intensity_vec[j] / max(intensity_vec) * 30))
|
||||
else:
|
||||
markersize = 6
|
||||
|
||||
if file_flag:
|
||||
plot_symbol = syms[file_flag_vec[j]]
|
||||
else:
|
||||
plot_symbol = "circle"
|
||||
|
||||
if prop_legend_flag:
|
||||
col_value = cmap[k_flag_vec[j]]
|
||||
else:
|
||||
col_value = "black"
|
||||
|
||||
# Plot middle point of scan
|
||||
scan_x.append(hklmx)
|
||||
scan_y.append(hklmy)
|
||||
scan_m.append(plot_symbol)
|
||||
scan_s.append(markersize)
|
||||
|
||||
# Color and legend label
|
||||
scan_c.append(col_value)
|
||||
scan_l.append(filenames[file_flag_vec[j]])
|
||||
scan_hkl.append(hkl_coord[j])
|
||||
|
||||
scatter_source.data.update(
|
||||
x=scan_x, y=scan_y, m=scan_m, s=scan_s, c=scan_c, l=scan_l, hkl=scan_hkl
|
||||
)
|
||||
|
||||
# Legend items for different file entries (symbol)
|
||||
legend_items = []
|
||||
if file_flag:
|
||||
labels, inds = np.unique(scatter_source.data["l"], return_index=True)
|
||||
for label, ind in zip(labels, inds):
|
||||
legend_items.append(LegendItem(label=label, renderers=[scatter], index=ind))
|
||||
|
||||
# Legend items for propagation vector (color)
|
||||
if prop_legend_flag:
|
||||
labels, inds = np.unique(scatter_source.data["c"], return_index=True)
|
||||
for label, ind in zip(labels, inds):
|
||||
label = f"k={k[cmap.index(label)]}"
|
||||
legend_items.append(LegendItem(label=label, renderers=[scatter], index=ind))
|
||||
|
||||
plot.legend.items = legend_items
|
||||
|
||||
return _update_slice
|
||||
|
||||
def plot_file_callback():
|
||||
nonlocal _update_slice
|
||||
fnames = []
|
||||
fdata = []
|
||||
for j, fname in enumerate(upload_data.filename):
|
||||
with io.StringIO(base64.b64decode(upload_data.value[j]).decode()) as file:
|
||||
_, ext = os.path.splitext(fname)
|
||||
try:
|
||||
file_data = pyzebra.parse_hkl(file, ext)
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
return
|
||||
|
||||
fnames.append(fname)
|
||||
fdata.append(file_data)
|
||||
|
||||
_update_slice = _prepare_plotting(fnames, fdata)
|
||||
_update_slice()
|
||||
|
||||
plot_file = Button(label="Plot selected file(s)", button_type="primary", width=200)
|
||||
plot_file.on_click(plot_file_callback)
|
||||
|
||||
plot = figure(height=550, width=550 + 32, tools="pan,wheel_zoom,reset")
|
||||
plot.toolbar.logo = None
|
||||
|
||||
plot.xaxis.visible = False
|
||||
plot.xgrid.visible = False
|
||||
plot.yaxis.visible = False
|
||||
plot.ygrid.visible = False
|
||||
|
||||
arrow1 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10))
|
||||
plot.add_layout(arrow1)
|
||||
arrow2 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10))
|
||||
plot.add_layout(arrow2)
|
||||
|
||||
kvect_source = ColumnDataSource(dict(x=[], y=[], text=[]))
|
||||
plot.text(source=kvect_source)
|
||||
|
||||
grid_source = ColumnDataSource(dict(xs=[], ys=[]))
|
||||
plot.multi_line(source=grid_source, line_color="gray")
|
||||
|
||||
minor_grid_source = ColumnDataSource(dict(xs=[], ys=[]))
|
||||
plot.multi_line(source=minor_grid_source, line_color="gray", line_dash="dotted")
|
||||
|
||||
scatter_source = ColumnDataSource(dict(x=[], y=[], m=[], s=[], c=[], l=[], hkl=[]))
|
||||
scatter = plot.scatter(
|
||||
source=scatter_source, marker="m", size="s", fill_color="c", line_color="c"
|
||||
)
|
||||
|
||||
plot.x_range.renderers = [scatter]
|
||||
plot.y_range.renderers = [scatter]
|
||||
|
||||
plot.add_layout(Legend(items=[], location="top_left", click_policy="hide"))
|
||||
|
||||
plot.add_tools(HoverTool(renderers=[scatter], tooltips=[("hkl", "@hkl")]))
|
||||
|
||||
hkl_div = Div(text="HKL:", margin=(5, 5, 0, 5))
|
||||
hkl_normal = TextInput(title="normal", value="0 0 1", width=70)
|
||||
|
||||
def hkl_cut_callback(_attr, _old, _new):
|
||||
if _update_slice is not None:
|
||||
_update_slice()
|
||||
|
||||
hkl_cut = Spinner(title="cut", value=0, step=0.1, width=70)
|
||||
hkl_cut.on_change("value_throttled", hkl_cut_callback)
|
||||
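# "value_throttled" fires only once the spinner value settles, instead of on
# every intermediate step, so the slice is not recomputed while scrubbing.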
|
||||
hkl_delta = NumericInput(title="delta", value=0.1, mode="float", width=70)
|
||||
hkl_in_plane_x = TextInput(title="in-plane X", value="1 0 0", width=70)
|
||||
hkl_in_plane_y = TextInput(title="in-plane Y", value="", width=100, disabled=True)
|
||||
|
||||
disting_opt_div = Div(text="Distinguish options:", margin=(5, 5, 0, 5))
|
||||
disting_opt_cb = CheckboxGroup(
|
||||
labels=["files (symbols)", "intensities (size)", "k vectors nucl/magn (colors)"],
|
||||
active=[0, 1, 2],
|
||||
width=200,
|
||||
)
|
||||
|
||||
k_vectors = TextAreaInput(
|
||||
title="k vectors:", value="0.0 0.0 0.0\n0.5 0.0 0.0\n0.5 0.5 0.0", width=150
|
||||
)
|
||||
tol_k_ni = NumericInput(title="k tolerance:", value=0.01, mode="float", width=100)
|
||||
|
||||
fileinput_layout = row(open_cfl_div, open_cfl, open_cif_div, open_cif, open_geom_div, open_geom)
|
||||
|
||||
geom_layout = column(geom_radiogroup_div, geom_radiogroup)
|
||||
wavelen_layout = column(wavelen_div, row(wavelen_select, wavelen_input))
|
||||
anglim_layout = column(anglim_div, row(sttgamma_ti, omega_ti, chinu_ti, phi_ti))
|
||||
cryst_layout = column(cryst_div, row(cryst_space_group, cryst_cell))
|
||||
ubmat_layout = row(column(Spacer(height=19), ub_matrix_calc), ub_matrix)
|
||||
ranges_layout = column(ranges_div, row(ranges_hkl, ranges_srang))
|
||||
magstruct_layout = column(magstruct_div, row(magstruct_lattice, magstruct_kvec))
|
||||
sorting_layout = row(
|
||||
sorting_0,
|
||||
sorting_0_dt,
|
||||
Spacer(width=30),
|
||||
sorting_1,
|
||||
sorting_1_dt,
|
||||
Spacer(width=30),
|
||||
sorting_2,
|
||||
sorting_2_dt,
|
||||
)
|
||||
|
||||
column1_layout = column(
|
||||
fileinput_layout,
|
||||
Spacer(height=10),
|
||||
row(geom_layout, wavelen_layout, Spacer(width=50), anglim_layout),
|
||||
cryst_layout,
|
||||
ubmat_layout,
|
||||
row(ranges_layout, Spacer(width=50), magstruct_layout),
|
||||
row(sorting_layout, Spacer(width=30), column(Spacer(height=19), go_button)),
|
||||
row(created_lists, preview_lists),
|
||||
row(app_dlfiles.button, plot_list),
|
||||
)
|
||||
|
||||
column2_layout = column(
|
||||
row(upload_data_div, upload_data, plot_file),
|
||||
row(
|
||||
plot,
|
||||
column(
|
||||
hkl_div,
|
||||
row(hkl_normal, hkl_cut, hkl_delta),
|
||||
row(hkl_in_plane_x, hkl_in_plane_y),
|
||||
k_vectors,
|
||||
tol_k_ni,
|
||||
disting_opt_div,
|
||||
disting_opt_cb,
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
tab_layout = row(column1_layout, column2_layout)
|
||||
|
||||
return Panel(child=tab_layout, title="ccl prepare")
@@ -1,5 +1,6 @@
import base64
import io
import os
import re
import tempfile

@@ -10,24 +11,27 @@ from bokeh.models import (
    Div,
    FileInput,
    Panel,
    RadioButtonGroup,
    Select,
    Spacer,
    Tabs,
    TextAreaInput,
    TextInput,
)

import pyzebra
from pyzebra.anatric import DATA_FACTORY_IMPLEMENTATION, REFLECTION_PRINTER_FORMATS
from pyzebra import DATA_FACTORY_IMPLEMENTATION, REFLECTION_PRINTER_FORMATS


def create():
    doc = curdoc()
    log = doc.logger
    config = pyzebra.AnatricConfig()

    def _load_config_file(file):
        config.load_from_file(file)

        logfile_textinput.value = config.logfile
        logfile_verbosity_select.value = config.logfile_verbosity
        logfile_verbosity.value = config.logfile_verbosity

        filelist_type.value = config.filelist_type
        filelist_format_textinput.value = config.filelist_format
@@ -42,11 +46,16 @@ def create():
        ub_textareainput.value = config.crystal_UB

        dataFactory_implementation_select.value = config.dataFactory_implementation
        dataFactory_dist1_textinput.value = config.dataFactory_dist1
        if config.dataFactory_dist1 is not None:
            dataFactory_dist1_textinput.value = config.dataFactory_dist1
        if config.dataFactory_dist2 is not None:
            dataFactory_dist2_textinput.value = config.dataFactory_dist2
        if config.dataFactory_dist3 is not None:
            dataFactory_dist3_textinput.value = config.dataFactory_dist3
        reflectionPrinter_format_select.value = config.reflectionPrinter_format

        set_active_widgets(config.algorithm)
        if config.algorithm == "adaptivemaxcog":
            algorithm_params.active = 0
            threshold_textinput.value = config.threshold
            shell_textinput.value = config.shell
            steepness_textinput.value = config.steepness
@@ -55,6 +64,7 @@ def create():
            aps_window_textinput.value = str(tuple(map(int, config.aps_window.values())))

        elif config.algorithm == "adaptivedynamic":
            algorithm_params.active = 1
            adm_window_textinput.value = str(tuple(map(int, config.adm_window.values())))
            border_textinput.value = str(tuple(map(int, config.border.values())))
            minWindow_textinput.value = str(tuple(map(int, config.minWindow.values())))
@@ -64,46 +74,16 @@ def create():
            loop_textinput.value = config.loop
            minPeakCount_textinput.value = config.minPeakCount
            displacementCurve_textinput.value = "\n".join(map(str, config.displacementCurve))

        else:
            raise ValueError("Unknown processing mode.")

    def set_active_widgets(implementation):
        if implementation == "adaptivemaxcog":
            mode_radio_button_group.active = 0
            disable_adaptivemaxcog = False
            disable_adaptivedynamic = True

        elif implementation == "adaptivedynamic":
            mode_radio_button_group.active = 1
            disable_adaptivemaxcog = True
            disable_adaptivedynamic = False
        else:
            raise ValueError("Implementation can be either 'adaptivemaxcog' or 'adaptivedynamic'")

        threshold_textinput.disabled = disable_adaptivemaxcog
        shell_textinput.disabled = disable_adaptivemaxcog
        steepness_textinput.disabled = disable_adaptivemaxcog
        duplicateDistance_textinput.disabled = disable_adaptivemaxcog
        maxequal_textinput.disabled = disable_adaptivemaxcog
        aps_window_textinput.disabled = disable_adaptivemaxcog

        adm_window_textinput.disabled = disable_adaptivedynamic
        border_textinput.disabled = disable_adaptivedynamic
        minWindow_textinput.disabled = disable_adaptivedynamic
        reflectionFile_textinput.disabled = disable_adaptivedynamic
        targetMonitor_textinput.disabled = disable_adaptivedynamic
        smoothSize_textinput.disabled = disable_adaptivedynamic
        loop_textinput.disabled = disable_adaptivedynamic
        minPeakCount_textinput.disabled = disable_adaptivedynamic
        displacementCurve_textinput.disabled = disable_adaptivedynamic

    upload_div = Div(text="Open XML configuration file:")

    def upload_button_callback(_attr, _old, new):
        with io.BytesIO(base64.b64decode(new)) as file:
            _load_config_file(file)

    upload_button = FileInput(accept=".xml")
    upload_div = Div(text="Open .xml config:")
    upload_button = FileInput(accept=".xml", width=200)
    upload_button.on_change("value", upload_button_callback)

    # General parameters
@@ -111,16 +91,14 @@ def create():
    def logfile_textinput_callback(_attr, _old, new):
        config.logfile = new

    logfile_textinput = TextInput(title="Logfile:", value="logfile.log", width=520)
    logfile_textinput = TextInput(title="Logfile:", value="logfile.log")
    logfile_textinput.on_change("value", logfile_textinput_callback)

    def logfile_verbosity_select_callback(_attr, _old, new):
    def logfile_verbosity_callback(_attr, _old, new):
        config.logfile_verbosity = new

    logfile_verbosity_select = Select(
        title="verbosity:", options=["0", "5", "10", "15", "30"], width=70
    )
    logfile_verbosity_select.on_change("value", logfile_verbosity_select_callback)
    logfile_verbosity = TextInput(title="verbosity:", width=70)
    logfile_verbosity.on_change("value", logfile_verbosity_callback)

    # ---- FileList
    def filelist_type_callback(_attr, _old, new):
@@ -132,7 +110,7 @@ def create():
    def filelist_format_textinput_callback(_attr, _old, new):
        config.filelist_format = new

    filelist_format_textinput = TextInput(title="format:", width=490)
    filelist_format_textinput = TextInput(title="format:", width=290)
    filelist_format_textinput.on_change("value", filelist_format_textinput_callback)

    def filelist_datapath_textinput_callback(_attr, _old, new):
@@ -147,20 +125,20 @@ def create():
            ranges.append(re.findall(r"\b\d+\b", line))
        config.filelist_ranges = ranges

    filelist_ranges_textareainput = TextAreaInput(title="ranges:", height=100)
    filelist_ranges_textareainput = TextAreaInput(title="ranges:", rows=1)
    filelist_ranges_textareainput.on_change("value", filelist_ranges_textareainput_callback)

    # ---- crystal
    def crystal_sample_textinput_callback(_attr, _old, new):
        config.crystal_sample = new

    crystal_sample_textinput = TextInput(title="Sample Name:")
    crystal_sample_textinput = TextInput(title="Sample Name:", width=290)
    crystal_sample_textinput.on_change("value", crystal_sample_textinput_callback)

    def lambda_textinput_callback(_attr, _old, new):
        config.crystal_lambda = new

    lambda_textinput = TextInput(title="lambda:", width=140)
    lambda_textinput = TextInput(title="lambda:", width=100)
    lambda_textinput.on_change("value", lambda_textinput_callback)

    def ub_textareainput_callback(_attr, _old, new):
@@ -172,19 +150,19 @@ def create():
    def zeroOM_textinput_callback(_attr, _old, new):
        config.crystal_zeroOM = new

    zeroOM_textinput = TextInput(title="zeroOM:", width=140)
    zeroOM_textinput = TextInput(title="zeroOM:", width=100)
    zeroOM_textinput.on_change("value", zeroOM_textinput_callback)

    def zeroSTT_textinput_callback(_attr, _old, new):
        config.crystal_zeroSTT = new

    zeroSTT_textinput = TextInput(title="zeroSTT:", width=140)
    zeroSTT_textinput = TextInput(title="zeroSTT:", width=100)
    zeroSTT_textinput.on_change("value", zeroSTT_textinput_callback)

    def zeroCHI_textinput_callback(_attr, _old, new):
        config.crystal_zeroCHI = new

    zeroCHI_textinput = TextInput(title="zeroCHI:", width=140)
    zeroCHI_textinput = TextInput(title="zeroCHI:", width=100)
    zeroCHI_textinput.on_change("value", zeroCHI_textinput_callback)

    # ---- DataFactory
@@ -192,16 +170,28 @@ def create():
        config.dataFactory_implementation = new

    dataFactory_implementation_select = Select(
        title="DataFactory implementation:", options=DATA_FACTORY_IMPLEMENTATION, width=300,
        title="DataFactory implement.:", options=DATA_FACTORY_IMPLEMENTATION, width=145
    )
    dataFactory_implementation_select.on_change("value", dataFactory_implementation_select_callback)

    def dataFactory_dist1_textinput_callback(_attr, _old, new):
        config.dataFactory_dist1 = new

    dataFactory_dist1_textinput = TextInput(title="dist1:", width=290)
    dataFactory_dist1_textinput = TextInput(title="dist1:", width=75)
    dataFactory_dist1_textinput.on_change("value", dataFactory_dist1_textinput_callback)

    def dataFactory_dist2_textinput_callback(_attr, _old, new):
        config.dataFactory_dist2 = new

    dataFactory_dist2_textinput = TextInput(title="dist2:", width=75)
    dataFactory_dist2_textinput.on_change("value", dataFactory_dist2_textinput_callback)

    def dataFactory_dist3_textinput_callback(_attr, _old, new):
        config.dataFactory_dist3 = new

    dataFactory_dist3_textinput = TextInput(title="dist3:", width=75)
    dataFactory_dist3_textinput.on_change("value", dataFactory_dist3_textinput_callback)

    # ---- BackgroundProcessor

    # ---- DetectorEfficiency
@@ -211,7 +201,7 @@ def create():
        config.reflectionPrinter_format = new

    reflectionPrinter_format_select = Select(
        title="ReflectionPrinter format:", options=REFLECTION_PRINTER_FORMATS, width=300,
        title="ReflectionPrinter format:", options=REFLECTION_PRINTER_FORMATS, width=145
    )
    reflectionPrinter_format_select.on_change("value", reflectionPrinter_format_select_callback)

@@ -220,42 +210,42 @@ def create():
    def threshold_textinput_callback(_attr, _old, new):
        config.threshold = new

    threshold_textinput = TextInput(title="Threshold:")
    threshold_textinput = TextInput(title="Threshold:", width=145)
    threshold_textinput.on_change("value", threshold_textinput_callback)

    # ---- shell
    def shell_textinput_callback(_attr, _old, new):
        config.shell = new

    shell_textinput = TextInput(title="Shell:")
    shell_textinput = TextInput(title="Shell:", width=145)
    shell_textinput.on_change("value", shell_textinput_callback)

    # ---- steepness
    def steepness_textinput_callback(_attr, _old, new):
        config.steepness = new

    steepness_textinput = TextInput(title="Steepness:")
    steepness_textinput = TextInput(title="Steepness:", width=145)
    steepness_textinput.on_change("value", steepness_textinput_callback)

    # ---- duplicateDistance
    def duplicateDistance_textinput_callback(_attr, _old, new):
        config.duplicateDistance = new

    duplicateDistance_textinput = TextInput(title="Duplicate Distance:")
    duplicateDistance_textinput = TextInput(title="Duplicate Distance:", width=145)
    duplicateDistance_textinput.on_change("value", duplicateDistance_textinput_callback)

    # ---- maxequal
    def maxequal_textinput_callback(_attr, _old, new):
        config.maxequal = new

    maxequal_textinput = TextInput(title="Max Equal:")
    maxequal_textinput = TextInput(title="Max Equal:", width=145)
    maxequal_textinput.on_change("value", maxequal_textinput_callback)

    # ---- window
    def aps_window_textinput_callback(_attr, _old, new):
        config.aps_window = dict(zip(("x", "y", "z"), re.findall(r"\b\d+\b", new)))

    aps_window_textinput = TextInput(title="Window (x, y, z):")
    aps_window_textinput = TextInput(title="Window (x, y, z):", width=145)
    aps_window_textinput.on_change("value", aps_window_textinput_callback)

    # Adaptive Dynamic Mask Integration (adaptivedynamic)
@@ -263,56 +253,56 @@ def create():
    def adm_window_textinput_callback(_attr, _old, new):
        config.adm_window = dict(zip(("x", "y", "z"), re.findall(r"\b\d+\b", new)))

    adm_window_textinput = TextInput(title="Window (x, y, z):")
    adm_window_textinput = TextInput(title="Window (x, y, z):", width=145)
    adm_window_textinput.on_change("value", adm_window_textinput_callback)

    # ---- border
    def border_textinput_callback(_attr, _old, new):
        config.border = dict(zip(("x", "y", "z"), re.findall(r"\b\d+\b", new)))

    border_textinput = TextInput(title="Border (x, y, z):")
    border_textinput = TextInput(title="Border (x, y, z):", width=145)
    border_textinput.on_change("value", border_textinput_callback)

    # ---- minWindow
    def minWindow_textinput_callback(_attr, _old, new):
        config.minWindow = dict(zip(("x", "y", "z"), re.findall(r"\b\d+\b", new)))

    minWindow_textinput = TextInput(title="Min Window (x, y, z):")
    minWindow_textinput = TextInput(title="Min Window (x, y, z):", width=145)
    minWindow_textinput.on_change("value", minWindow_textinput_callback)

    # ---- reflectionFile
    def reflectionFile_textinput_callback(_attr, _old, new):
        config.reflectionFile = new

    reflectionFile_textinput = TextInput(title="Reflection File:")
    reflectionFile_textinput = TextInput(title="Reflection File:", width=145)
    reflectionFile_textinput.on_change("value", reflectionFile_textinput_callback)

    # ---- targetMonitor
    def targetMonitor_textinput_callback(_attr, _old, new):
        config.targetMonitor = new

    targetMonitor_textinput = TextInput(title="Target Monitor:")
    targetMonitor_textinput = TextInput(title="Target Monitor:", width=145)
    targetMonitor_textinput.on_change("value", targetMonitor_textinput_callback)

    # ---- smoothSize
    def smoothSize_textinput_callback(_attr, _old, new):
        config.smoothSize = new

    smoothSize_textinput = TextInput(title="Smooth Size:")
    smoothSize_textinput = TextInput(title="Smooth Size:", width=145)
    smoothSize_textinput.on_change("value", smoothSize_textinput_callback)

    # ---- loop
    def loop_textinput_callback(_attr, _old, new):
        config.loop = new

    loop_textinput = TextInput(title="Loop:")
    loop_textinput = TextInput(title="Loop:", width=145)
    loop_textinput.on_change("value", loop_textinput_callback)

    # ---- minPeakCount
    def minPeakCount_textinput_callback(_attr, _old, new):
        config.minPeakCount = new

    minPeakCount_textinput = TextInput(title="Min Peak Count:")
    minPeakCount_textinput = TextInput(title="Min Peak Count:", width=145)
    minPeakCount_textinput.on_change("value", minPeakCount_textinput_callback)

    # ---- displacementCurve
@@ -323,87 +313,87 @@ def create():
        config.displacementCurve = maps

    displacementCurve_textinput = TextAreaInput(
        title="Displacement Curve (twotheta, x, y):", height=100
        title="Displ. Curve (2θ, x, y):", width=145, height=100
    )
    displacementCurve_textinput.on_change("value", displacementCurve_textinput_callback)

    def mode_radio_button_group_callback(active):
        if active == 0:
    def algorithm_tabs_callback(_attr, _old, new):
        if new == 0:
            config.algorithm = "adaptivemaxcog"
            set_active_widgets("adaptivemaxcog")
        else:
            config.algorithm = "adaptivedynamic"
            set_active_widgets("adaptivedynamic")

    mode_radio_button_group = RadioButtonGroup(
        labels=["Adaptive Peak Detection", "Adaptive Dynamic Integration"], active=0
    algorithm_params = Tabs(
        tabs=[
            Panel(
                child=column(
                    row(threshold_textinput, shell_textinput, steepness_textinput),
                    row(duplicateDistance_textinput, maxequal_textinput, aps_window_textinput),
                ),
                title="Peak Search",
            ),
            Panel(
                child=column(
                    row(adm_window_textinput, border_textinput, minWindow_textinput),
                    row(reflectionFile_textinput, targetMonitor_textinput, smoothSize_textinput),
                    row(loop_textinput, minPeakCount_textinput, displacementCurve_textinput),
                ),
                title="Dynamic Integration",
            ),
        ]
    )
    mode_radio_button_group.on_click(mode_radio_button_group_callback)
    set_active_widgets("adaptivemaxcog")
    algorithm_params.on_change("active", algorithm_tabs_callback)

    def process_button_callback():
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_file = temp_dir + "/temp.xml"
            temp_file = temp_dir + "/config.xml"
            config.save_as(temp_file)
            pyzebra.anatric(temp_file)
            try:
                pyzebra.anatric(temp_file, anatric_path=doc.anatric_path, cwd=temp_dir, log=log)
            except Exception as e:
                log.exception(e)
                return

            with open(config.logfile) as f_log:
            with open(os.path.join(temp_dir, config.logfile)) as f_log:
                output_log.value = f_log.read()

            with open(os.path.join(temp_dir, config.reflectionPrinter_file)) as f_res:
                output_res.value = f_res.read()

    process_button = Button(label="Process", button_type="primary")
    process_button.on_click(process_button_callback)

    output_log = TextAreaInput(title="Logfile output:", height=700, disabled=True)
    output_config = TextAreaInput(title="Current config:", height=700, width=400, disabled=True)
    output_log = TextAreaInput(title="Logfile output:", height=320, width=465, disabled=True)
    output_res = TextAreaInput(title="Result output:", height=320, width=465, disabled=True)
    output_config = TextAreaInput(title="Current config:", height=320, width=465, disabled=True)

    general_params_layout = column(
        row(column(Spacer(height=2), upload_div), upload_button),
        row(logfile_textinput, logfile_verbosity),
        row(filelist_type, filelist_format_textinput),
        filelist_datapath_textinput,
        filelist_ranges_textareainput,
        row(crystal_sample_textinput, lambda_textinput),
        ub_textareainput,
        row(zeroOM_textinput, zeroSTT_textinput, zeroCHI_textinput),
        row(
            dataFactory_implementation_select,
            dataFactory_dist1_textinput,
            dataFactory_dist2_textinput,
            dataFactory_dist3_textinput,
        ),
        row(reflectionPrinter_format_select),
    )

    tab_layout = row(
        column(
            upload_div,
            upload_button,
            row(logfile_textinput, logfile_verbosity_select),
            row(filelist_type, filelist_format_textinput),
            filelist_datapath_textinput,
            filelist_ranges_textareainput,
            crystal_sample_textinput,
            row(lambda_textinput, zeroOM_textinput, zeroSTT_textinput, zeroCHI_textinput),
            ub_textareainput,
            row(dataFactory_implementation_select, dataFactory_dist1_textinput),
            reflectionPrinter_format_select,
            process_button,
        ),
        column(
            mode_radio_button_group,
            row(
                column(
                    threshold_textinput,
                    shell_textinput,
                    steepness_textinput,
                    duplicateDistance_textinput,
                    maxequal_textinput,
                    aps_window_textinput,
                ),
                column(
                    adm_window_textinput,
                    border_textinput,
                    minWindow_textinput,
                    reflectionFile_textinput,
                    targetMonitor_textinput,
                    smoothSize_textinput,
                    loop_textinput,
                    minPeakCount_textinput,
                    displacementCurve_textinput,
                ),
            ),
        ),
        output_config,
        output_log,
        general_params_layout,
        column(output_config, algorithm_params, row(process_button)),
        column(output_log, output_res),
    )

    async def update_config():
        config.save_as("debug.xml")
        with open("debug.xml") as f_config:
            output_config.value = f_config.read()
        output_config.value = config.tostring()

    curdoc().add_periodic_callback(update_config, 1000)
    doc.add_periodic_callback(update_config, 1000)

    return Panel(child=tab_layout, title="hdf anatric")
518 pyzebra/app/panel_hdf_param_study.py Normal file
@@ -0,0 +1,518 @@
import base64
import io
import os

import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, gridplot, row
from bokeh.models import (
    Button,
    CellEditor,
    CheckboxGroup,
    ColumnDataSource,
    DataTable,
    Div,
    FileInput,
    LinearColorMapper,
    MultiSelect,
    NumberEditor,
    NumberFormatter,
    Panel,
    Range1d,
    Select,
    Spinner,
    TableColumn,
    Tabs,
)
from bokeh.plotting import figure

import pyzebra

IMAGE_W = 256
IMAGE_H = 128
IMAGE_PLOT_W = int(IMAGE_W * 2.4) + 52
IMAGE_PLOT_H = int(IMAGE_H * 2.4) + 27
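# (The 2.4 scale and the +52/+27 offsets look like empirical padding for axis
# labels and toolbars; they only affect the on-screen plot size, not the data.)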


def create():
    doc = curdoc()
    log = doc.logger
    dataset = []
    cami_meta = {}

    num_formatter = NumberFormatter(format="0.00", nan_format="")

    def file_select_update():
        if data_source.value == "proposal number":
            proposal_path = proposal_textinput.name
            if proposal_path:
                file_list = []
                for file in os.listdir(proposal_path):
                    if file.endswith(".hdf"):
                        file_list.append((os.path.join(proposal_path, file), file))
                file_select.options = file_list
            else:
                file_select.options = []

        else:  # "cami file"
            if not cami_meta:
                file_select.options = []
                return

            file_list = cami_meta["filelist"]
            file_select.options = [(entry, os.path.basename(entry)) for entry in file_list]

    def data_source_callback(_attr, _old, _new):
        file_select_update()

    data_source = Select(
        title="Data Source:",
        value="proposal number",
        options=["proposal number", "cami file"],
        width=210,
    )
    data_source.on_change("value", data_source_callback)

    doc.add_periodic_callback(file_select_update, 5000)

    def proposal_textinput_callback(_attr, _old, _new):
        file_select_update()

    proposal_textinput = doc.proposal_textinput
    proposal_textinput.on_change("name", proposal_textinput_callback)
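    # The shared proposal widget keeps the resolved proposal path in its "name"
    # property (see file_select_update above), hence watching "name" rather than "value".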

    def upload_button_callback(_attr, _old, new):
        nonlocal cami_meta
        with io.StringIO(base64.b64decode(new).decode()) as file:
            cami_meta = pyzebra.parse_h5meta(file)
        data_source.value = "cami file"
        file_select_update()

    upload_div = Div(text="or upload .cami file:", margin=(5, 5, 0, 5))
    upload_button = FileInput(accept=".cami", width=200)
    upload_button.on_change("value", upload_button_callback)

    file_select = MultiSelect(title="Available .hdf files:", width=210, height=320)

    def _init_datatable():
        file_list = []
        for scan in dataset:
            file_list.append(os.path.basename(scan["original_filename"]))

        scan_table_source.data.update(
            file=file_list,
            param=[None] * len(dataset),
            frame=[None] * len(dataset),
            x_pos=[None] * len(dataset),
            y_pos=[None] * len(dataset),
        )
        scan_table_source.selected.indices = []
        scan_table_source.selected.indices = [0]

        param_select.value = "user defined"

    def _update_table():
        frame = []
        x_pos = []
        y_pos = []
        for scan in dataset:
            if "fit" in scan:
                framei = scan["fit"]["frame"]
                x_posi = scan["fit"]["x_pos"]
                y_posi = scan["fit"]["y_pos"]
            else:
                framei = x_posi = y_posi = None

            frame.append(framei)
            x_pos.append(x_posi)
            y_pos.append(y_posi)

        scan_table_source.data.update(frame=frame, x_pos=x_pos, y_pos=y_pos)

    def _file_open():
        new_data = []
        for f_name in file_select.value:
            try:
                new_data.append(pyzebra.read_detector_data(f_name))
            except KeyError as e:
                log.exception(e)
                return

        dataset.extend(new_data)

        _init_datatable()

    def file_open_button_callback():
        nonlocal dataset
        dataset = []
        _file_open()

    file_open_button = Button(label="Open New", width=100)
    file_open_button.on_click(file_open_button_callback)

    def file_append_button_callback():
        _file_open()

    file_append_button = Button(label="Append", width=100)
    file_append_button.on_click(file_append_button_callback)

    # Scan select
    def scan_table_select_callback(_attr, old, new):
        if not new:
            # skip empty selections
            return

        # Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
        if len(new) > 1:
            # drop selection to the previous one
            scan_table_source.selected.indices = old
            return

        if len(old) > 1:
            # skip unnecessary update caused by selection drop
            return

        scan = dataset[new[0]]

        zebra_mode = scan["zebra_mode"]
        if zebra_mode == "nb":
            metadata_table_source.data.update(geom=["normal beam"])
        else:  # zebra_mode == "bi"
            metadata_table_source.data.update(geom=["bisecting"])

        if "mf" in scan:
            metadata_table_source.data.update(mf=[scan["mf"][0]])
        else:
            metadata_table_source.data.update(mf=[None])

        if "temp" in scan:
            metadata_table_source.data.update(temp=[scan["temp"][0]])
        else:
            metadata_table_source.data.update(temp=[None])

        _update_proj_plots()

    def scan_table_source_callback(_attr, _old, _new):
        pass

    scan_table_source = ColumnDataSource(dict(file=[], param=[], frame=[], x_pos=[], y_pos=[]))
    scan_table_source.selected.on_change("indices", scan_table_select_callback)
    scan_table_source.on_change("data", scan_table_source_callback)

    scan_table = DataTable(
        source=scan_table_source,
        columns=[
            TableColumn(field="file", title="file", editor=CellEditor(), width=150),
            TableColumn(
                field="param",
                title="param",
                formatter=num_formatter,
                editor=NumberEditor(),
                width=50,
            ),
            TableColumn(
                field="frame", title="Frame", formatter=num_formatter, editor=CellEditor(), width=70
            ),
            TableColumn(
                field="x_pos", title="X", formatter=num_formatter, editor=CellEditor(), width=70
            ),
            TableColumn(
                field="y_pos", title="Y", formatter=num_formatter, editor=CellEditor(), width=70
            ),
        ],
        width=470,  # +60 because of the index column
        height=420,
        editable=True,
        autosize_mode="none",
    )

    def _get_selected_scan():
        return dataset[scan_table_source.selected.indices[0]]

    def param_select_callback(_attr, _old, new):
        if new == "user defined":
            param = [None] * len(dataset)
        else:
            # TODO: which value to take?
            param = [scan[new][0] for scan in dataset]

        scan_table_source.data["param"] = param
        _update_param_plot()

    param_select = Select(
        title="Parameter:",
        options=["user defined", "temp", "mf", "h", "k", "l"],
        value="user defined",
        width=145,
    )
    param_select.on_change("value", param_select_callback)

    def _update_proj_plots():
        scan = _get_selected_scan()
        counts = scan["counts"]
        n_im, n_y, n_x = counts.shape
        im_proj_x = np.mean(counts, axis=1)
        im_proj_y = np.mean(counts, axis=2)

        # normalize for simpler colormapping
        im_proj_max_val = max(np.max(im_proj_x), np.max(im_proj_y))
        im_proj_x = 1000 * im_proj_x / im_proj_max_val
        im_proj_y = 1000 * im_proj_y / im_proj_max_val
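        # (both projections share color_mapper_proj, so they are scaled jointly to 0..1000)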

        proj_x_image_source.data.update(image=[im_proj_x], dw=[n_x], dh=[n_im])
        proj_y_image_source.data.update(image=[im_proj_y], dw=[n_y], dh=[n_im])

        if proj_auto_checkbox.active:
            im_min = min(np.min(im_proj_x), np.min(im_proj_y))
            im_max = max(np.max(im_proj_x), np.max(im_proj_y))

            proj_display_min_spinner.value = im_min
            proj_display_max_spinner.value = im_max

        frame_range.start = 0
        frame_range.end = n_im
        frame_range.reset_start = 0
        frame_range.reset_end = n_im
        frame_range.bounds = (0, n_im)

        scan_motor = scan["scan_motor"]
        proj_y_plot.yaxis.axis_label = f"Scanning motor, {scan_motor}"

        var = scan[scan_motor]
        var_start = var[0]
        var_end = var[-1] + (var[-1] - var[0]) / (n_im - 1)
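        # (extends the axis end by one motor step so the n_im image rows cover the full range)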

        scanning_motor_range.start = var_start
        scanning_motor_range.end = var_end
        scanning_motor_range.reset_start = var_start
        scanning_motor_range.reset_end = var_end
        # handle both ascending and descending sequences
        scanning_motor_range.bounds = (min(var_start, var_end), max(var_start, var_end))

    # shared frame ranges
    frame_range = Range1d(0, 1, bounds=(0, 1))
    scanning_motor_range = Range1d(0, 1, bounds=(0, 1))
    color_mapper_proj = LinearColorMapper()

    det_x_range = Range1d(0, IMAGE_W, bounds=(0, IMAGE_W))
    proj_x_plot = figure(
        title="Projections on X-axis",
        x_axis_label="Coordinate X, pix",
        y_axis_label="Frame",
        x_range=det_x_range,
        y_range=frame_range,
        extra_y_ranges={"scanning_motor": scanning_motor_range},
        height=540,
        width=IMAGE_PLOT_W - 3,
        tools="pan,box_zoom,wheel_zoom,reset",
        active_scroll="wheel_zoom",
    )

    proj_x_plot.yaxis.major_label_orientation = "vertical"
    proj_x_plot.toolbar.tools[2].maintain_focus = False

    proj_x_image_source = ColumnDataSource(
        dict(image=[np.zeros((1, 1), dtype="float32")], x=[0], y=[0], dw=[IMAGE_W], dh=[1])
    )

    proj_x_plot.image(source=proj_x_image_source, color_mapper=color_mapper_proj)

    det_y_range = Range1d(0, IMAGE_H, bounds=(0, IMAGE_H))
    proj_y_plot = figure(
        title="Projections on Y-axis",
        x_axis_label="Coordinate Y, pix",
        y_axis_label="Scanning motor",
        y_axis_location="right",
        x_range=det_y_range,
        y_range=frame_range,
        extra_y_ranges={"scanning_motor": scanning_motor_range},
        height=540,
        width=IMAGE_PLOT_H + 22,
        tools="pan,box_zoom,wheel_zoom,reset",
        active_scroll="wheel_zoom",
    )

    proj_y_plot.yaxis.y_range_name = "scanning_motor"
    proj_y_plot.yaxis.major_label_orientation = "vertical"
    proj_y_plot.toolbar.tools[2].maintain_focus = False

    proj_y_image_source = ColumnDataSource(
        dict(image=[np.zeros((1, 1), dtype="float32")], x=[0], y=[0], dw=[IMAGE_H], dh=[1])
    )

    proj_y_plot.image(source=proj_y_image_source, color_mapper=color_mapper_proj)

    def colormap_select_callback(_attr, _old, new):
        color_mapper_proj.palette = new

    colormap_select = Select(
        title="Colormap:",
        options=[("Greys256", "greys"), ("Plasma256", "plasma"), ("Cividis256", "cividis")],
        width=210,
    )
    colormap_select.on_change("value", colormap_select_callback)
    colormap_select.value = "Plasma256"

    def proj_auto_checkbox_callback(_attr, _old, new):
        if 0 in new:
            proj_display_min_spinner.disabled = True
            proj_display_max_spinner.disabled = True
        else:
            proj_display_min_spinner.disabled = False
            proj_display_max_spinner.disabled = False

        _update_proj_plots()

    proj_auto_checkbox = CheckboxGroup(
        labels=["Projections Intensity Range"], active=[0], width=145, margin=[10, 5, 0, 5]
    )
    proj_auto_checkbox.on_change("active", proj_auto_checkbox_callback)

    def proj_display_max_spinner_callback(_attr, _old, new):
        color_mapper_proj.high = new

    proj_display_max_spinner = Spinner(
        value=1, disabled=bool(proj_auto_checkbox.active), mode="int", width=100
    )
    proj_display_max_spinner.on_change("value", proj_display_max_spinner_callback)

    def proj_display_min_spinner_callback(_attr, _old, new):
        color_mapper_proj.low = new

    proj_display_min_spinner = Spinner(
        value=0, disabled=bool(proj_auto_checkbox.active), mode="int", width=100
    )
    proj_display_min_spinner.on_change("value", proj_display_min_spinner_callback)

    metadata_table_source = ColumnDataSource(dict(geom=[""], temp=[None], mf=[None]))
    metadata_table = DataTable(
        source=metadata_table_source,
        columns=[
            TableColumn(field="geom", title="Geometry", width=100),
            TableColumn(field="temp", title="Temperature", formatter=num_formatter, width=100),
            TableColumn(field="mf", title="Magnetic Field", formatter=num_formatter, width=100),
        ],
        width=300,
        height=50,
        autosize_mode="none",
        index_position=None,
    )

    def _update_param_plot():
        x = []
        y = []
        fit_param = fit_param_select.value
        for s, p in zip(dataset, scan_table_source.data["param"]):
            if "fit" in s and fit_param:
                x.append(p)
                y.append(s["fit"][fit_param])
        param_scatter_source.data.update(x=x, y=y)

    # Parameter plot
    param_plot = figure(
        x_axis_label="Parameter",
        y_axis_label="Fit parameter",
        height=400,
        width=700,
        tools="pan,wheel_zoom,reset",
    )

    param_scatter_source = ColumnDataSource(dict(x=[], y=[]))
    param_plot.circle(source=param_scatter_source)

    param_plot.toolbar.logo = None

    def fit_param_select_callback(_attr, _old, _new):
        _update_param_plot()

    fit_param_select = Select(title="Fit parameter", options=[], width=145)
    fit_param_select.on_change("value", fit_param_select_callback)

    def proc_all_button_callback():
        for scan in dataset:
            pyzebra.fit_event(
                scan,
                int(np.floor(frame_range.start)),
                int(np.ceil(frame_range.end)),
                int(np.floor(det_y_range.start)),
                int(np.ceil(det_y_range.end)),
                int(np.floor(det_x_range.start)),
                int(np.ceil(det_x_range.end)),
            )

        _update_table()

        for scan in dataset:
            if "fit" in scan:
                options = list(scan["fit"].keys())
                fit_param_select.options = options
                fit_param_select.value = options[0]
                break

        _update_param_plot()

    proc_all_button = Button(label="Process All", button_type="primary", width=145)
    proc_all_button.on_click(proc_all_button_callback)

    def proc_button_callback():
        scan = _get_selected_scan()
        pyzebra.fit_event(
            scan,
            int(np.floor(frame_range.start)),
            int(np.ceil(frame_range.end)),
            int(np.floor(det_y_range.start)),
            int(np.ceil(det_y_range.end)),
            int(np.floor(det_x_range.start)),
            int(np.ceil(det_x_range.end)),
        )

        _update_table()

        for scan in dataset:
            if "fit" in scan:
                options = list(scan["fit"].keys())
                fit_param_select.options = options
                fit_param_select.value = options[0]
                break

        _update_param_plot()

    proc_button = Button(label="Process Current", width=145)
    proc_button.on_click(proc_button_callback)

    layout_controls = row(
        colormap_select,
        column(proj_auto_checkbox, row(proj_display_min_spinner, proj_display_max_spinner)),
        proc_button,
        proc_all_button,
    )

    layout_proj = column(
        gridplot(
            [[proj_x_plot, proj_y_plot]], toolbar_options={"logo": None}, toolbar_location="right"
        ),
        layout_controls,
    )

    # Plot tabs
    plots = Tabs(
        tabs=[
            Panel(child=layout_proj, title="single scan"),
            Panel(child=column(param_plot, row(fit_param_select)), title="parameter plot"),
        ]
    )

    # Final layout
    import_layout = column(
        data_source,
        upload_div,
        upload_button,
        file_select,
        row(file_open_button, file_append_button),
    )

    scan_layout = column(scan_table, row(param_select, metadata_table))

    tab_layout = column(row(import_layout, scan_layout, plots))

    return Panel(child=tab_layout, title="hdf param study")
File diff suppressed because it is too large
518 pyzebra/app/panel_param_study.py Normal file
@@ -0,0 +1,518 @@
import itertools
import os
import tempfile

import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
    Button,
    CellEditor,
    CheckboxEditor,
    ColumnDataSource,
    DataTable,
    Div,
    HoverTool,
    LinearColorMapper,
    NumberEditor,
    Panel,
    Range1d,
    Select,
    Spacer,
    Span,
    TableColumn,
    Tabs,
    TextAreaInput,
    Whisker,
)
from bokeh.palettes import Category10, Plasma256
from bokeh.plotting import figure
from scipy import interpolate

import pyzebra
from pyzebra import app


def color_palette(n_colors):
    palette = itertools.cycle(Category10[10])
    return list(itertools.islice(palette, n_colors))
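    # e.g. color_palette(12) cycles: scans 11 and 12 reuse the first two Category10 colors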


def create():
    doc = curdoc()
    log = doc.logger
    dataset = []
    app_dlfiles = app.DownloadFiles(n_files=1)

    def _init_datatable():
        scan_list = [s["idx"] for s in dataset]
        export = [s["export"] for s in dataset]
        if param_select.value == "user defined":
            param = [None] * len(dataset)
        else:
            param = [scan[param_select.value] for scan in dataset]

        file_list = []
        for scan in dataset:
            file_list.append(os.path.basename(scan["original_filename"]))

        scan_table_source.data.update(
            file=file_list, scan=scan_list, param=param, fit=[0] * len(scan_list), export=export
        )
        scan_table_source.selected.indices = []
        scan_table_source.selected.indices = [0]

        scan_motor_select.options = dataset[0]["scan_motors"]
        scan_motor_select.value = dataset[0]["scan_motor"]

        merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
        merge_from_select.options = merge_options
        merge_from_select.value = merge_options[0][0]

    def scan_motor_select_callback(_attr, _old, new):
        if dataset:
            for scan in dataset:
                scan["scan_motor"] = new
            _update_single_scan_plot()
            _update_overview()

    scan_motor_select = Select(title="Scan motor:", options=[], width=145)
    scan_motor_select.on_change("value", scan_motor_select_callback)

    def _update_table():
        fit_ok = [(1 if "fit" in scan else 0) for scan in dataset]
        export = [scan["export"] for scan in dataset]
        if param_select.value == "user defined":
            param = [None] * len(dataset)
        else:
            param = [scan[param_select.value] for scan in dataset]

        scan_table_source.data.update(fit=fit_ok, export=export, param=param)

    def _update_single_scan_plot():
        scan = _get_selected_scan()
        scan_motor = scan["scan_motor"]

        y = scan["counts"]
        y_err = scan["counts_err"]
        x = scan[scan_motor]

        plot.axis[0].axis_label = scan_motor
        scatter_source.data.update(x=x, y=y, y_upper=y + y_err, y_lower=y - y_err)

        fit = scan.get("fit")
        if fit is not None:
            x_fit = np.linspace(x[0], x[-1], 100)
            fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))

            x_bkg = []
            y_bkg = []
            xs_peak = []
            ys_peak = []
            comps = fit.eval_components(x=x_fit)
            for i, model in enumerate(app_fitctrl.params):
                if "linear" in model:
                    x_bkg = x_fit
                    y_bkg = comps[f"f{i}_"]

                elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
                    xs_peak.append(x_fit)
                    ys_peak.append(comps[f"f{i}_"])

            bkg_source.data.update(x=x_bkg, y=y_bkg)
            peak_source.data.update(xs=xs_peak, ys=ys_peak)

        else:
            fit_source.data.update(x=[], y=[])
            bkg_source.data.update(x=[], y=[])
            peak_source.data.update(xs=[], ys=[])

        app_fitctrl.update_result_textarea(scan)

    def _update_overview():
        xs = []
        ys = []
        param = []
        x = []
        y = []
        par = []
        for s, p in enumerate(scan_table_source.data["param"]):
            if p is not None:
                scan = dataset[s]
                scan_motor = scan["scan_motor"]
                xs.append(scan[scan_motor])
                x.extend(scan[scan_motor])
                ys.append(scan["counts"])
                y.extend([float(p)] * len(scan[scan_motor]))
                param.append(float(p))
                par.extend(scan["counts"])

        if dataset:
            scan_motor = dataset[0]["scan_motor"]
            ov_plot.axis[0].axis_label = scan_motor
            ov_param_plot.axis[0].axis_label = scan_motor

        ov_mline_source.data.update(xs=xs, ys=ys, param=param, color=color_palette(len(xs)))

        ov_param_scatter_source.data.update(x=x, y=y)

        if y:
            x1, x2 = min(x), max(x)
            y1, y2 = min(y), max(y)
            grid_x, grid_y = np.meshgrid(
                np.linspace(x1, x2, ov_param_plot.inner_width),
                np.linspace(y1, y2, ov_param_plot.inner_height),
            )
            image = interpolate.griddata((x, y), par, (grid_x, grid_y))
            ov_param_image_source.data.update(
                image=[image], x=[x1], y=[y1], dw=[x2 - x1], dh=[y2 - y1]
            )

            x_range = ov_param_plot.x_range
            x_range.start, x_range.end = x1, x2
            x_range.reset_start, x_range.reset_end = x1, x2
            x_range.bounds = (x1, x2)

            y_range = ov_param_plot.y_range
            y_range.start, y_range.end = y1, y2
            y_range.reset_start, y_range.reset_end = y1, y2
            y_range.bounds = (y1, y2)

        else:
            ov_param_image_source.data.update(image=[], x=[], y=[], dw=[], dh=[])

    def _update_param_plot():
        x = []
        y = []
        y_lower = []
        y_upper = []
        fit_param = fit_param_select.value
        for s, p in zip(dataset, scan_table_source.data["param"]):
            if "fit" in s and fit_param:
                x.append(p)
                param_fit_val = s["fit"].params[fit_param].value
                param_fit_std = s["fit"].params[fit_param].stderr
                if param_fit_std is None:
                    param_fit_std = 0
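                    # stderr can be None if the fit could not estimate it; zero keeps
                    # the error whisker at zero length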
                y.append(param_fit_val)
                y_lower.append(param_fit_val - param_fit_std)
                y_upper.append(param_fit_val + param_fit_std)

        param_scatter_source.data.update(x=x, y=y, y_lower=y_lower, y_upper=y_upper)

    def _monitor_change():
        _update_single_scan_plot()
        _update_overview()

    app_inputctrl = app.InputControls(
        dataset, app_dlfiles, on_file_open=_init_datatable, on_monitor_change=_monitor_change
    )

    # Main plot
    plot = figure(
        x_axis_label="Scan motor",
        y_axis_label="Counts",
        height=450,
        width=700,
        tools="pan,wheel_zoom,reset",
    )

    scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
    plot.circle(
        source=scatter_source, line_color="steelblue", fill_color="steelblue", legend_label="data"
    )
    plot.add_layout(Whisker(source=scatter_source, base="x", upper="y_upper", lower="y_lower"))

    fit_source = ColumnDataSource(dict(x=[0], y=[0]))
    plot.line(source=fit_source, legend_label="best fit")

    bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
    plot.line(source=bkg_source, line_color="green", line_dash="dashed", legend_label="linear")

    peak_source = ColumnDataSource(dict(xs=[[0]], ys=[[0]]))
    plot.multi_line(source=peak_source, line_color="red", line_dash="dashed", legend_label="peak")

    fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
    plot.add_layout(fit_from_span)

    fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
    plot.add_layout(fit_to_span)

    plot.y_range.only_visible = True
    plot.toolbar.logo = None
    plot.legend.click_policy = "hide"

    # Overview multilines plot
    ov_plot = figure(
        x_axis_label="Scan motor",
        y_axis_label="Counts",
        height=450,
        width=700,
        tools="pan,wheel_zoom,reset",
    )

    ov_mline_source = ColumnDataSource(dict(xs=[], ys=[], param=[], color=[]))
    ov_plot.multi_line(source=ov_mline_source, line_color="color")

    ov_plot.add_tools(HoverTool(tooltips=[("param", "@param")]))

    ov_plot.toolbar.logo = None

    # Overview params plot
    ov_param_plot = figure(
        x_axis_label="Scan motor",
        y_axis_label="Param",
        x_range=Range1d(),
        y_range=Range1d(),
        height=450,
        width=700,
        tools="pan,wheel_zoom,reset",
    )

    color_mapper = LinearColorMapper(palette=Plasma256)
    ov_param_image_source = ColumnDataSource(dict(image=[], x=[], y=[], dw=[], dh=[]))
    ov_param_plot.image(source=ov_param_image_source, color_mapper=color_mapper)

    ov_param_scatter_source = ColumnDataSource(dict(x=[], y=[]))
    ov_param_plot.dot(source=ov_param_scatter_source, size=15, color="black")

    ov_param_plot.toolbar.logo = None

    # Parameter plot
    param_plot = figure(
        x_axis_label="Parameter",
        y_axis_label="Fit parameter",
        height=400,
        width=700,
        tools="pan,wheel_zoom,reset",
    )

    param_scatter_source = ColumnDataSource(dict(x=[], y=[], y_upper=[], y_lower=[]))
    param_plot.circle(source=param_scatter_source)
    param_plot.add_layout(
        Whisker(source=param_scatter_source, base="x", upper="y_upper", lower="y_lower")
    )

    param_plot.toolbar.logo = None

    def fit_param_select_callback(_attr, _old, _new):
        _update_param_plot()

    fit_param_select = Select(title="Fit parameter", options=[], width=145)
    fit_param_select.on_change("value", fit_param_select_callback)

    # Plot tabs
    plots = Tabs(
        tabs=[
            Panel(child=plot, title="single scan"),
            Panel(child=ov_plot, title="overview"),
            Panel(child=ov_param_plot, title="overview map"),
            Panel(child=column(param_plot, row(fit_param_select)), title="parameter plot"),
        ]
    )

    # Scan select
    def scan_table_select_callback(_attr, old, new):
        if not new:
            # skip empty selections
            return

        # Avoid selection of multiple indices (via Shift+Click or Ctrl+Click)
        if len(new) > 1:
            # drop selection to the previous one
            scan_table_source.selected.indices = old
            return

        if len(old) > 1:
            # skip unnecessary update caused by selection drop
            return

        _update_single_scan_plot()

    def scan_table_source_callback(_attr, _old, new):
        # unfortunately, we don't know if the change comes from data update or user input
        # also `old` and `new` are the same for non-scalars
        for scan, export in zip(dataset, new["export"]):
            scan["export"] = export
        _update_overview()
        _update_param_plot()
        _update_preview()

    scan_table_source = ColumnDataSource(dict(file=[], scan=[], param=[], fit=[], export=[]))
    scan_table_source.on_change("data", scan_table_source_callback)
    scan_table_source.selected.on_change("indices", scan_table_select_callback)

    scan_table = DataTable(
        source=scan_table_source,
        columns=[
            TableColumn(field="file", title="file", editor=CellEditor(), width=150),
            TableColumn(field="scan", title="scan", editor=CellEditor(), width=50),
            TableColumn(field="param", title="param", editor=NumberEditor(), width=50),
            TableColumn(field="fit", title="Fit", editor=CellEditor(), width=50),
            TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
        ],
        width=410,  # +60 because of the index column
        height=350,
        editable=True,
        autosize_mode="none",
    )

    merge_from_select = Select(title="scan:", width=145)

    def merge_button_callback():
        scan_into = _get_selected_scan()
        scan_from = dataset[int(merge_from_select.value)]

        if scan_into is scan_from:
            log.warning("Selected scans for merging are identical")
            return

        pyzebra.merge_scans(scan_into, scan_from, log=log)
        _update_table()
        _update_single_scan_plot()
        _update_overview()

    merge_button = Button(label="Merge into current", width=145)
    merge_button.on_click(merge_button_callback)

    def restore_button_callback():
        pyzebra.restore_scan(_get_selected_scan())
        _update_table()
        _update_single_scan_plot()
        _update_overview()

    restore_button = Button(label="Restore scan", width=145)
    restore_button.on_click(restore_button_callback)

    def _get_selected_scan():
        return dataset[scan_table_source.selected.indices[0]]

    def param_select_callback(_attr, _old, _new):
        _update_table()

    param_select = Select(
        title="Parameter:",
        options=["user defined", "temp", "mf", "h", "k", "l"],
        value="user defined",
        width=145,
    )
    param_select.on_change("value", param_select_callback)

    app_fitctrl = app.FitControls()

    def fit_from_spinner_callback(_attr, _old, new):
        fit_from_span.location = new

    app_fitctrl.from_spinner.on_change("value", fit_from_spinner_callback)

    def fit_to_spinner_callback(_attr, _old, new):
        fit_to_span.location = new

    app_fitctrl.to_spinner.on_change("value", fit_to_spinner_callback)

    def proc_all_button_callback():
        app_fitctrl.fit_dataset(dataset)

        _update_single_scan_plot()
        _update_overview()
        _update_table()

        for scan in dataset:
            if "fit" in scan:
                options = list(scan["fit"].params.keys())
                fit_param_select.options = options
                fit_param_select.value = options[0]
                break

    proc_all_button = Button(label="Process All", button_type="primary", width=145)
    proc_all_button.on_click(proc_all_button_callback)

    def proc_button_callback():
        app_fitctrl.fit_scan(_get_selected_scan())

        _update_single_scan_plot()
        _update_overview()
        _update_table()

        for scan in dataset:
            if "fit" in scan:
                options = list(scan["fit"].params.keys())
                fit_param_select.options = options
                fit_param_select.value = options[0]
                break

    proc_button = Button(label="Process Current", width=145)
    proc_button.on_click(proc_button_callback)

    export_preview_textinput = TextAreaInput(title="Export file preview:", width=450, height=400)

    def _update_preview():
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_file = temp_dir + "/temp"
            export_data = []
            param_data = []
            for scan, param in zip(dataset, scan_table_source.data["param"]):
                if scan["export"] and param:
                    export_data.append(scan)
                    param_data.append(param)

            pyzebra.export_param_study(export_data, param_data, temp_file)

            exported_content = ""
            file_content = []

            fname = temp_file
            if os.path.isfile(fname):
                with open(fname) as f:
                    content = f.read()
                exported_content += content
            else:
                content = ""
            file_content.append(content)

        app_dlfiles.set_contents(file_content)
        export_preview_textinput.value = exported_content

    area_method_div = Div(text="Intensity:", margin=(5, 5, 0, 5))
    fitpeak_controls = row(
        column(
            app_fitctrl.add_function_button,
            app_fitctrl.function_select,
            app_fitctrl.remove_function_button,
        ),
        app_fitctrl.params_table,
        Spacer(width=20),
        column(
            app_fitctrl.from_spinner,
            app_fitctrl.lorentz_checkbox,
            area_method_div,
            app_fitctrl.area_method_radiogroup,
        ),
        column(app_fitctrl.to_spinner, proc_button, proc_all_button),
    )

    scan_layout = column(
        scan_table,
        row(app_inputctrl.monitor_spinner, scan_motor_select, param_select),
        row(column(Spacer(height=19), row(restore_button, merge_button)), merge_from_select),
    )

    upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
    append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
    import_layout = column(
        app_inputctrl.filelist_select,
        row(app_inputctrl.open_button, app_inputctrl.append_button),
        upload_div,
        app_inputctrl.upload_button,
        append_upload_div,
        app_inputctrl.append_upload_button,
    )

    export_layout = column(export_preview_textinput, row(app_dlfiles.button))

    tab_layout = column(
        row(import_layout, scan_layout, plots, Spacer(width=30), export_layout),
        row(fitpeak_controls, app_fitctrl.result_textarea),
    )

    return Panel(child=tab_layout, title="param study")
442 pyzebra/app/panel_plot_data.py Normal file
@@ -0,0 +1,442 @@
import base64
import io
import os

import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
    Button,
    CheckboxGroup,
    ColorBar,
    ColumnDataSource,
    DataRange1d,
    Div,
    FileInput,
    LinearColorMapper,
    LogColorMapper,
    NumericInput,
    Panel,
    RadioGroup,
    Select,
    Spacer,
    Spinner,
    TextInput,
)
from bokeh.plotting import figure
from scipy import interpolate

import pyzebra
from pyzebra import app
from pyzebra.app.panel_hdf_viewer import calculate_hkl


def create():
    doc = curdoc()
    log = doc.logger
    _update_slice = None
    measured_data_div = Div(text="Measured <b>HDF</b> data:")
    measured_data = FileInput(accept=".hdf", multiple=True, width=200)

    upload_hkl_div = Div(text="Open hkl/mhkl data:")
    upload_hkl_fi = FileInput(accept=".hkl,.mhkl", multiple=True, width=200)

    def _prepare_plotting():
        flag_ub = bool(redef_ub_cb.active)
        flag_lattice = bool(redef_lattice_cb.active)

        # Define the horizontal direction of the plotting plane; the vertical direction
        # will be calculated automatically
        x_dir = list(map(float, hkl_in_plane_x.value.split()))

        # Define direction orthogonal to plotting plane. Together with orth_cut, this parameter also
        # defines the position of the cut, i.e. the cut will be taken at orth_dir = [x,y,z]*orth_cut ± delta,
        # where delta is the max distance a data point can have from the cut, in r.l.u.
        orth_dir = list(map(float, hkl_normal.value.split()))

        # Load data files
        md_fnames = measured_data.filename
        md_fdata = measured_data.value

        for ind, (fname, fdata) in enumerate(zip(md_fnames, md_fdata)):
            # Read data
            try:
                det_data = pyzebra.read_detector_data(io.BytesIO(base64.b64decode(fdata)))
            except Exception as e:
                log.exception(e)
                return None

            if ind == 0:
                if not flag_ub:
                    redef_ub_ti.value = " ".join(map(str, det_data["ub"].ravel()))
                if not flag_lattice:
                    redef_lattice_ti.value = " ".join(map(str, det_data["cell"]))

            num_slices = np.shape(det_data["counts"])[0]

            # Change parameter
            if flag_ub:
                ub = list(map(float, redef_ub_ti.value.strip().split()))
                det_data["ub"] = np.array(ub).reshape(3, 3)

            # Convert h k l for all images in file
            h_temp = np.empty(np.shape(det_data["counts"]))
            k_temp = np.empty(np.shape(det_data["counts"]))
            l_temp = np.empty(np.shape(det_data["counts"]))
            for i in range(num_slices):
                h_temp[i], k_temp[i], l_temp[i] = calculate_hkl(det_data, i)

            # Append to matrix
            if ind == 0:
                h = h_temp
                k = k_temp
                l = l_temp
                I_matrix = det_data["counts"]
            else:
                h = np.append(h, h_temp, axis=0)
                k = np.append(k, k_temp, axis=0)
                l = np.append(l, l_temp, axis=0)
                I_matrix = np.append(I_matrix, det_data["counts"], axis=0)

        if flag_lattice:
            vals = list(map(float, redef_lattice_ti.value.strip().split()))
            lattice = np.array(vals)
        else:
            lattice = det_data["cell"]

        # Define matrix for converting to cartesian coordinates and back
        alpha = lattice[3] * np.pi / 180.0
        beta = lattice[4] * np.pi / 180.0
        gamma = lattice[5] * np.pi / 180.0

        # reciprocal angle parameters
        beta_star = np.arccos(
            (np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
        )
        gamma_star = np.arccos(
            (np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
        )

        # conversion matrix:
        M = np.array(
            [
                [1, 1 * np.cos(gamma_star), 1 * np.cos(beta_star)],
                [0, 1 * np.sin(gamma_star), -np.sin(beta_star) * np.cos(alpha)],
                [0, 0, 1 * np.sin(beta_star) * np.sin(alpha)],
            ]
        )
|
||||
|
||||
# Get last lattice vector
|
||||
y_dir = np.cross(x_dir, orth_dir) # Second axes of plotting plane
|
||||
|
||||
# Rescale such that smallest element of y-dir vector is 1
|
||||
y_dir2 = y_dir[y_dir != 0]
|
||||
min_val = np.min(np.abs(y_dir2))
|
||||
y_dir = y_dir / min_val
|
||||
|
||||
# Possibly flip direction of ydir:
|
||||
if y_dir[np.argmax(abs(y_dir))] < 0:
|
||||
y_dir = -y_dir
|
||||
|
||||
# Display the resulting y_dir
|
||||
hkl_in_plane_y.value = " ".join([f"{val:.1f}" for val in y_dir])
|
||||
|
||||
# # Save length of lattice vectors
|
||||
# x_length = np.linalg.norm(x_dir)
|
||||
# y_length = np.linalg.norm(y_dir)
|
||||
|
||||
# # Save str for labels
|
||||
# xlabel_str = " ".join(map(str, x_dir))
|
||||
# ylabel_str = " ".join(map(str, y_dir))
|
||||
|
||||
# Normalize lattice vectors
|
||||
y_dir = y_dir / np.linalg.norm(y_dir)
|
||||
x_dir = x_dir / np.linalg.norm(x_dir)
|
||||
orth_dir = orth_dir / np.linalg.norm(orth_dir)
|
||||
|
||||
# Calculate cartesian equivalents of lattice vectors
|
||||
x_c = np.matmul(M, x_dir)
|
||||
y_c = np.matmul(M, y_dir)
|
||||
o_c = np.matmul(M, orth_dir)
|
||||
|
||||
# Calulcate vertical direction in plotting plame
|
||||
y_vert = np.cross(x_c, o_c) # verical direction in plotting plane
|
||||
if y_vert[np.argmax(abs(y_vert))] < 0:
|
||||
y_vert = -y_vert
|
||||
y_vert = y_vert / np.linalg.norm(y_vert)
|
||||
|
||||
# Normalize all directions
|
||||
y_c = y_c / np.linalg.norm(y_c)
|
||||
x_c = x_c / np.linalg.norm(x_c)
|
||||
o_c = o_c / np.linalg.norm(o_c)
|
||||
|
||||
# Convert all hkls to cartesian
|
||||
hkl = [[h, k, l]]
|
||||
hkl = np.transpose(hkl)
|
||||
hkl_c = np.matmul(M, hkl)
|
||||
|
||||
# Prepare hkl/mhkl data
|
||||
hkl_coord = []
|
||||
for j, fname in enumerate(upload_hkl_fi.filename):
|
||||
with io.StringIO(base64.b64decode(upload_hkl_fi.value[j]).decode()) as file:
|
||||
_, ext = os.path.splitext(fname)
|
||||
try:
|
||||
fdata = pyzebra.parse_hkl(file, ext)
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
return
|
||||
|
||||
for ind in range(len(fdata["counts"])):
|
||||
# Recognize k_flag_vec
|
||||
hkl = np.array([fdata["h"][ind], fdata["k"][ind], fdata["l"][ind]])
|
||||
|
||||
# Save data
|
||||
hkl_coord.append(hkl)
|
||||
|
||||
def _update_slice():
|
||||
# Where should cut be along orthogonal direction (Mutliplication factor onto orth_dir)
|
||||
orth_cut = hkl_cut.value
|
||||
|
||||
# Width of cut
|
||||
delta = hkl_delta.value
|
||||
|
||||
# Calculate distance of all points to plane
|
||||
Q = np.array(o_c) * orth_cut
|
||||
N = o_c / np.sqrt(np.sum(o_c**2))
|
||||
v = np.empty(np.shape(hkl_c))
|
||||
v[:, :, :, :, 0] = hkl_c[:, :, :, :, 0] - Q
|
||||
dist = np.abs(np.dot(N, v))
|
||||
dist = np.squeeze(dist)
|
||||
dist = np.transpose(dist)
|
||||
|
||||
# Find points within acceptable distance of plane defined by o_c
|
||||
ind = np.where(abs(dist) < delta)
|
||||
if ind[0].size == 0:
|
||||
image_source.data.update(image=[np.zeros((1, 1))])
|
||||
return
|
||||
|
||||
# Project points onto axes
|
||||
x = np.dot(x_c / np.sqrt(np.sum(x_c**2)), hkl_c)
|
||||
y = np.dot(y_c / np.sqrt(np.sum(y_c**2)), hkl_c)
|
||||
|
||||
# take care of dimensions
|
||||
x = np.squeeze(x)
|
||||
x = np.transpose(x)
|
||||
y = np.squeeze(y)
|
||||
y = np.transpose(y)
|
||||
|
||||
# Get slices:
|
||||
x_slice = x[ind]
|
||||
y_slice = y[ind]
|
||||
I_slice = I_matrix[ind]
|
||||
|
||||
# Meshgrid limits for plotting
|
||||
if auto_range_cb.active:
|
||||
min_x = np.min(x_slice)
|
||||
max_x = np.max(x_slice)
|
||||
min_y = np.min(y_slice)
|
||||
max_y = np.max(y_slice)
|
||||
xrange_min_ni.value = min_x
|
||||
xrange_max_ni.value = max_x
|
||||
yrange_min_ni.value = min_y
|
||||
yrange_max_ni.value = max_y
|
||||
else:
|
||||
min_x = xrange_min_ni.value
|
||||
max_x = xrange_max_ni.value
|
||||
min_y = yrange_min_ni.value
|
||||
max_y = yrange_max_ni.value
|
||||
|
||||
delta_x = xrange_step_ni.value
|
||||
delta_y = yrange_step_ni.value
|
||||
|
||||
# Create interpolated mesh grid for plotting
|
||||
grid_x, grid_y = np.mgrid[min_x:max_x:delta_x, min_y:max_y:delta_y]
|
||||
I = interpolate.griddata((x_slice, y_slice), I_slice, (grid_x, grid_y))
|
||||
|
||||
# Update plot
|
||||
display_min_ni.value = 0
|
||||
display_max_ni.value = np.max(I_slice) * 0.25
|
||||
image_source.data.update(
|
||||
image=[I.T], x=[min_x], dw=[max_x - min_x], y=[min_y], dh=[max_y - min_y]
|
||||
)
|
||||
|
||||
scan_x, scan_y = [], []
|
||||
for j in range(len(hkl_coord)):
|
||||
# Get middle hkl from list
|
||||
hklm = M @ hkl_coord[j]
|
||||
|
||||
# Decide if point is in the cut
|
||||
proj = np.dot(hklm, o_c)
|
||||
if abs(proj - orth_cut) >= delta:
|
||||
continue
|
||||
|
||||
# Project onto axes
|
||||
hklmx = np.dot(hklm, x_c)
|
||||
hklmy = np.dot(hklm, y_vert)
|
||||
|
||||
# Plot middle point of scan
|
||||
scan_x.append(hklmx)
|
||||
scan_y.append(hklmy)
|
||||
|
||||
scatter_source.data.update(x=scan_x, y=scan_y)
|
||||
|
||||
return _update_slice
|
||||
|
||||
def plot_file_callback():
|
||||
nonlocal _update_slice
|
||||
_update_slice = _prepare_plotting()
|
||||
_update_slice()
|
||||
|
||||
plot_file = Button(label="Plot selected file(s)", button_type="primary", width=200)
|
||||
plot_file.on_click(plot_file_callback)
|
||||
|
||||
plot = figure(
|
||||
x_range=DataRange1d(),
|
||||
y_range=DataRange1d(),
|
||||
height=550 + 27,
|
||||
width=550 + 117,
|
||||
tools="pan,wheel_zoom,reset",
|
||||
)
|
||||
plot.toolbar.logo = None
|
||||
|
||||
lin_color_mapper = LinearColorMapper(nan_color=(0, 0, 0, 0), low=0, high=1)
|
||||
log_color_mapper = LogColorMapper(nan_color=(0, 0, 0, 0), low=0, high=1)
|
||||
image_source = ColumnDataSource(dict(image=[np.zeros((1, 1))], x=[0], y=[0], dw=[1], dh=[1]))
|
||||
plot_image = plot.image(source=image_source, color_mapper=lin_color_mapper)
|
||||
|
||||
lin_color_bar = ColorBar(color_mapper=lin_color_mapper, width=15)
|
||||
log_color_bar = ColorBar(color_mapper=log_color_mapper, width=15, visible=False)
|
||||
plot.add_layout(lin_color_bar, "right")
|
||||
plot.add_layout(log_color_bar, "right")
|
||||
|
||||
scatter_source = ColumnDataSource(dict(x=[], y=[]))
|
||||
plot.scatter(source=scatter_source, size=4, fill_color="green", line_color="green")
|
||||
|
||||
hkl_div = Div(text="HKL:", margin=(5, 5, 0, 5))
|
||||
hkl_normal = TextInput(title="normal", value="0 0 1", width=70)
|
||||
|
||||
def hkl_cut_callback(_attr, _old, _new):
|
||||
if _update_slice is not None:
|
||||
_update_slice()
|
||||
|
||||
hkl_cut = Spinner(title="cut", value=0, step=0.1, width=70)
|
||||
hkl_cut.on_change("value_throttled", hkl_cut_callback)
|
||||
|
||||
hkl_delta = NumericInput(title="delta", value=0.1, mode="float", width=70)
|
||||
hkl_in_plane_x = TextInput(title="in-plane X", value="1 0 0", width=70)
|
||||
hkl_in_plane_y = TextInput(title="in-plane Y", value="", width=100, disabled=True)
|
||||
|
||||
def redef_lattice_cb_callback(_attr, _old, new):
|
||||
if 0 in new:
|
||||
redef_lattice_ti.disabled = False
|
||||
else:
|
||||
redef_lattice_ti.disabled = True
|
||||
|
||||
redef_lattice_cb = CheckboxGroup(labels=["Redefine lattice:"], width=110)
|
||||
redef_lattice_cb.on_change("active", redef_lattice_cb_callback)
|
||||
redef_lattice_ti = TextInput(width=490, disabled=True)
|
||||
|
||||
def redef_ub_cb_callback(_attr, _old, new):
|
||||
if 0 in new:
|
||||
redef_ub_ti.disabled = False
|
||||
else:
|
||||
redef_ub_ti.disabled = True
|
||||
|
||||
redef_ub_cb = CheckboxGroup(labels=["Redefine UB:"], width=110)
|
||||
redef_ub_cb.on_change("active", redef_ub_cb_callback)
|
||||
redef_ub_ti = TextInput(width=490, disabled=True)
|
||||
|
||||
def colormap_select_callback(_attr, _old, new):
|
||||
lin_color_mapper.palette = new
|
||||
log_color_mapper.palette = new
|
||||
|
||||
colormap_select = Select(
|
||||
title="Colormap:",
|
||||
options=[("Greys256", "greys"), ("Plasma256", "plasma"), ("Cividis256", "cividis")],
|
||||
width=100,
|
||||
)
|
||||
colormap_select.on_change("value", colormap_select_callback)
|
||||
colormap_select.value = "Plasma256"
|
||||
|
||||
def display_min_ni_callback(_attr, _old, new):
|
||||
lin_color_mapper.low = new
|
||||
log_color_mapper.low = new
|
||||
|
||||
display_min_ni = NumericInput(title="Intensity min:", value=0, mode="float", width=70)
|
||||
display_min_ni.on_change("value", display_min_ni_callback)
|
||||
|
||||
def display_max_ni_callback(_attr, _old, new):
|
||||
lin_color_mapper.high = new
|
||||
log_color_mapper.high = new
|
||||
|
||||
display_max_ni = NumericInput(title="max:", value=1, mode="float", width=70)
|
||||
display_max_ni.on_change("value", display_max_ni_callback)
|
||||
|
||||
def colormap_scale_rg_callback(_attr, _old, new):
|
||||
if new == 0: # Linear
|
||||
plot_image.glyph.color_mapper = lin_color_mapper
|
||||
lin_color_bar.visible = True
|
||||
log_color_bar.visible = False
|
||||
|
||||
else: # Logarithmic
|
||||
if display_min_ni.value > 0 and display_max_ni.value > 0:
|
||||
plot_image.glyph.color_mapper = log_color_mapper
|
||||
lin_color_bar.visible = False
|
||||
log_color_bar.visible = True
|
||||
else:
|
||||
colormap_scale_rg.active = 0
|
||||
|
||||
colormap_scale_rg = RadioGroup(labels=["Linear", "Logarithmic"], active=0, width=100)
|
||||
colormap_scale_rg.on_change("active", colormap_scale_rg_callback)
|
||||
|
||||
xrange_min_ni = NumericInput(title="x range min:", value=0, mode="float", width=70)
|
||||
xrange_max_ni = NumericInput(title="max:", value=1, mode="float", width=70)
|
||||
xrange_step_ni = NumericInput(title="x mesh:", value=0.01, mode="float", width=70)
|
||||
|
||||
yrange_min_ni = NumericInput(title="y range min:", value=0, mode="float", width=70)
|
||||
yrange_max_ni = NumericInput(title="max:", value=1, mode="float", width=70)
|
||||
yrange_step_ni = NumericInput(title="y mesh:", value=0.01, mode="float", width=70)
|
||||
|
||||
def auto_range_cb_callback(_attr, _old, new):
|
||||
if 0 in new:
|
||||
xrange_min_ni.disabled = True
|
||||
xrange_max_ni.disabled = True
|
||||
yrange_min_ni.disabled = True
|
||||
yrange_max_ni.disabled = True
|
||||
else:
|
||||
xrange_min_ni.disabled = False
|
||||
xrange_max_ni.disabled = False
|
||||
yrange_min_ni.disabled = False
|
||||
yrange_max_ni.disabled = False
|
||||
|
||||
auto_range_cb = CheckboxGroup(labels=["Auto range:"], width=110)
|
||||
auto_range_cb.on_change("active", auto_range_cb_callback)
|
||||
auto_range_cb.active = [0]
|
||||
|
||||
column1_layout = column(
|
||||
row(
|
||||
column(row(measured_data_div, measured_data), row(upload_hkl_div, upload_hkl_fi)),
|
||||
plot_file,
|
||||
),
|
||||
row(
|
||||
plot,
|
||||
column(
|
||||
hkl_div,
|
||||
row(hkl_normal, hkl_cut, hkl_delta),
|
||||
row(hkl_in_plane_x, hkl_in_plane_y),
|
||||
row(colormap_select, column(Spacer(height=15), colormap_scale_rg)),
|
||||
row(display_min_ni, display_max_ni),
|
||||
row(column(Spacer(height=19), auto_range_cb)),
|
||||
row(xrange_min_ni, xrange_max_ni),
|
||||
row(yrange_min_ni, yrange_max_ni),
|
||||
row(xrange_step_ni, yrange_step_ni),
|
||||
),
|
||||
),
|
||||
row(column(Spacer(height=7), redef_lattice_cb), redef_lattice_ti),
|
||||
row(column(Spacer(height=7), redef_ub_cb), redef_ub_ti),
|
||||
)
|
||||
column2_layout = app.PlotHKL().layout
|
||||
|
||||
tab_layout = row(column1_layout, Spacer(width=50), column2_layout)
|
||||
|
||||
return Panel(child=tab_layout, title="plot data")
|
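A standalone sketch (not part of the diff) of the reciprocal-space geometry used in `_prepare_plotting` above: the matrix M is built from the cell angles, maps an (h, k, l) triple to cartesian coordinates, and a point's distance to the cut plane follows from a dot product with the normalized plane normal. The cell angles and hkl values here are made up for illustration.

import numpy as np

alpha, beta, gamma = np.radians([90.0, 104.5, 90.0])  # hypothetical cell angles

# reciprocal angle parameters, as in the panel code
beta_star = np.arccos(
    (np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
)
gamma_star = np.arccos(
    (np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
)

M = np.array(
    [
        [1, np.cos(gamma_star), np.cos(beta_star)],
        [0, np.sin(gamma_star), -np.sin(beta_star) * np.cos(alpha)],
        [0, 0, np.sin(beta_star) * np.sin(alpha)],
    ]
)

hkl = np.array([1.0, 0.0, 2.0])
hkl_c = M @ hkl  # cartesian equivalent (up to lattice-parameter scaling)

# distance of the point from a cut plane with normal o_c at offset orth_cut = 0:
o_c = M @ np.array([0.0, 0.0, 1.0])
o_c /= np.linalg.norm(o_c)
dist = abs(np.dot(o_c, hkl_c) - 0.0)
print(hkl_c, dist)  # points with dist < delta end up in the plotted slice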
223 pyzebra/app/panel_spind.py Normal file
@@ -0,0 +1,223 @@
import os
import subprocess
import tempfile

import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
    Button,
    ColumnDataSource,
    DataTable,
    Panel,
    Spinner,
    TableColumn,
    TextAreaInput,
    TextInput,
)

import pyzebra


def create():
    doc = curdoc()
    log = doc.logger
    events_data = doc.events_data

    npeaks_spinner = Spinner(title="Number of peaks from hdf_view panel:", disabled=True)
    lattice_const_textinput = TextInput(title="Lattice constants:")
    max_res_spinner = Spinner(title="max-res:", value=2, step=0.01, width=145)
    seed_pool_size_spinner = Spinner(title="seed-pool-size:", value=5, step=0.01, width=145)
    seed_len_tol_spinner = Spinner(title="seed-len-tol:", value=0.02, step=0.01, width=145)
    seed_angle_tol_spinner = Spinner(title="seed-angle-tol:", value=1, step=0.01, width=145)
    eval_hkl_tol_spinner = Spinner(title="eval-hkl-tol:", value=0.15, step=0.01, width=145)

    diff_vec = []
    ub_matrices = []

    def process_button_callback():
        # drop table selection to clear result fields
        results_table_source.selected.indices = []

        nonlocal diff_vec
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_peak_list_dir = os.path.join(temp_dir, "peak_list")
            os.mkdir(temp_peak_list_dir)
            temp_event_file = os.path.join(temp_peak_list_dir, "event-0.txt")
            temp_hkl_file = os.path.join(temp_dir, "hkl.h5")

            comp_proc = subprocess.run(
                [
                    "mpiexec",
                    "-n",
                    "2",
                    "python",
                    os.path.join(doc.spind_path, "gen_hkl_table.py"),
                    lattice_const_textinput.value,
                    "--max-res",
                    str(max_res_spinner.value),
                    "-o",
                    temp_hkl_file,
                ],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
            )
            log.info(" ".join(comp_proc.args))
            log.info(comp_proc.stdout)

            # prepare an event file
            diff_vec = []
            with open(temp_event_file, "w") as f:
                npeaks = len(next(iter(doc.events_data.values())))
                for ind in range(npeaks):
                    wave = events_data["wave"][ind]
                    ddist = events_data["ddist"][ind]
                    x_pos = events_data["x_pos"][ind]
                    y_pos = events_data["y_pos"][ind]
                    intensity = events_data["intensity"][ind]
                    snr_cnts = events_data["snr_cnts"][ind]
                    gamma = events_data["gamma"][ind]
                    omega = events_data["omega"][ind]
                    chi = events_data["chi"][ind]
                    phi = events_data["phi"][ind]
                    nu = events_data["nu"][ind]

                    ga, nu = pyzebra.det2pol(ddist, gamma, nu, x_pos, y_pos)
                    diff_vector = pyzebra.z1frmd(wave, ga, omega, chi, phi, nu)
                    d_spacing = float(pyzebra.dandth(wave, diff_vector)[0])
                    diff_vector = diff_vector.flatten() * 1e10
                    dv1, dv2, dv3 = diff_vector

                    diff_vec.append(diff_vector)
                    f.write(
                        f"{x_pos} {y_pos} {intensity} {snr_cnts} {dv1} {dv2} {dv3} {d_spacing}\n"
                    )

            log.info(f"Content of {temp_event_file}:")
            with open(temp_event_file) as f:
                log.info(f.read())

            comp_proc = subprocess.run(
                [
                    "mpiexec",
                    "-n",
                    "2",
                    "python",
                    os.path.join(doc.spind_path, "SPIND.py"),
                    temp_peak_list_dir,
                    temp_hkl_file,
                    "-o",
                    temp_dir,
                    "--seed-pool-size",
                    str(seed_pool_size_spinner.value),
                    "--seed-len-tol",
                    str(seed_len_tol_spinner.value),
                    "--seed-angle-tol",
                    str(seed_angle_tol_spinner.value),
                    "--eval-hkl-tol",
                    str(eval_hkl_tol_spinner.value),
                ],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
            )
            log.info(" ".join(comp_proc.args))
            log.info(comp_proc.stdout)

            spind_out_file = os.path.join(temp_dir, "spind.txt")
            spind_res = dict(
                label=[], crystal_id=[], match_rate=[], matched_peaks=[], column_5=[], ub_matrix=[]
            )
            try:
                with open(spind_out_file) as f_out:
                    for line in f_out:
                        c1, c2, c3, c4, c5, *c_rest = line.split()
                        spind_res["label"].append(c1)
                        spind_res["crystal_id"].append(c2)
                        spind_res["match_rate"].append(c3)
                        spind_res["matched_peaks"].append(c4)
                        spind_res["column_5"].append(c5)

                        # last digits are spind UB matrix
                        vals = list(map(float, c_rest))
                        ub_matrix_spind = np.transpose(np.array(vals).reshape(3, 3))
                        ub_matrices.append(ub_matrix_spind)
                        spind_res["ub_matrix"].append(str(ub_matrix_spind * 1e-10))

                log.info(f"Content of {spind_out_file}:")
                with open(spind_out_file) as f:
                    log.info(f.read())

            except FileNotFoundError:
                log.warning("No results from spind")

            results_table_source.data.update(spind_res)

    process_button = Button(label="Process", button_type="primary")
    process_button.on_click(process_button_callback)

    if doc.spind_path is None:
        process_button.disabled = True

    ub_matrix_textareainput = TextAreaInput(title="UB matrix:", rows=7, width=400)
    hkl_textareainput = TextAreaInput(title="hkl values:", rows=7, width=400)

    def results_table_select_callback(_attr, old, new):
        if new:
            ind = new[0]
            ub_matrix_spind = ub_matrices[ind]
            res = ""
            for vec in diff_vec:
                res += f"{np.linalg.inv(ub_matrix_spind) @ vec}\n"
            ub_matrix_textareainput.value = str(ub_matrix_spind * 1e-10)
            hkl_textareainput.value = res
        else:
            ub_matrix_textareainput.value = ""
            hkl_textareainput.value = ""

    results_table_source = ColumnDataSource(
        dict(label=[], crystal_id=[], match_rate=[], matched_peaks=[], column_5=[], ub_matrix=[])
    )
    results_table = DataTable(
        source=results_table_source,
        columns=[
            TableColumn(field="label", title="Label", width=50),
            TableColumn(field="crystal_id", title="Crystal ID", width=100),
            TableColumn(field="match_rate", title="Match Rate", width=100),
            TableColumn(field="matched_peaks", title="Matched Peaks", width=100),
            TableColumn(field="column_5", title="", width=100),
            TableColumn(field="ub_matrix", title="UB Matrix", width=700),
        ],
        height=300,
        width=1200,
        autosize_mode="none",
        index_position=None,
    )

    results_table_source.selected.on_change("indices", results_table_select_callback)

    tab_layout = row(
        column(
            npeaks_spinner,
            lattice_const_textinput,
            row(max_res_spinner, seed_pool_size_spinner),
            row(seed_len_tol_spinner, seed_angle_tol_spinner),
            row(eval_hkl_tol_spinner),
            process_button,
        ),
        column(results_table, row(ub_matrix_textareainput, hkl_textareainput)),
    )

    async def update_npeaks_spinner():
        npeaks = len(next(iter(doc.events_data.values())))
        npeaks_spinner.value = npeaks
        # TODO: check cell parameter for consistency?
        if npeaks:
            lattice_const_textinput.value = ",".join(map(str, doc.events_data["cell"][0]))

    doc.add_periodic_callback(update_npeaks_spinner, 1000)

    return Panel(child=tab_layout, title="spind")
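A minimal sketch of the spind.txt parsing step above, factored out as a function. The only assumption, implied by the parser in the panel, is that each line is whitespace-separated with five leading columns followed by the nine UB-matrix entries.

import numpy as np

def parse_spind_line(line):
    # five leading columns, then the flattened 3x3 UB matrix
    c1, c2, c3, c4, c5, *c_rest = line.split()
    ub = np.transpose(np.array(list(map(float, c_rest))).reshape(3, 3))
    return dict(label=c1, crystal_id=c2, match_rate=float(c3),
                matched_peaks=int(c4), column_5=c5, ub_matrix=ub)

# hkl indices then follow from the inverse UB applied to a diffraction vector,
# as in results_table_select_callback: hkl = np.linalg.inv(ub) @ diff_vector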
549 pyzebra/app/plot_hkl.py Normal file
@@ -0,0 +1,549 @@
import base64
import io
import os

import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import (
    Arrow,
    Button,
    CheckboxGroup,
    ColumnDataSource,
    Div,
    FileInput,
    HoverTool,
    Legend,
    LegendItem,
    NormalHead,
    NumericInput,
    RadioGroup,
    Spinner,
    TextAreaInput,
    TextInput,
)
from bokeh.palettes import Dark2
from bokeh.plotting import figure
from scipy.integrate import simpson, trapezoid

import pyzebra


class PlotHKL:
    def __init__(self):
        doc = curdoc()
        log = doc.logger

        _update_slice = None
        measured_data_div = Div(text="Measured <b>CCL</b> data:")
        measured_data = FileInput(accept=".ccl", multiple=True, width=200)

        upload_hkl_div = Div(text="Open hkl/mhkl data:")
        upload_hkl_fi = FileInput(accept=".hkl,.mhkl", multiple=True, width=200)

        min_grid_x = -10
        max_grid_x = 10
        min_grid_y = -10
        max_grid_y = 10
        cmap = Dark2[8]
        syms = ["circle", "inverted_triangle", "square", "diamond", "star", "triangle"]

        def _prepare_plotting():
            orth_dir = list(map(float, hkl_normal.value.split()))
            x_dir = list(map(float, hkl_in_plane_x.value.split()))

            k = np.array(k_vectors.value.split()).astype(float).reshape(-1, 3)
            tol_k = tol_k_ni.value

            # multiplier for resolution function (in case of samples with large mosaicity)
            res_mult = res_mult_ni.value

            md_fnames = measured_data.filename
            md_fdata = measured_data.value

            # Load first data file, read angles and define matrices to perform conversion to cartesian
            # coordinates and back
            with io.StringIO(base64.b64decode(md_fdata[0]).decode()) as file:
                _, ext = os.path.splitext(md_fnames[0])
                try:
                    file_data = pyzebra.parse_1D(file, ext, log=log)
                except Exception as e:
                    log.exception(e)
                    return None

            alpha = file_data[0]["alpha_cell"] * np.pi / 180.0
            beta = file_data[0]["beta_cell"] * np.pi / 180.0
            gamma = file_data[0]["gamma_cell"] * np.pi / 180.0

            # reciprocal angle parameters
            beta_star = np.arccos(
                (np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
            )
            gamma_star = np.arccos(
                (np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
            )

            # conversion matrix
            M = np.array(
                [
                    [1, np.cos(gamma_star), np.cos(beta_star)],
                    [0, np.sin(gamma_star), -np.sin(beta_star) * np.cos(alpha)],
                    [0, 0, np.sin(beta_star) * np.sin(alpha)],
                ]
            )

            # Get last lattice vector
            y_dir = np.cross(x_dir, orth_dir)  # Second axis of plotting plane

            # Rescale such that the smallest non-zero element of the y_dir vector is 1
            y_dir2 = y_dir[y_dir != 0]
            min_val = np.min(np.abs(y_dir2))
            y_dir = y_dir / min_val

            # Possibly flip direction of y_dir:
            if y_dir[np.argmax(abs(y_dir))] < 0:
                y_dir = -y_dir

            # Display the resulting y_dir
            hkl_in_plane_y.value = " ".join([f"{val:.1f}" for val in y_dir])

            # Save length of lattice vectors
            x_length = np.linalg.norm(x_dir)
            y_length = np.linalg.norm(y_dir)

            # Save str for labels
            xlabel_str = " ".join(map(str, x_dir))
            ylabel_str = " ".join(map(str, y_dir))

            # Normalize lattice vectors
            y_dir = y_dir / np.linalg.norm(y_dir)
            x_dir = x_dir / np.linalg.norm(x_dir)
            orth_dir = orth_dir / np.linalg.norm(orth_dir)

            # Calculate cartesian equivalents of lattice vectors
            x_c = np.matmul(M, x_dir)
            y_c = np.matmul(M, y_dir)
            o_c = np.matmul(M, orth_dir)

            # Calculate vertical direction in plotting plane
            y_vert = np.cross(x_c, o_c)  # vertical direction in plotting plane
            if y_vert[np.argmax(abs(y_vert))] < 0:
                y_vert = -y_vert
            y_vert = y_vert / np.linalg.norm(y_vert)

            # Normalize all directions
            y_c = y_c / np.linalg.norm(y_c)
            x_c = x_c / np.linalg.norm(x_c)
            o_c = o_c / np.linalg.norm(o_c)

            # Read all data
            hkl_coord = []
            intensity_vec = []
            k_flag_vec = []
            file_flag_vec = []
            res_vec = []
            res_N = 10

            for j, md_fname in enumerate(md_fnames):
                with io.StringIO(base64.b64decode(md_fdata[j]).decode()) as file:
                    _, ext = os.path.splitext(md_fname)
                    try:
                        file_data = pyzebra.parse_1D(file, ext, log=log)
                    except Exception as e:
                        log.exception(e)
                        return None

                pyzebra.normalize_dataset(file_data)

                # Loop through all data
                for scan in file_data:
                    om = scan["omega"]
                    gammad = scan["twotheta"]
                    chi = scan["chi"]
                    phi = scan["phi"]
                    nud = 0  # 1d detector
                    ub_inv = np.linalg.inv(scan["ub"])
                    counts = scan["counts"]
                    wave = scan["wavelength"]

                    # Calculate resolution in degrees
                    expr = np.tan(gammad / 2 * np.pi / 180)
                    fwhm = np.sqrt(0.4639 * expr**2 - 0.4452 * expr + 0.1506) * res_mult
                    res = 4 * np.pi / wave * np.sin(fwhm * np.pi / 180)

                    # Get first and final hkl
                    hkl1 = pyzebra.ang2hkl_1d(wave, gammad, om[0], chi, phi, nud, ub_inv)
                    hkl2 = pyzebra.ang2hkl_1d(wave, gammad, om[-1], chi, phi, nud, ub_inv)

                    # Get hkl at best intensity
                    hkl_m = pyzebra.ang2hkl_1d(
                        wave, gammad, om[np.argmax(counts)], chi, phi, nud, ub_inv
                    )

                    # Estimate intensity for marker size scaling
                    y_bkg = [counts[0], counts[-1]]
                    x_bkg = [om[0], om[-1]]
                    c = int(simpson(counts, x=om) - trapezoid(y_bkg, x=x_bkg))

                    # Recognize k_flag_vec
                    reduced_hkl_m = np.minimum(1 - hkl_m % 1, hkl_m % 1)
                    for ind, _k in enumerate(k):
                        if all(np.abs(reduced_hkl_m - _k) < tol_k):
                            k_flag_vec.append(ind)
                            break
                    else:
                        # not required
                        continue

                    # Save data
                    hkl_coord.append([hkl1, hkl2, hkl_m])
                    intensity_vec.append(c)
                    file_flag_vec.append(j)
                    res_vec.append(res)

            x_spacing = np.dot(M @ x_dir, x_c) * x_length
            y_spacing = np.dot(M @ y_dir, y_vert) * y_length
            y_spacingx = np.dot(M @ y_dir, x_c) * y_length

            # Plot coordinate system
            arrow1.x_end = x_spacing
            arrow1.y_end = 0
            arrow2.x_end = y_spacingx
            arrow2.y_end = y_spacing

            # Add labels
            kvect_source.data.update(
                x=[x_spacing / 4, -0.1],
                y=[x_spacing / 4 - 0.5, y_spacing / 2],
                text=[xlabel_str, ylabel_str],
            )

            # Plot grid lines
            xs, ys = [], []
            xs_minor, ys_minor = [], []
            for yy in np.arange(min_grid_y, max_grid_y, 1):
                # Calculate end and start point
                hkl1 = min_grid_x * x_dir + yy * y_dir
                hkl2 = max_grid_x * x_dir + yy * y_dir
                hkl1 = M @ hkl1
                hkl2 = M @ hkl2

                # Project points onto axes
                x1 = np.dot(x_c, hkl1) * x_length
                y1 = np.dot(y_vert, hkl1) * y_length
                x2 = np.dot(x_c, hkl2) * x_length
                y2 = np.dot(y_vert, hkl2) * y_length

                xs.append([x1, x2])
                ys.append([y1, y2])

            for xx in np.arange(min_grid_x, max_grid_x, 1):
                # Calculate end and start point
                hkl1 = xx * x_dir + min_grid_y * y_dir
                hkl2 = xx * x_dir + max_grid_y * y_dir
                hkl1 = M @ hkl1
                hkl2 = M @ hkl2

                # Project points onto axes
                x1 = np.dot(x_c, hkl1) * x_length
                y1 = np.dot(y_vert, hkl1) * y_length
                x2 = np.dot(x_c, hkl2) * x_length
                y2 = np.dot(y_vert, hkl2) * y_length

                xs.append([x1, x2])
                ys.append([y1, y2])

            for yy in np.arange(min_grid_y, max_grid_y, 0.5):
                # Calculate end and start point
                hkl1 = min_grid_x * x_dir + yy * y_dir
                hkl2 = max_grid_x * x_dir + yy * y_dir
                hkl1 = M @ hkl1
                hkl2 = M @ hkl2

                # Project points onto axes
                x1 = np.dot(x_c, hkl1) * x_length
                y1 = np.dot(y_vert, hkl1) * y_length
                x2 = np.dot(x_c, hkl2) * x_length
                y2 = np.dot(y_vert, hkl2) * y_length

                xs_minor.append([x1, x2])
                ys_minor.append([y1, y2])

            for xx in np.arange(min_grid_x, max_grid_x, 0.5):
                # Calculate end and start point
                hkl1 = xx * x_dir + min_grid_y * y_dir
                hkl2 = xx * x_dir + max_grid_y * y_dir
                hkl1 = M @ hkl1
                hkl2 = M @ hkl2

                # Project points onto axes
                x1 = np.dot(x_c, hkl1) * x_length
                y1 = np.dot(y_vert, hkl1) * y_length
                x2 = np.dot(x_c, hkl2) * x_length
                y2 = np.dot(y_vert, hkl2) * y_length

                xs_minor.append([x1, x2])
                ys_minor.append([y1, y2])

            grid_source.data.update(xs=xs, ys=ys)
            minor_grid_source.data.update(xs=xs_minor, ys=ys_minor)

            # Prepare hkl/mhkl data
            hkl_coord2 = []
            for j, fname in enumerate(upload_hkl_fi.filename):
                with io.StringIO(base64.b64decode(upload_hkl_fi.value[j]).decode()) as file:
                    _, ext = os.path.splitext(fname)
                    try:
                        fdata = pyzebra.parse_hkl(file, ext)
                    except Exception as e:
                        log.exception(e)
                        return

                for ind in range(len(fdata["counts"])):
                    # Recognize k_flag_vec
                    hkl = np.array([fdata["h"][ind], fdata["k"][ind], fdata["l"][ind]])

                    # Save data
                    hkl_coord2.append(hkl)

            def _update_slice():
                cut_tol = hkl_delta.value
                cut_or = hkl_cut.value

                # different symbols based on file number
                file_flag = 0 in disting_opt_cb.active
                # scale marker size according to intensity
                intensity_flag = 1 in disting_opt_cb.active
                # use color to mark different propagation vectors
                prop_legend_flag = 2 in disting_opt_cb.active
                # use resolution ellipses
                res_flag = disting_opt_rb.active

                el_x, el_y, el_w, el_h, el_c = [], [], [], [], []
                scan_xs, scan_ys, scan_x, scan_y = [], [], [], []
                scan_m, scan_s, scan_c, scan_l, scan_hkl = [], [], [], [], []
                for j in range(len(hkl_coord)):
                    # Get middle hkl from list
                    hklm = M @ hkl_coord[j][2]

                    # Decide if point is in the cut
                    proj = np.dot(hklm, o_c)
                    if abs(proj - cut_or) >= cut_tol:
                        continue

                    hkl1 = M @ hkl_coord[j][0]
                    hkl2 = M @ hkl_coord[j][1]

                    # Project onto axes
                    hkl1x = np.dot(hkl1, x_c)
                    hkl1y = np.dot(hkl1, y_vert)
                    hkl2x = np.dot(hkl2, x_c)
                    hkl2y = np.dot(hkl2, y_vert)
                    hklmx = np.dot(hklm, x_c)
                    hklmy = np.dot(hklm, y_vert)

                    if intensity_flag:
                        markersize = max(6, int(intensity_vec[j] / max(intensity_vec) * 30))
                    else:
                        markersize = 6

                    if file_flag:
                        plot_symbol = syms[file_flag_vec[j]]
                    else:
                        plot_symbol = "circle"

                    if prop_legend_flag:
                        col_value = cmap[k_flag_vec[j]]
                    else:
                        col_value = "black"

                    if res_flag:
                        # Generate series of circles along scan line
                        res = res_vec[j]
                        el_x.extend(np.linspace(hkl1x, hkl2x, num=res_N))
                        el_y.extend(np.linspace(hkl1y, hkl2y, num=res_N))
                        el_w.extend([res / 2] * res_N)
                        el_h.extend([res / 2] * res_N)
                        el_c.extend([col_value] * res_N)
                    else:
                        # Plot scan line
                        scan_xs.append([hkl1x, hkl2x])
                        scan_ys.append([hkl1y, hkl2y])

                        # Plot middle point of scan
                        scan_x.append(hklmx)
                        scan_y.append(hklmy)
                        scan_m.append(plot_symbol)
                        scan_s.append(markersize)

                        # Color and legend label
                        scan_c.append(col_value)
                        scan_l.append(md_fnames[file_flag_vec[j]])
                        scan_hkl.append(hkl_coord[j][2])

                ellipse_source.data.update(x=el_x, y=el_y, width=el_w, height=el_h, c=el_c)
                scan_source.data.update(
                    xs=scan_xs,
                    ys=scan_ys,
                    x=scan_x,
                    y=scan_y,
                    m=scan_m,
                    s=scan_s,
                    c=scan_c,
                    l=scan_l,
                    hkl=scan_hkl,
                )

                # Legend items for different file entries (symbol)
                legend_items = []
                if not res_flag and file_flag:
                    labels, inds = np.unique(scan_source.data["l"], return_index=True)
                    for label, ind in zip(labels, inds):
                        legend_items.append(LegendItem(label=label, renderers=[scatter], index=ind))

                # Legend items for propagation vector (color)
                if prop_legend_flag:
                    if res_flag:
                        source, render = ellipse_source, ellipse
                    else:
                        source, render = scan_source, mline

                    labels, inds = np.unique(source.data["c"], return_index=True)
                    for label, ind in zip(labels, inds):
                        label = f"k={k[cmap.index(label)]}"
                        legend_items.append(LegendItem(label=label, renderers=[render], index=ind))

                plot.legend.items = legend_items

                scan_x2, scan_y2, scan_hkl2 = [], [], []
                for j in range(len(hkl_coord2)):
                    # Get middle hkl from list
                    hklm = M @ hkl_coord2[j]

                    # Decide if point is in the cut
                    proj = np.dot(hklm, o_c)
                    if abs(proj - cut_or) >= cut_tol:
                        continue

                    # Project onto axes
                    hklmx = np.dot(hklm, x_c)
                    hklmy = np.dot(hklm, y_vert)

                    scan_x2.append(hklmx)
                    scan_y2.append(hklmy)
                    scan_hkl2.append(hkl_coord2[j])

                scatter_source2.data.update(x=scan_x2, y=scan_y2, hkl=scan_hkl2)

            return _update_slice

        def plot_file_callback():
            nonlocal _update_slice
            _update_slice = _prepare_plotting()
            _update_slice()

        plot_file = Button(label="Plot selected file(s)", button_type="primary", width=200)
        plot_file.on_click(plot_file_callback)

        plot = figure(height=550, width=550 + 32, tools="pan,wheel_zoom,reset")
        plot.toolbar.logo = None

        plot.xaxis.visible = False
        plot.xgrid.visible = False
        plot.yaxis.visible = False
        plot.ygrid.visible = False

        arrow1 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10))
        plot.add_layout(arrow1)
        arrow2 = Arrow(x_start=0, y_start=0, x_end=0, y_end=0, end=NormalHead(size=10))
        plot.add_layout(arrow2)

        kvect_source = ColumnDataSource(dict(x=[], y=[], text=[]))
        plot.text(source=kvect_source)

        grid_source = ColumnDataSource(dict(xs=[], ys=[]))
        plot.multi_line(source=grid_source, line_color="gray")

        minor_grid_source = ColumnDataSource(dict(xs=[], ys=[]))
        plot.multi_line(source=minor_grid_source, line_color="gray", line_dash="dotted")

        ellipse_source = ColumnDataSource(dict(x=[], y=[], width=[], height=[], c=[]))
        ellipse = plot.ellipse(source=ellipse_source, fill_color="c", line_color="c")

        scan_source = ColumnDataSource(
            dict(xs=[], ys=[], x=[], y=[], m=[], s=[], c=[], l=[], hkl=[])
        )
        mline = plot.multi_line(source=scan_source, line_color="c")
        scatter = plot.scatter(
            source=scan_source, marker="m", size="s", fill_color="c", line_color="c"
        )

        scatter_source2 = ColumnDataSource(dict(x=[], y=[], hkl=[]))
        scatter2 = plot.scatter(
            source=scatter_source2, size=4, fill_color="green", line_color="green"
        )

        plot.x_range.renderers = [ellipse, mline, scatter, scatter2]
        plot.y_range.renderers = [ellipse, mline, scatter, scatter2]

        plot.add_layout(Legend(items=[], location="top_left", click_policy="hide"))

        plot.add_tools(HoverTool(renderers=[scatter, scatter2], tooltips=[("hkl", "@hkl")]))

        hkl_div = Div(text="HKL:", margin=(5, 5, 0, 5))
        hkl_normal = TextInput(title="normal", value="0 0 1", width=70)

        def hkl_cut_callback(_attr, _old, _new):
            if _update_slice is not None:
                _update_slice()

        hkl_cut = Spinner(title="cut", value=0, step=0.1, width=70)
        hkl_cut.on_change("value_throttled", hkl_cut_callback)

        hkl_delta = NumericInput(title="delta", value=0.1, mode="float", width=70)
        hkl_in_plane_x = TextInput(title="in-plane X", value="1 0 0", width=70)
        hkl_in_plane_y = TextInput(title="in-plane Y", value="", width=100, disabled=True)

        disting_opt_div = Div(text="Distinguish options:", margin=(5, 5, 0, 5))
        disting_opt_cb = CheckboxGroup(
            labels=["files (symbols)", "intensities (size)", "k vectors nucl/magn (colors)"],
            active=[0, 1, 2],
            width=200,
        )
        disting_opt_rb = RadioGroup(
            labels=["scan direction", "resolution ellipsoid"], active=0, width=200
        )

        k_vectors = TextAreaInput(
            title="k vectors:", value="0.0 0.0 0.0\n0.5 0.0 0.0\n0.5 0.5 0.0", width=150
        )
        res_mult_ni = NumericInput(title="Resolution mult:", value=10, mode="int", width=100)
        tol_k_ni = NumericInput(title="k tolerance:", value=0.01, mode="float", width=100)

        def show_legend_cb_callback(_attr, _old, new):
            plot.legend.visible = 0 in new

        show_legend_cb = CheckboxGroup(labels=["Show legend"], active=[0])
        show_legend_cb.on_change("active", show_legend_cb_callback)

        layout = column(
            row(
                column(row(measured_data_div, measured_data), row(upload_hkl_div, upload_hkl_fi)),
                plot_file,
            ),
            row(
                plot,
                column(
                    hkl_div,
                    row(hkl_normal, hkl_cut, hkl_delta),
                    row(hkl_in_plane_x, hkl_in_plane_y),
                    k_vectors,
                    row(tol_k_ni, res_mult_ni),
                    disting_opt_div,
                    disting_opt_cb,
                    disting_opt_rb,
                    show_legend_cb,
                ),
            ),
        )
        self.layout = layout
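A standalone sketch (not part of the diff) of the propagation-vector matching rule used in `_prepare_plotting` above: each measured hkl is folded to its distance from the nearest integer and compared against the candidate k vectors within tol_k. The hkl value here is made up for illustration.

import numpy as np

k_candidates = np.array([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.5, 0.0]])
tol_k = 0.01

hkl_m = np.array([1.0, 0.5, 2.0])  # hypothetical peak position
reduced = np.minimum(1 - hkl_m % 1, hkl_m % 1)  # distance to nearest integer hkl

for ind, k in enumerate(k_candidates):
    if np.all(np.abs(reduced - k) < tol_k):
        print(f"peak matches k vector #{ind}: {k}")
        break
else:
    print("no matching k vector; the scan would be skipped")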
@@ -1,513 +0,0 @@
|
||||
import numpy as np
|
||||
import uncertainties as u
|
||||
|
||||
from .fit2 import create_uncertanities
|
||||
|
||||
|
||||
def add_dict(dict1, dict2):
|
||||
"""adds two dictionaries, meta of the new is saved as meata+original_filename and
|
||||
measurements are shifted to continue with numbering of first dict
|
||||
:arg dict1 : dictionarry to add to
|
||||
:arg dict2 : dictionarry from which to take the measurements
|
||||
:return dict1 : combined dictionary
|
||||
Note: dict1 must be made from ccl, otherwise we would have to change the structure of loaded
|
||||
dat file"""
|
||||
max_measurement_dict1 = max([int(str(keys)[1:]) for keys in dict1["scan"]])
|
||||
if dict2["meta"]["data_type"] == ".ccl":
|
||||
new_filenames = [
|
||||
"M" + str(x + max_measurement_dict1)
|
||||
for x in [int(str(keys)[1:]) for keys in dict2["scan"]]
|
||||
]
|
||||
new_meta_name = "meta" + str(dict2["meta"]["original_filename"])
|
||||
if new_meta_name not in dict1:
|
||||
for keys, name in zip(dict2["scan"], new_filenames):
|
||||
dict2["scan"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
|
||||
dict1["scan"][name] = dict2["scan"][keys]
|
||||
|
||||
dict1[new_meta_name] = dict2["meta"]
|
||||
|
||||
else:
|
||||
raise KeyError(
|
||||
str(
|
||||
"The file %s has alredy been added to %s"
|
||||
% (dict2["meta"]["original_filename"], dict1["meta"]["original_filename"])
|
||||
)
|
||||
)
|
||||
elif dict2["meta"]["data_type"] == ".dat":
|
||||
d = {}
|
||||
new_name = "M" + str(max_measurement_dict1 + 1)
|
||||
hkl = dict2["meta"]["title"]
|
||||
d["h_index"] = float(hkl.split()[-3])
|
||||
d["k_index"] = float(hkl.split()[-2])
|
||||
d["l_index"] = float(hkl.split()[-1])
|
||||
d["number_of_measurements"] = len(dict2["scan"]["NP"])
|
||||
d["om"] = dict2["scan"]["om"]
|
||||
d["Counts"] = dict2["scan"]["Counts"]
|
||||
d["monitor"] = dict2["scan"]["Monitor1"][0]
|
||||
d["temperature"] = dict2["meta"]["temp"]
|
||||
d["mag_field"] = dict2["meta"]["mf"]
|
||||
d["omega_angle"] = dict2["meta"]["omega"]
|
||||
dict1["scan"][new_name] = d
|
||||
print(hkl.split())
|
||||
for keys in d:
|
||||
print(keys)
|
||||
|
||||
print("s")
|
||||
|
||||
return dict1
|
||||
|
||||
|
||||
def auto(dict):
|
||||
"""takes just unique tuples from all tuples in dictionary returend by scan_dict
|
||||
intendet for automatic merge if you doesent want to specify what scans to merge together
|
||||
args: dict - dictionary from scan_dict function
|
||||
:return dict - dict without repetitions"""
|
||||
for keys in dict:
|
||||
tuple_list = dict[keys]
|
||||
new = list()
|
||||
for i in range(len(tuple_list)):
|
||||
if tuple_list[0][0] == tuple_list[i][0]:
|
||||
new.append(tuple_list[i])
|
||||
dict[keys] = new
|
||||
return dict
|
||||
|
||||
|
||||
def scan_dict(dict):
|
||||
"""scans dictionary for duplicate hkl indexes
|
||||
:arg dict : dictionary to scan
|
||||
:return dictionary with matching scans, if there are none, the dict is empty
|
||||
note: can be checked by "not d", true if empty
|
||||
"""
|
||||
|
||||
d = {}
|
||||
for i in dict["scan"]:
|
||||
for j in dict["scan"]:
|
||||
if dict["scan"][str(i)] != dict["scan"][str(j)]:
|
||||
itup = (
|
||||
dict["scan"][str(i)]["h_index"],
|
||||
dict["scan"][str(i)]["k_index"],
|
||||
dict["scan"][str(i)]["l_index"],
|
||||
)
|
||||
jtup = (
|
||||
dict["scan"][str(j)]["h_index"],
|
||||
dict["scan"][str(j)]["k_index"],
|
||||
dict["scan"][str(j)]["l_index"],
|
||||
)
|
||||
if itup != jtup:
|
||||
pass
|
||||
else:
|
||||
|
||||
if str(itup) not in d:
|
||||
d[str(itup)] = list()
|
||||
d[str(itup)].append((i, j))
|
||||
else:
|
||||
d[str(itup)].append((i, j))
|
||||
else:
|
||||
continue
|
||||
return d
|
||||
|
||||
|
||||
def compare_hkl(dict1, dict2):
|
||||
"""Compares two dictionaries based on hkl indexes and return dictionary with str(h k l) as
|
||||
key and tuple with keys to same scan in dict1 and dict2
|
||||
:arg dict1 : first dictionary
|
||||
:arg dict2 : second dictionary
|
||||
:return d : dict with matches
|
||||
example of one key: '0.0 0.0 -1.0 : ('M1', 'M9')' meaning that 001 hkl scan is M1 in
|
||||
first dict and M9 in second"""
|
||||
d = {}
|
||||
dupl = 0
|
||||
for keys in dict1["scan"]:
|
||||
for key in dict2["scan"]:
|
||||
if (
|
||||
dict1["scan"][str(keys)]["h_index"] == dict2["scan"][str(key)]["h_index"]
|
||||
and dict1["scan"][str(keys)]["k_index"] == dict2["scan"][str(key)]["k_index"]
|
||||
and dict1["scan"][str(keys)]["l_index"] == dict2["scan"][str(key)]["l_index"]
|
||||
):
|
||||
|
||||
if (
|
||||
str(
|
||||
(
|
||||
str(dict1["scan"][str(keys)]["h_index"])
|
||||
+ " "
|
||||
+ str(dict1["scan"][str(keys)]["k_index"])
|
||||
+ " "
|
||||
+ str(dict1["scan"][str(keys)]["l_index"])
|
||||
)
|
||||
)
|
||||
not in d
|
||||
):
|
||||
d[
|
||||
str(
|
||||
str(dict1["scan"][str(keys)]["h_index"])
|
||||
+ " "
|
||||
+ str(dict1["scan"][str(keys)]["k_index"])
|
||||
+ " "
|
||||
+ str(dict1["scan"][str(keys)]["l_index"])
|
||||
)
|
||||
] = (str(keys), str(key))
|
||||
else:
|
||||
dupl = dupl + 1
|
||||
d[
|
||||
str(
|
||||
str(dict1["scan"][str(keys)]["h_index"])
|
||||
+ " "
|
||||
+ str(dict1["scan"][str(keys)]["k_index"])
|
||||
+ " "
|
||||
+ str(dict1["scan"][str(keys)]["l_index"])
|
||||
+ "_dupl"
|
||||
+ str(dupl)
|
||||
)
|
||||
] = (str(keys), str(key))
|
||||
else:
|
||||
continue
|
||||
|
||||
return d
|
||||
|
||||
|
||||
def create_tuples(x, y, y_err):
|
||||
"""creates tuples for sorting and merginng of the data
|
||||
Counts need to be normalized to monitor before"""
|
||||
t = list()
|
||||
for i in range(len(x)):
|
||||
tup = (x[i], y[i], y_err[i])
|
||||
t.append(tup)
|
||||
return t
|
||||
|
||||
|
||||
def normalize(dict, key, monitor):
|
||||
"""Normalizes the scan to monitor, checks if sigma exists, otherwise creates it
|
||||
:arg dict : dictionary to from which to tkae the scan
|
||||
:arg key : which scan to normalize from dict1
|
||||
:arg monitor : final monitor
|
||||
:return counts - normalized counts
|
||||
:return sigma - normalized sigma"""
|
||||
|
||||
counts = np.array(dict["scan"][key]["Counts"])
|
||||
sigma = np.sqrt(counts) if "sigma" not in dict["scan"][key] else dict["scan"][key]["sigma"]
|
||||
monitor_ratio = monitor / dict["scan"][key]["monitor"]
|
||||
scaled_counts = counts * monitor_ratio
|
||||
scaled_sigma = np.array(sigma) * monitor_ratio
|
||||
|
||||
return scaled_counts, scaled_sigma
|
||||
|
||||
|
||||
def merge(dict1, dict2, keys, auto=True, monitor=100000):
|
||||
"""merges the two tuples and sorts them, if om value is same, Counts value is average
|
||||
averaging is propagated into sigma if dict1 == dict2, key[1] is deleted after merging
|
||||
:arg dict1 : dictionary to which scan will be merged
|
||||
:arg dict2 : dictionary from which scan will be merged
|
||||
:arg keys : tuple with key to dict1 and dict2
|
||||
:arg auto : if true, when monitors are same, does not change it, if flase, takes monitor always
|
||||
:arg monitor : final monitor after merging
|
||||
note: dict1 and dict2 can be same dict
|
||||
:return dict1 with merged scan"""
|
||||
if auto:
|
||||
if dict1["scan"][keys[0]]["monitor"] == dict2["scan"][keys[1]]["monitor"]:
|
||||
monitor = dict1["scan"][keys[0]]["monitor"]
|
||||
|
||||
# load om and Counts
|
||||
x1, x2 = dict1["scan"][keys[0]]["om"], dict2["scan"][keys[1]]["om"]
|
||||
cor_y1, y_err1 = normalize(dict1, keys[0], monitor=monitor)
|
||||
cor_y2, y_err2 = normalize(dict2, keys[1], monitor=monitor)
|
||||
# creates touples (om, Counts, sigma) for sorting and further processing
|
||||
tuple_list = create_tuples(x1, cor_y1, y_err1) + create_tuples(x2, cor_y2, y_err2)
|
||||
# Sort the list on om and add 0 0 0 tuple to the last position
|
||||
sorted_t = sorted(tuple_list, key=lambda tup: tup[0])
|
||||
sorted_t.append((0, 0, 0))
|
||||
om, Counts, sigma = [], [], []
|
||||
seen = list()
|
||||
for i in range(len(sorted_t) - 1):
|
||||
if sorted_t[i][0] not in seen:
|
||||
if sorted_t[i][0] != sorted_t[i + 1][0]:
|
||||
om = np.append(om, sorted_t[i][0])
|
||||
Counts = np.append(Counts, sorted_t[i][1])
|
||||
sigma = np.append(sigma, sorted_t[i][2])
|
||||
else:
|
||||
om = np.append(om, sorted_t[i][0])
|
||||
counts1, counts2 = sorted_t[i][1], sorted_t[i + 1][1]
|
||||
sigma1, sigma2 = sorted_t[i][2], sorted_t[i + 1][2]
|
||||
count_err1 = u.ufloat(counts1, sigma1)
|
||||
count_err2 = u.ufloat(counts2, sigma2)
|
||||
avg = (count_err1 + count_err2) / 2
|
||||
Counts = np.append(Counts, avg.n)
|
||||
sigma = np.append(sigma, avg.s)
|
||||
seen.append(sorted_t[i][0])
|
||||
else:
|
||||
continue
|
||||
|
||||
if dict1 == dict2:
|
||||
del dict1["scan"][keys[1]]
|
||||
|
||||
note = (
|
||||
f"This scan was merged with scan {keys[1]} from "
|
||||
f'file {dict2["meta"]["original_filename"]} \n'
|
||||
)
|
||||
if "notes" not in dict1["scan"][str(keys[0])]:
|
||||
dict1["scan"][str(keys[0])]["notes"] = note
|
||||
else:
|
||||
dict1["scan"][str(keys[0])]["notes"] += note
|
||||
|
||||
dict1["scan"][keys[0]]["om"] = om
|
||||
dict1["scan"][keys[0]]["Counts"] = Counts
|
||||
dict1["scan"][keys[0]]["sigma"] = sigma
|
||||
dict1["scan"][keys[0]]["monitor"] = monitor
|
||||
print("merging done")
|
||||
return dict1
|
||||
|
||||
|
||||
def substract_measurement(dict1, dict2, keys, auto=True, monitor=100000):
|
||||
"""Substracts two scan (scan key2 from dict2 from measurent key1 in dict1), expects om to be same
|
||||
:arg dict1 : dictionary to which scan will be merged
|
||||
:arg dict2 : dictionary from which scan will be merged
|
||||
:arg keys : tuple with key to dict1 and dict2
|
||||
:arg auto : if true, when monitors are same, does not change it, if flase, takes monitor always
|
||||
:arg monitor : final monitor after merging
|
||||
:returns d : dict1 with substracted Counts from dict2 and sigma that comes from the substraction"""
|
||||
|
||||
if len(dict1["scan"][keys[0]]["om"]) != len(dict2["scan"][keys[1]]["om"]):
|
||||
raise ValueError("Omegas have different lengths, cannot be substracted")
|
||||
|
||||
if auto:
|
||||
if dict1["scan"][keys[0]]["monitor"] == dict2["scan"][keys[1]]["monitor"]:
|
||||
monitor = dict1["scan"][keys[0]]["monitor"]
|
||||
|
||||
cor_y1, y_err1 = normalize(dict1, keys[0], monitor=monitor)
|
||||
cor_y2, y_err2 = normalize(dict2, keys[1], monitor=monitor)
|
||||
|
||||
dict1_count_err = create_uncertanities(cor_y1, y_err1)
|
||||
dict2_count_err = create_uncertanities(cor_y2, y_err2)
|
||||
|
||||
res = np.subtract(dict1_count_err, dict2_count_err)
|
||||
|
||||
res_nom = []
|
||||
res_err = []
|
||||
for k in range(len(res)):
|
||||
res_nom = np.append(res_nom, res[k].n)
|
||||
res_err = np.append(res_err, res[k].s)
|
||||
|
||||
if len([num for num in res_nom if num < 0]) >= 0.3 * len(res_nom):
|
||||
print(
|
||||
f"Warning! percentage of negative numbers in scan subsracted {keys[0]} is "
|
||||
f"{len([num for num in res_nom if num < 0]) / len(res_nom)}"
|
||||
)
|
||||
|
||||
dict1["scan"][str(keys[0])]["Counts"] = res_nom
|
||||
dict1["scan"][str(keys[0])]["sigma"] = res_err
|
||||
dict1["scan"][str(keys[0])]["monitor"] = monitor
|
||||
note = (
|
||||
f'Scan {keys[1]} from file {dict2["meta"]["original_filename"]} '
|
||||
f"was substracted from this scan \n"
|
||||
)
|
||||
if "notes" not in dict1["scan"][str(keys[0])]:
|
||||
dict1["scan"][str(keys[0])]["notes"] = note
|
||||
else:
|
||||
dict1["scan"][str(keys[0])]["notes"] += note
|
||||
return dict1
|
||||
|
||||
|
||||
def compare_dict(dict1, dict2):
|
||||
"""takes two ccl dictionaries and compare different values for each key
|
||||
:arg dict1 : dictionary 1 (ccl)
|
||||
:arg dict2 : dictionary 2 (ccl)
|
||||
:returns warning : dictionary with keys from primary files (if they differ) with
|
||||
information of how many scan differ and which ones differ
|
||||
:returns report_string string comparing all different values respecively of measurements"""
|
||||
|
||||
if dict1["meta"]["data_type"] != dict2["meta"]["data_type"]:
|
||||
print("select two dicts")
|
||||
return
|
||||
S = []
|
||||
conflicts = {}
|
||||
warnings = {}
|
||||
|
||||
comp = compare_hkl(dict1, dict2)
|
||||
d1 = scan_dict(dict1)
|
||||
d2 = scan_dict(dict2)
|
||||
if not d1:
|
||||
S.append("There are no duplicates in %s (dict1) \n" % dict1["meta"]["original_filename"])
|
||||
else:
|
||||
S.append(
|
||||
"There are %d duplicates in %s (dict1) \n"
|
||||
% (len(d1), dict1["meta"]["original_filename"])
|
||||
)
|
||||
warnings["Duplicates in dict1"] = list()
|
||||
for keys in d1:
|
||||
S.append("Measurements %s with hkl %s \n" % (d1[keys], keys))
|
||||
warnings["Duplicates in dict1"].append(d1[keys])
|
||||
if not d2:
|
||||
S.append("There are no duplicates in %s (dict2) \n" % dict2["meta"]["original_filename"])
|
||||
else:
|
||||
S.append(
|
||||
"There are %d duplicates in %s (dict2) \n"
|
||||
% (len(d2), dict2["meta"]["original_filename"])
|
||||
)
|
||||
warnings["Duplicates in dict2"] = list()
|
||||
for keys in d2:
|
||||
S.append("Measurements %s with hkl %s \n" % (d2[keys], keys))
|
||||
warnings["Duplicates in dict2"].append(d2[keys])
|
||||
|
||||
# compare meta
|
||||
S.append("Different values in meta: \n")
|
||||
different_meta = {
|
||||
k: dict1["meta"][k]
|
||||
for k in dict1["meta"]
|
||||
if k in dict2["meta"] and dict1["meta"][k] != dict2["meta"][k]
|
||||
}
|
||||
exlude_meta_set = ["original_filename", "date", "title"]
|
||||
for keys in different_meta:
|
||||
if keys in exlude_meta_set:
|
||||
continue
|
||||
else:
|
||||
if keys not in conflicts:
|
||||
conflicts[keys] = 1
|
||||
else:
|
||||
conflicts[keys] = conflicts[keys] + 1
|
||||
|
||||
S.append(" Different values in %s \n" % str(keys))
|
||||
S.append(" dict1: %s \n" % str(dict1["meta"][str(keys)]))
|
||||
S.append(" dict2: %s \n" % str(dict2["meta"][str(keys)]))
|
||||
|
||||
# compare Measurements
|
||||
S.append(
|
||||
"Number of measurements in %s = %s \n"
|
||||
% (dict1["meta"]["original_filename"], len(dict1["scan"]))
|
||||
)
|
||||
S.append(
|
||||
"Number of measurements in %s = %s \n"
|
||||
% (dict2["meta"]["original_filename"], len(dict2["scan"]))
|
||||
)
|
||||
S.append("Different values in Measurements:\n")
|
||||
select_set = ["om", "Counts", "sigma"]
|
||||
exlude_set = ["time", "Counts", "date", "notes"]
|
||||
for keys1 in comp:
|
||||
for key2 in dict1["scan"][str(comp[str(keys1)][0])]:
|
||||
if key2 in exlude_set:
|
||||
continue
|
||||
if key2 not in select_set:
|
||||
try:
|
||||
if (
|
||||
dict1["scan"][comp[str(keys1)][0]][str(key2)]
|
||||
!= dict2["scan"][str(comp[str(keys1)][1])][str(key2)]
|
||||
):
|
||||
S.append(
|
||||
"Scan value "
|
||||
"%s"
|
||||
", with hkl %s differs in meausrements %s and %s \n"
|
||||
% (key2, keys1, comp[str(keys1)][0], comp[str(keys1)][1])
|
||||
)
|
||||
S.append(
|
||||
" dict1: %s \n"
|
||||
% str(dict1["scan"][comp[str(keys1)][0]][str(key2)])
|
||||
)
|
||||
S.append(
|
||||
" dict2: %s \n"
|
||||
% str(dict2["scan"][comp[str(keys1)][1]][str(key2)])
|
||||
)
|
||||
if key2 not in conflicts:
|
||||
conflicts[key2] = {}
|
||||
conflicts[key2]["amount"] = 1
|
||||
conflicts[key2]["scan"] = str(comp[str(keys1)])
|
||||
else:
|
||||
|
||||
conflicts[key2]["amount"] = conflicts[key2]["amount"] + 1
|
||||
conflicts[key2]["scan"] = (
|
||||
conflicts[key2]["scan"] + " " + (str(comp[str(keys1)]))
|
||||
)
|
||||
except KeyError as e:
|
||||
print("Missing keys, some files were probably merged or substracted")
|
||||
print(e.args)
|
||||
|
||||
else:
|
||||
try:
|
||||
comparison = list(dict1["scan"][comp[str(keys1)][0]][str(key2)]) == list(
|
||||
dict2["scan"][comp[str(keys1)][1]][str(key2)]
|
||||
)
|
||||
if len(list(dict1["scan"][comp[str(keys1)][0]][str(key2)])) != len(
|
||||
list(dict2["scan"][comp[str(keys1)][1]][str(key2)])
|
||||
):
|
||||
if str("different length of %s" % key2) not in warnings:
|
||||
warnings[str("different length of %s" % key2)] = list()
|
||||
warnings[str("different length of %s" % key2)].append(
|
||||
(str(comp[keys1][0]), str(comp[keys1][1]))
|
||||
)
|
||||
else:
|
||||
warnings[str("different length of %s" % key2)].append(
|
||||
(str(comp[keys1][0]), str(comp[keys1][1]))
|
||||
)
|
||||
if not comparison:
|
||||
S.append(
|
||||
"Scan value "
|
||||
"%s"
|
||||
" differs in scan %s and %s \n"
|
||||
% (key2, comp[str(keys1)][0], comp[str(keys1)][1])
|
||||
)
|
||||
S.append(
|
||||
" dict1: %s \n"
|
||||
% str(list(dict1["scan"][comp[str(keys1)][0]][str(key2)]))
|
||||
)
|
||||
S.append(
|
||||
" dict2: %s \n"
|
||||
% str(list(dict2["scan"][comp[str(keys1)][1]][str(key2)]))
|
||||
)
|
||||
if key2 not in conflicts:
|
||||
conflicts[key2] = {}
|
||||
conflicts[key2]["amount"] = 1
|
||||
conflicts[key2]["scan"] = str(comp[str(keys1)])
|
||||
else:
|
||||
conflicts[key2]["amount"] = conflicts[key2]["amount"] + 1
|
||||
conflicts[key2]["scan"] = (
|
||||
conflicts[key2]["scan"] + " " + (str(comp[str(keys1)]))
|
||||
)
|
||||
except KeyError as e:
|
||||
print("Missing keys, some files were probably merged or substracted")
|
||||
print(e.args)
|
||||
|
||||
for keys in conflicts:
|
||||
try:
|
||||
conflicts[str(keys)]["scan"] = conflicts[str(keys)]["scan"].split(" ")
|
||||
except:
|
||||
continue
|
||||
report_string = "".join(S)
|
||||
return warnings, conflicts, report_string
|
||||
|
||||
|
||||


def guess_next(dict1, dict2, comp):
    """Iterates through the scans and tries to decide if the scans should be
    subtracted or merged."""
    threshold = 0.05
    for keys in comp:
        if (
            abs(
                (
                    dict1["scan"][str(comp[keys][0])]["temperature"]
                    - dict2["scan"][str(comp[keys][1])]["temperature"]
                )
                / dict2["scan"][str(comp[keys][1])]["temperature"]
            )
            < threshold
            and abs(
                (
                    dict1["scan"][str(comp[keys][0])]["mag_field"]
                    - dict2["scan"][str(comp[keys][1])]["mag_field"]
                )
                / dict2["scan"][str(comp[keys][1])]["mag_field"]
            )
            < threshold
        ):
            # "m" for merge; note that tuple("m") == ("m",)
            comp[keys] = comp[keys] + tuple("m")
        else:
            # "s" for subtract
            comp[keys] = comp[keys] + tuple("s")

    return comp
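
# Editor's sketch (not part of the diff): the shape "comp" is expected to have
# going into and out of guess_next. Keys are hkl strings and values are pairs
# of scan names (scan names like "M1" are hypothetical here); guess_next
# appends "m" (merge) or "s" (subtract) when temperature and mag_field agree
# within the 5% threshold.
#
#     comp = {"(1, 0, 0)": ("M1", "M4")}
#     comp = guess_next(dict1, dict2, comp)
#     # -> {"(1, 0, 0)": ("M1", "M4", "m")}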


def process_dict(dict1, dict2, comp):
    """Subtracts or merges scans; the guess_next function must run first."""
    for keys in comp:
        if comp[keys][2] == "s":
            substract_measurement(dict1, dict2, comp[keys])
        elif comp[keys][2] == "m":
            merge(dict1, dict2, comp[keys])

    return dict1
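
# Editor's sketch (not part of the diff): the intended call order. guess_next
# tags each pair of scans, process_dict then dispatches to merge() or
# substract_measurement() based on that tag.
#
#     comp = guess_next(dict1, dict2, comp)
#     dict1 = process_dict(dict1, dict2, comp)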
@@ -1,75 +0,0 @@
import numpy as np
import scipy as sc
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter


def ccl_findpeaks(
    scan, int_threshold=0.8, prominence=50, smooth=False, window_size=7, poly_order=3
):
    """Iterates through the dictionary created by load_cclv2 and locates peaks for each scan.

    args: scan - a single scan

          int_threshold - fraction of threshold_intensity/max_intensity, must be a positive
          number between 0 and 1, e.g. 0.75 will only detect peaks above 75% of max intensity

          prominence - defines a drop in counts that must exist between two peaks, must be a
          positive number, i.e. if prominence is 20, two neighbouring peaks of intensities
          300 and 310 are detected only if none of the intermediate values are lower than 290

          smooth - if True, smooths data by a Savitzky-Golay filter, if False - no smoothing

          window_size - window size for the savgol filter, must be an odd positive integer

          poly_order - order of the polynomial used in the savgol filter, must be a positive
          integer smaller than window_size

    returns: dictionary with the following structure:
             D{M34{ 'num_of_peaks': 1,      # number of peaks
                    'peak_indexes': [20],   # indices of peaks in the omega array
                    'peak_heights': [90.],  # heights of the peaks (if data were smoothed,
                                              these are the heights in the smoothed data)
    """
    if not 0 <= int_threshold <= 1:
        int_threshold = 0.8
        print(
            "Invalid value for int_threshold, select value between 0 and 1, new value set to:",
            int_threshold,
        )

    if not isinstance(window_size, int) or (window_size % 2) == 0 or window_size <= 1:
        window_size = 7
        print(
            "Invalid value for window_size, select positive odd integer, new value set to:",
            window_size,
        )

    if not isinstance(poly_order, int) or window_size < poly_order:
        poly_order = 3
        print(
            "Invalid value for poly_order, select positive integer smaller than window_size, new value set to:",
            poly_order,
        )

    if not isinstance(prominence, (int, float)) or prominence < 0:
        prominence = 50
        print("Invalid value for prominence, select positive number, new value set to:", prominence)

    omega = scan["om"]
    counts = np.array(scan["Counts"])
    if smooth:
        itp = interp1d(omega, counts, kind="linear")
        absintensity = [abs(number) for number in counts]
        lowest_intensity = min(absintensity)
        counts[counts < 0] = lowest_intensity
        smooth_peaks = savgol_filter(itp(omega), window_size, poly_order)
    else:
        smooth_peaks = counts

    peaks, properties = sc.signal.find_peaks(
        smooth_peaks, height=int_threshold * max(smooth_peaks), prominence=prominence
    )
    scan["num_of_peaks"] = len(peaks)
    scan["peak_indexes"] = peaks
    scan["peak_heights"] = properties["peak_heights"]
    scan["smooth_peaks"] = smooth_peaks  # smoothed curve
448 pyzebra/ccl_io.py Normal file
@@ -0,0 +1,448 @@
import logging
import os
import re
from ast import literal_eval
from collections import defaultdict

import numpy as np

logger = logging.getLogger(__name__)

META_VARS_STR = (
    "instrument",
    "title",
    "comment",
    "user",
    "proposal_id",
    "original_filename",
    "date",
    "zebra_mode",
    "zebramode",
    "sample_name",
)

META_VARS_FLOAT = (
    "a",
    "b",
    "c",
    "alpha",
    "beta",
    "gamma",
    "omega",
    "chi",
    "phi",
    "temp",
    "mf",
    "temperature",
    "magnetic_field",
    "cex1",
    "cex2",
    "wavelength",
    "mexz",
    "moml",
    "mcvl",
    "momu",
    "mcvu",
    "2-theta",
    "twotheta",
    "nu",
    "gamma_angle",
    "polar_angle",
    "tilt_angle",
    "distance",
    "distance_an",
    "snv",
    "snh",
    "snvm",
    "snhm",
    "s1vt",
    "s1vb",
    "s1hr",
    "s1hl",
    "s2vt",
    "s2vb",
    "s2hr",
    "s2hl",
    "a5",
    "a6",
    "a4t",
    "s2ant",
    "s2anb",
    "s2anl",
    "s2anr",
)

META_UB_MATRIX = ("ub1j", "ub2j", "ub3j", "UB")

CCL_FIRST_LINE = (("idx", int), ("h", float), ("k", float), ("l", float))

CCL_ANGLES = {
    "bi": (("twotheta", float), ("omega", float), ("chi", float), ("phi", float)),
    "nb": (("gamma", float), ("omega", float), ("nu", float), ("skip_angle", float)),
}

CCL_SECOND_LINE = (
    ("n_points", int),
    ("angle_step", float),
    ("monitor", float),
    ("temp", float),
    ("mf", float),
    ("date", str),
    ("time", str),
    ("scan_motor", str),
)

EXPORT_TARGETS = {"fullprof": (".comm", ".incomm"), "jana": (".col", ".incol")}


def load_1D(filepath):
    """
    Loads a *.ccl or *.dat file (distinguishes them based on the file extension).
    To add more variables to read, extend the elif list.
    The file must include '#data' and the number of points in the right place to work properly.

    :arg filepath
    :returns dataset
        - a list with a dictionary for every scan, each combining the file metadata with
          the scan data. Scans include hkl indices, angles, monitors, step size and the
          array of counts
    """
    with open(filepath, "r") as infile:
        _, ext = os.path.splitext(filepath)
        dataset = parse_1D(infile, data_type=ext)

    return dataset
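
# Editor's usage sketch (not part of the commit; "example.ccl" is a
# hypothetical local file): load_1D dispatches on the extension and returns a
# list of per-scan dictionaries.
#
#     dataset = load_1D("example.ccl")
#     for scan in dataset:
#         print(scan["idx"], scan["h"], scan["k"], scan["l"], scan["counts"].sum())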


def parse_1D(fileobj, data_type, log=logger):
    metadata = {"data_type": data_type}

    # read metadata
    for line in fileobj:
        if "#data" in line:
            # this is the end of metadata and the start of data section
            break

        if "=" not in line:
            # skip comments / empty lines
            continue

        var_name, value = line.split("=", 1)
        var_name = var_name.strip()
        value = value.strip()

        if value == "UNKNOWN":
            metadata[var_name] = None
            continue

        try:
            if var_name in META_VARS_STR:
                if var_name == "zebramode":
                    var_name = "zebra_mode"
                metadata[var_name] = value

            elif var_name in META_VARS_FLOAT:
                if var_name == "2-theta":  # fix that angle name not to be an expression
                    var_name = "twotheta"
                if var_name == "temperature":
                    var_name = "temp"
                if var_name == "magnetic_field":
                    var_name = "mf"
                if var_name in ("a", "b", "c", "alpha", "beta", "gamma"):
                    var_name += "_cell"
                metadata[var_name] = float(value)

            elif var_name in META_UB_MATRIX:
                if var_name == "UB":
                    metadata["ub"] = np.array(literal_eval(value)).reshape(3, 3)
                else:
                    if "ub" not in metadata:
                        metadata["ub"] = np.zeros((3, 3))
                    row = int(var_name[-2]) - 1
                    metadata["ub"][row, :] = list(map(float, value.split()))

        except Exception:
            log.error(f"Error reading {var_name} with value '{value}'")
            metadata[var_name] = 0

    # handle older files that don't contain "zebra_mode" metadata
    if "zebra_mode" not in metadata:
        metadata["zebra_mode"] = "nb"

    # read data
    dataset = []
    if data_type == ".ccl":
        ccl_first_line = CCL_FIRST_LINE + CCL_ANGLES[metadata["zebra_mode"]]
        ccl_second_line = CCL_SECOND_LINE

        for line in fileobj:
            # skip empty/whitespace lines before start of any scan
            if not line or line.isspace():
                continue

            scan = {}
            scan["export"] = True

            # first line
            for param, (param_name, param_type) in zip(line.split(), ccl_first_line):
                scan[param_name] = param_type(param)

            # rename 0 index scan to 1
            if scan["idx"] == 0:
                scan["idx"] = 1

            # second line
            next_line = next(fileobj)
            for param, (param_name, param_type) in zip(next_line.split(), ccl_second_line):
                scan[param_name] = param_type(param)

            if "scan_motor" not in scan:
                scan["scan_motor"] = "om"

            if scan["scan_motor"] == "o2t":
                scan["scan_motor"] = "om"

            if scan["scan_motor"] != "om":
                raise Exception("Unsupported variable name in ccl file.")

            # "om" -> "omega"
            scan["scan_motor"] = "omega"
            scan["scan_motors"] = ["omega"]
            # overwrite metadata, because it only refers to the scan center
            half_dist = (scan["n_points"] - 1) / 2 * scan["angle_step"]
            scan["omega"] = np.linspace(
                scan["omega"] - half_dist, scan["omega"] + half_dist, scan["n_points"]
            )

            # subsequent lines with counts
            counts = []
            while len(counts) < scan["n_points"]:
                counts.extend(map(float, next(fileobj).split()))
            scan["counts"] = np.array(counts)
            scan["counts_err"] = np.sqrt(np.maximum(scan["counts"], 1))

            if scan["h"].is_integer() and scan["k"].is_integer() and scan["l"].is_integer():
                scan["h"], scan["k"], scan["l"] = map(int, (scan["h"], scan["k"], scan["l"]))

            dataset.append({**metadata, **scan})

    elif data_type == ".dat":
        if metadata["zebra_mode"] == "nb":
            if "gamma_angle" in metadata:
                # support for the new format
                metadata["gamma"] = metadata["gamma_angle"]
            else:
                metadata["gamma"] = metadata["twotheta"]

        scan = defaultdict(list)
        scan["export"] = True

        match = re.search("Scanning Variables: (.*), Steps: (.*)", next(fileobj))
        motors = [motor.strip().lower() for motor in match.group(1).split(",")]
        # Steps can be separated by " " or ", "
        steps = [float(step.strip(",")) for step in match.group(2).split()]

        match = re.search("(.*) Points, Mode: (.*), Preset (.*)", next(fileobj))
        if match.group(2) != "Monitor":
            raise Exception("Unknown mode in dat file.")
        scan["n_points"] = int(match.group(1))
        scan["monitor"] = float(match.group(3))

        col_names = list(map(str.lower, next(fileobj).split()))

        for line in fileobj:
            if "END-OF-DATA" in line:
                # this is the end of data
                break

            for name, val in zip(col_names, line.split()):
                scan[name].append(float(val))

        for name in col_names:
            scan[name] = np.array(scan[name])

        scan["counts_err"] = np.sqrt(np.maximum(scan["counts"], 1))

        scan["scan_motors"] = []
        for motor, step in zip(motors, steps):
            if step == 0:
                # it's not a scan motor, so keep only the median value
                scan[motor] = np.median(scan[motor])
            else:
                scan["scan_motors"].append(motor)

        # "om" -> "omega"
        if "om" in scan["scan_motors"]:
            scan["scan_motors"][scan["scan_motors"].index("om")] = "omega"
            scan["omega"] = scan["om"]
            del scan["om"]

        # "tt" -> "temp"
        if "tt" in scan["scan_motors"]:
            scan["scan_motors"][scan["scan_motors"].index("tt")] = "temp"
            scan["temp"] = scan["tt"]
            del scan["tt"]

        # "mf" stays "mf"
        # "phi" stays "phi"

        scan["scan_motor"] = scan["scan_motors"][0]

        if "h" not in scan:
            scan["h"] = scan["k"] = scan["l"] = float("nan")

        for param in ("mf", "temp"):
            if param not in metadata:
                scan[param] = 0

        scan["idx"] = 1

        dataset.append({**metadata, **scan})

    else:
        log.error("Unknown file extension")

    return dataset


def export_1D(dataset, path, export_target, hkl_precision=2):
    """Exports data in the .comm/.incomm format for fullprof or .col/.incol format for jana.

    Scans with integer/real hkl values are saved in .comm/.incomm or .col/.incol files
    respectively. If no scans are present for a particular output format, that file won't be
    created.
    """
    if export_target not in EXPORT_TARGETS:
        raise ValueError(f"Unknown export target: {export_target}.")

    zebra_mode = dataset[0]["zebra_mode"]
    exts = EXPORT_TARGETS[export_target]
    file_content = {ext: [] for ext in exts}

    for scan in dataset:
        if "fit" not in scan:
            continue

        idx_str = f"{scan['idx']:6}"

        h, k, l = scan["h"], scan["k"], scan["l"]
        hkl_are_integers = isinstance(h, int)  # if True, other indices are of type 'int' too
        if hkl_are_integers:
            hkl_str = f"{h:4}{k:4}{l:4}"
        else:
            hkl_str = f"{h:8.{hkl_precision}f}{k:8.{hkl_precision}f}{l:8.{hkl_precision}f}"

        area_n, area_s = scan["area"]
        area_str = f"{area_n:10.2f}{area_s:10.2f}"

        ang_str = ""
        for angle, _ in CCL_ANGLES[zebra_mode]:
            if angle == scan["scan_motor"]:
                angle_center = (np.min(scan[angle]) + np.max(scan[angle])) / 2
            else:
                angle_center = scan[angle]

            if angle == "twotheta" and export_target == "jana":
                angle_center /= 2

            ang_str = ang_str + f"{angle_center:8g}"

        if export_target == "jana":
            ang_str = ang_str + f"{scan['temp']:8}" + f"{scan['monitor']:8}"

        ref = file_content[exts[0]] if hkl_are_integers else file_content[exts[1]]
        ref.append(idx_str + hkl_str + area_str + ang_str + "\n")

    for ext, content in file_content.items():
        if content:
            with open(path + ext, "w") as out_file:
                out_file.writelines(content)
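
# Editor's usage sketch (not part of the commit; "out/sample" is a
# hypothetical path prefix): for fitted scans, integer hkl rows end up in
# out/sample.comm and non-integer hkl rows in out/sample.incomm.
#
#     export_1D(dataset, "out/sample", export_target="fullprof", hkl_precision=2)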


def export_ccl_compare(dataset1, dataset2, path, export_target, hkl_precision=2):
    """Exports compare data in the .comm/.incomm format for fullprof or .col/.incol format for jana.

    Scans with integer/real hkl values are saved in .comm/.incomm or .col/.incol files
    respectively. If no scans are present for a particular output format, that file won't be
    created.
    """
    if export_target not in EXPORT_TARGETS:
        raise ValueError(f"Unknown export target: {export_target}.")

    zebra_mode = dataset1[0]["zebra_mode"]
    exts = EXPORT_TARGETS[export_target]
    file_content = {ext: [] for ext in exts}

    for scan1, scan2 in zip(dataset1, dataset2):
        if "fit" not in scan1:
            continue

        idx_str = f"{scan1['idx']:6}"

        h, k, l = scan1["h"], scan1["k"], scan1["l"]
        hkl_are_integers = isinstance(h, int)  # if True, other indices are of type 'int' too
        if hkl_are_integers:
            hkl_str = f"{h:4}{k:4}{l:4}"
        else:
            hkl_str = f"{h:8.{hkl_precision}f}{k:8.{hkl_precision}f}{l:8.{hkl_precision}f}"

        area_n1, area_s1 = scan1["area"]
        area_n2, area_s2 = scan2["area"]
        area_n = area_n1 - area_n2
        area_s = np.sqrt(area_s1 ** 2 + area_s2 ** 2)
        area_str = f"{area_n:10.2f}{area_s:10.2f}"

        ang_str = ""
        for angle, _ in CCL_ANGLES[zebra_mode]:
            if angle == scan1["scan_motor"]:
                angle_center = (np.min(scan1[angle]) + np.max(scan1[angle])) / 2
            else:
                angle_center = scan1[angle]

            if angle == "twotheta" and export_target == "jana":
                angle_center /= 2

            ang_str = ang_str + f"{angle_center:8g}"

        if export_target == "jana":
            ang_str = ang_str + f"{scan1['temp']:8}" + f"{scan1['monitor']:8}"

        ref = file_content[exts[0]] if hkl_are_integers else file_content[exts[1]]
        ref.append(idx_str + hkl_str + area_str + ang_str + "\n")

    for ext, content in file_content.items():
        if content:
            with open(path + ext, "w") as out_file:
                out_file.writelines(content)


def export_param_study(dataset, param_data, path):
    file_content = []
    for scan, param in zip(dataset, param_data):
        if "fit" not in scan:
            continue

        if not file_content:
            title_str = f"{'param':12}"
            for fit_param_name in scan["fit"].params:
                title_str = title_str + f"{fit_param_name:20}" + f"{'std_' + fit_param_name:20}"
            title_str = title_str + "file"
            file_content.append(title_str + "\n")

        param_str = f"{param:<12.2f}"

        fit_str = ""
        for fit_param in scan["fit"].params.values():
            fit_param_val = fit_param.value
            fit_param_std = fit_param.stderr
            if fit_param_std is None:
                fit_param_std = 0
            fit_str = fit_str + f"{fit_param_val:<20.2f}" + f"{fit_param_std:<20.2f}"

        _, fname_str = os.path.split(scan["original_filename"])

        file_content.append(param_str + fit_str + fname_str + "\n")

    if file_content:
        with open(path, "w") as out_file:
            out_file.writelines(file_content)
341 pyzebra/ccl_process.py Normal file
@@ -0,0 +1,341 @@
import logging
import os

import numpy as np
from lmfit.models import GaussianModel, LinearModel, PseudoVoigtModel, VoigtModel
from scipy.integrate import simpson, trapezoid

from pyzebra import CCL_ANGLES

logger = logging.getLogger(__name__)

PARAM_PRECISIONS = {
    "twotheta": 0.1,
    "chi": 0.1,
    "nu": 0.1,
    "phi": 0.05,
    "omega": 0.05,
    "gamma": 0.05,
    "temp": 1,
    "mf": 0.001,
    "ub": 0.01,
}

MAX_RANGE_GAP = {"omega": 0.5}

MOTOR_POS_PRECISION = 0.01

AREA_METHODS = ("fit_area", "int_area")


def normalize_dataset(dataset, monitor=100_000):
    for scan in dataset:
        monitor_ratio = monitor / scan["monitor"]
        scan["counts"] *= monitor_ratio
        scan["counts_err"] *= monitor_ratio
        scan["monitor"] = monitor
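
# Editor's note (not part of the commit): normalization is a plain rescaling to
# a common monitor value. E.g. a scan recorded with monitor 50_000 gets
# monitor_ratio = 100_000 / 50_000 = 2, so 120 counts become 240 counts, with
# counts_err scaled by the same factor.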


def merge_duplicates(dataset, log=logger):
    merged = np.zeros(len(dataset), dtype=bool)
    for ind_into, scan_into in enumerate(dataset):
        for ind_from, scan_from in enumerate(dataset[ind_into + 1 :], start=ind_into + 1):
            if _parameters_match(scan_into, scan_from) and not merged[ind_from]:
                merge_scans(scan_into, scan_from, log=log)
                merged[ind_from] = True


def _parameters_match(scan1, scan2):
    zebra_mode = scan1["zebra_mode"]
    if zebra_mode != scan2["zebra_mode"]:
        return False

    for param in ("ub", *(vars[0] for vars in CCL_ANGLES[zebra_mode])):
        if param.startswith("skip"):
            # ignore skip parameters, like the last angle in 'nb' zebra mode
            continue

        if param == scan1["scan_motor"] == scan2["scan_motor"]:
            # check if ranges of the variable parameter overlap
            r1_start, r1_end = scan1[param][0], scan1[param][-1]
            r2_start, r2_end = scan2[param][0], scan2[param][-1]
            # support reversed ranges
            if r1_start > r1_end:
                r1_start, r1_end = r1_end, r1_start
            if r2_start > r2_end:
                r2_start, r2_end = r2_end, r2_start
            # maximum gap between ranges of the scanning parameter (default 0)
            max_range_gap = MAX_RANGE_GAP.get(param, 0)
            if max(r1_start - r2_end, r2_start - r1_end) > max_range_gap:
                return False

        elif (
            np.max(np.abs(np.median(scan1[param]) - np.median(scan2[param])))
            > PARAM_PRECISIONS[param]
        ):
            return False

    return True
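
# Editor's note (not part of the commit): two scans match when every fixed
# parameter agrees within PARAM_PRECISIONS and the scanned ranges overlap (or
# leave a gap no larger than MAX_RANGE_GAP). E.g. omega ranges [10, 12] and
# [12.4, 14] still match, since the 0.4 deg gap is below
# MAX_RANGE_GAP["omega"] = 0.5.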


def merge_datasets(dataset_into, dataset_from, log=logger):
    scan_motors_into = dataset_into[0]["scan_motors"]
    scan_motors_from = dataset_from[0]["scan_motors"]
    if scan_motors_into != scan_motors_from:
        log.warning(
            f"Scan motors mismatch between datasets: {scan_motors_into} vs {scan_motors_from}"
        )
        return

    merged = np.zeros(len(dataset_from), dtype=bool)
    for scan_into in dataset_into:
        for ind, scan_from in enumerate(dataset_from):
            if _parameters_match(scan_into, scan_from) and not merged[ind]:
                if scan_into["counts"].ndim == 3:
                    merge_h5_scans(scan_into, scan_from, log=log)
                else:  # scan_into["counts"].ndim == 1
                    merge_scans(scan_into, scan_from, log=log)
                merged[ind] = True

    for scan_from in dataset_from:
        dataset_into.append(scan_from)


def merge_scans(scan_into, scan_from, log=logger):
    if "init_scan" not in scan_into:
        scan_into["init_scan"] = scan_into.copy()

    if "merged_scans" not in scan_into:
        scan_into["merged_scans"] = []

    if scan_from in scan_into["merged_scans"]:
        return

    scan_into["merged_scans"].append(scan_from)

    scan_motor = scan_into["scan_motor"]  # the same as scan_from["scan_motor"]

    pos_all = np.array([])
    val_all = np.array([])
    err_all = np.array([])
    for scan in [scan_into["init_scan"], *scan_into["merged_scans"]]:
        pos_all = np.append(pos_all, scan[scan_motor])
        val_all = np.append(val_all, scan["counts"])
        err_all = np.append(err_all, scan["counts_err"] ** 2)

    sort_index = np.argsort(pos_all)
    pos_all = pos_all[sort_index]
    val_all = val_all[sort_index]
    err_all = err_all[sort_index]

    pos_tmp = pos_all[:1]
    val_tmp = val_all[:1]
    err_tmp = err_all[:1]
    num_tmp = np.array([1])
    for pos, val, err in zip(pos_all[1:], val_all[1:], err_all[1:]):
        if pos - pos_tmp[-1] < MOTOR_POS_PRECISION:
            # a repeated motor position
            val_tmp[-1] += val
            err_tmp[-1] += err
            num_tmp[-1] += 1
        else:
            # a new motor position
            pos_tmp = np.append(pos_tmp, pos)
            val_tmp = np.append(val_tmp, val)
            err_tmp = np.append(err_tmp, err)
            num_tmp = np.append(num_tmp, 1)

    scan_into[scan_motor] = pos_tmp
    scan_into["counts"] = val_tmp / num_tmp
    scan_into["counts_err"] = np.sqrt(err_tmp) / num_tmp

    scan_from["export"] = False

    fname1 = os.path.basename(scan_into["original_filename"])
    fname2 = os.path.basename(scan_from["original_filename"])
    log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')
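
# Editor's note (not part of the commit): points closer than
# MOTOR_POS_PRECISION are treated as repeats and averaged, with errors combined
# in quadrature, e.g. two repeated points with counts 100 +/- 10 and 120 +/- 12
# collapse to 110 +/- sqrt(10**2 + 12**2) / 2, i.e. about 7.8.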


def merge_h5_scans(scan_into, scan_from, log=logger):
    if "init_scan" not in scan_into:
        scan_into["init_scan"] = scan_into.copy()

    if "merged_scans" not in scan_into:
        scan_into["merged_scans"] = []

    for scan in scan_into["merged_scans"]:
        if scan_from is scan:
            log.warning("Already merged scan")
            return

    scan_into["merged_scans"].append(scan_from)

    scan_motor = scan_into["scan_motor"]  # the same as scan_from["scan_motor"]

    pos_all = [scan_into["init_scan"][scan_motor]]
    val_all = [scan_into["init_scan"]["counts"]]
    err_all = [scan_into["init_scan"]["counts_err"] ** 2]
    for scan in scan_into["merged_scans"]:
        pos_all.append(scan[scan_motor])
        val_all.append(scan["counts"])
        err_all.append(scan["counts_err"] ** 2)
    pos_all = np.concatenate(pos_all)
    val_all = np.concatenate(val_all)
    err_all = np.concatenate(err_all)

    sort_index = np.argsort(pos_all)
    pos_all = pos_all[sort_index]
    val_all = val_all[sort_index]
    err_all = err_all[sort_index]

    pos_tmp = [pos_all[0]]
    val_tmp = [val_all[:1]]
    err_tmp = [err_all[:1]]
    num_tmp = [1]
    for pos, val, err in zip(pos_all[1:], val_all[1:], err_all[1:]):
        if pos - pos_tmp[-1] < MOTOR_POS_PRECISION:
            # a repeated motor position
            val_tmp[-1] += val
            err_tmp[-1] += err
            num_tmp[-1] += 1
        else:
            # a new motor position
            pos_tmp.append(pos)
            val_tmp.append(val[None, :])
            err_tmp.append(err[None, :])
            num_tmp.append(1)
    pos_tmp = np.array(pos_tmp)
    val_tmp = np.concatenate(val_tmp)
    err_tmp = np.concatenate(err_tmp)
    num_tmp = np.array(num_tmp)

    scan_into[scan_motor] = pos_tmp
    scan_into["counts"] = val_tmp / num_tmp[:, None, None]
    scan_into["counts_err"] = np.sqrt(err_tmp) / num_tmp[:, None, None]

    scan_from["export"] = False

    fname1 = os.path.basename(scan_into["original_filename"])
    fname2 = os.path.basename(scan_from["original_filename"])
    log.info(f'Merging scans: {scan_into["idx"]} ({fname1}) <-- {scan_from["idx"]} ({fname2})')


def restore_scan(scan):
    if "merged_scans" in scan:
        for merged_scan in scan["merged_scans"]:
            merged_scan["export"] = True

    if "init_scan" in scan:
        tmp = scan["init_scan"]
        scan.clear()
        scan.update(tmp)
        # force scan export to True, otherwise in a sequence of incorrectly merged scans
        # a <- b <- c the scan b would be restored with scan["export"] = False if restoring
        # is executed in the same order, i.e. restore a -> restore b
        scan["export"] = True


def fit_scan(scan, model_dict, fit_from=None, fit_to=None, log=logger):
    if fit_from is None:
        fit_from = -np.inf
    if fit_to is None:
        fit_to = np.inf

    y_fit = scan["counts"]
    y_err = scan["counts_err"]
    x_fit = scan[scan["scan_motor"]]

    # apply fitting range
    fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
    if not np.any(fit_ind):
        log.warning(f"No data in fit range for scan {scan['idx']}")
        return

    y_fit = y_fit[fit_ind]
    y_err = y_err[fit_ind]
    x_fit = x_fit[fit_ind]

    model = None
    for model_index, (model_name, model_param) in enumerate(model_dict.items()):
        model_name, _ = model_name.split("-")
        prefix = f"f{model_index}_"

        if model_name == "linear":
            _model = LinearModel(prefix=prefix)
        elif model_name == "gaussian":
            _model = GaussianModel(prefix=prefix)
        elif model_name == "voigt":
            _model = VoigtModel(prefix=prefix)
        elif model_name == "pvoigt":
            _model = PseudoVoigtModel(prefix=prefix)
        else:
            raise ValueError(f"Unknown model name: '{model_name}'")

        _init_guess = _model.guess(y_fit, x=x_fit)

        for param_index, param_name in enumerate(model_param["param"]):
            param_hints = {}
            for hint_name in ("value", "vary", "min", "max"):
                tmp = model_param[hint_name][param_index]
                if tmp is None:
                    param_hints[hint_name] = getattr(_init_guess[prefix + param_name], hint_name)
                else:
                    param_hints[hint_name] = tmp

            if "center" in param_name:
                if np.isneginf(param_hints["min"]):
                    param_hints["min"] = np.min(x_fit)

                if np.isposinf(param_hints["max"]):
                    param_hints["max"] = np.max(x_fit)

            if "sigma" in param_name:
                if np.isposinf(param_hints["max"]):
                    param_hints["max"] = np.max(x_fit) - np.min(x_fit)

            _model.set_param_hint(param_name, **param_hints)

        if model is None:
            model = _model
        else:
            model += _model

    scan["fit"] = model.fit(y_fit, x=x_fit, weights=1 / y_err)
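
# Editor's sketch (not part of the commit): an example of the model_dict layout
# fit_scan expects. Keys are "<model>-<tag>" (the part after "-" is ignored);
# each hint list is aligned with "param", and None means "use lmfit's guess".
#
#     model_dict = {
#         "linear-1": {
#             "param": ["slope", "intercept"],
#             "value": [None, None],
#             "vary": [True, True],
#             "min": [-np.inf, -np.inf],
#             "max": [np.inf, np.inf],
#         },
#         "gaussian-2": {
#             "param": ["center", "sigma", "amplitude"],
#             "value": [None, None, None],
#             "vary": [True, True, True],
#             "min": [-np.inf, 0, 0],
#             "max": [np.inf, np.inf, np.inf],
#         },
#     }
#     fit_scan(scan, model_dict)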


def get_area(scan, area_method, lorentz):
    if "fit" not in scan:
        return

    if area_method not in AREA_METHODS:
        raise ValueError(f"Unknown area method: {area_method}.")

    if area_method == "fit_area":
        area_v = 0
        area_s = 0
        for name, param in scan["fit"].params.items():
            if "amplitude" in name:
                area_v += np.nan if param.value is None else param.value
                area_s += np.nan if param.stderr is None else param.stderr

    else:  # area_method == "int_area"
        y_val = scan["counts"]
        x_val = scan[scan["scan_motor"]]
        y_bkg = scan["fit"].eval_components(x=x_val)["f0_"]
        area_v = simpson(y_val, x=x_val) - trapezoid(y_bkg, x=x_val)
        area_s = np.sqrt(area_v)

    if lorentz:
        # lorentz correction to area
        if scan["zebra_mode"] == "bi":
            twotheta = np.deg2rad(scan["twotheta"])
            corr_factor = np.sin(twotheta)
        else:  # zebra_mode == "nb"
            gamma = np.deg2rad(scan["gamma"])
            nu = np.deg2rad(scan["nu"])
            corr_factor = np.sin(gamma) * np.cos(nu)

        area_v = np.abs(area_v * corr_factor)
        area_s = np.abs(area_s * corr_factor)

    scan["area"] = (area_v, area_s)
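
# Editor's note (not part of the commit): the Lorentz factor applied above is
# sin(twotheta) in "bi" mode and sin(gamma) * cos(nu) in "nb" mode, e.g. for
# gamma = 30 deg and nu = 10 deg an area of 100 becomes
# 100 * sin(30 deg) * cos(10 deg), i.e. about 49.2.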
@@ -1,61 +0,0 @@
import argparse
import logging
import os

from bokeh.application.application import Application
from bokeh.application.handlers import ScriptHandler
from bokeh.server.server import Server

logging.basicConfig(format="%(asctime)s %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)


def main():
    """The pyzebra command line interface.

    This is a wrapper around a bokeh server that provides an interface to launch the application,
    bundled with the pyzebra package.
    """
    app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "app", "app.py")

    parser = argparse.ArgumentParser(
        prog="pyzebra", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--port", type=int, default=5006, help="port to listen on for HTTP requests"
    )

    parser.add_argument(
        "--allow-websocket-origin",
        metavar="HOST[:PORT]",
        type=str,
        action="append",
        default=None,
        help="hostname that can connect to the server websocket",
    )

    parser.add_argument(
        "--args",
        nargs=argparse.REMAINDER,
        default=[],
        help="command line arguments for the pyzebra application",
    )

    args = parser.parse_args()

    logger.info(app_path)

    handler = ScriptHandler(filename=app_path, argv=args.args)
    server = Server(
        {"/": Application(handler)},
        port=args.port,
        allow_websocket_origin=args.allow_websocket_origin,
    )

    server.start()
    server.io_loop.start()


if __name__ == "__main__":
    main()
@@ -1,80 +0,0 @@
import numpy as np


def correction(value, lorentz=True, zebra_mode="--", ang1=0, ang2=0):
    if lorentz is False:
        return value
    else:
        if zebra_mode == "bi":
            corr_value = np.abs(value * np.sin(ang1))
            return corr_value
        elif zebra_mode == "nb":
            corr_value = np.abs(value * np.sin(ang1) * np.cos(ang2))
            return corr_value


def export_comm(data, path, lorentz=False):
    """Exports data in the *.comm format.

    :param lorentz: perform Lorentz correction
    :param path: path to file + name
    :arg data - data to export, is a dict after peak fitting
    """
    zebra_mode = data["meta"]["zebra_mode"]
    align = ">"
    if data["meta"]["indices"] == "hkl":
        extension = ".comm"
        padding = [6, 4, 10, 8]
    elif data["meta"]["indices"] == "real":
        extension = ".incomm"
        padding = [4, 6, 10, 8]

    with open(str(path + extension), "w") as out_file:
        for key, scan in data["scan"].items():
            if "fit" not in scan:
                print("Scan skipped - no fit value for:", key)
                continue
            scan_number_str = f"{key:{align}{padding[0]}}"
            h_str = f'{int(scan["h_index"]):{padding[1]}}'
            k_str = f'{int(scan["k_index"]):{padding[1]}}'
            l_str = f'{int(scan["l_index"]):{padding[1]}}'
            if data["meta"]["area_method"] == "fit":
                area = float(scan["fit"]["fit_area"].n)
                sigma_str = (
                    f'{"{:8.2f}".format(float(scan["fit"]["fit_area"].s)):{align}{padding[2]}}'
                )
            elif data["meta"]["area_method"] == "integ":
                area = float(scan["fit"]["int_area"].n)
                sigma_str = (
                    f'{"{:8.2f}".format(float(scan["fit"]["int_area"].s)):{align}{padding[2]}}'
                )

            if zebra_mode == "bi":
                area = correction(area, lorentz, zebra_mode, scan["twotheta_angle"])
                int_str = f'{"{:8.2f}".format(area):{align}{padding[2]}}'
                angle_str1 = f'{scan["twotheta_angle"]:{padding[3]}}'
                angle_str2 = f'{scan["omega_angle"]:{padding[3]}}'
                angle_str3 = f'{scan["chi_angle"]:{padding[3]}}'
                angle_str4 = f'{scan["phi_angle"]:{padding[3]}}'
            elif zebra_mode == "nb":
                area = correction(area, lorentz, zebra_mode, scan["gamma_angle"], scan["nu_angle"])
                int_str = f'{"{:8.2f}".format(area):{align}{padding[2]}}'
                angle_str1 = f'{scan["gamma_angle"]:{padding[3]}}'
                angle_str2 = f'{scan["omega_angle"]:{padding[3]}}'
                angle_str3 = f'{scan["nu_angle"]:{padding[3]}}'
                angle_str4 = f'{scan["unkwn_angle"]:{padding[3]}}'

            line = (
                scan_number_str
                + h_str
                + k_str
                + l_str
                + int_str
                + sigma_str
                + angle_str1
                + angle_str2
                + angle_str3
                + angle_str4
                + "\n"
            )
            out_file.write(line)
227 pyzebra/fit2.py
@@ -1,227 +0,0 @@
import numpy as np
import uncertainties as u
from lmfit import Model, Parameters
from scipy.integrate import simps


def bin_data(array, binsize):
    if isinstance(binsize, int) and 0 < binsize < len(array):
        return [
            np.mean(array[binsize * i : binsize * i + binsize])
            for i in range(int(np.ceil(len(array) / binsize)))
        ]
    else:
        print("Binsize needs to be a positive integer smaller than the length of the array")
        return array


def find_nearest(array, value):
    # find the nearest value and return its index
    array = np.asarray(array)
    idx = (np.abs(array - value)).argmin()
    return idx


def create_uncertainties(y, y_err):
    # create array with uncertainties for error propagation
    combined = np.array([])
    for i in range(len(y)):
        part = u.ufloat(y[i], y_err[i])
        combined = np.append(combined, part)
    return combined


def fitccl(
    scan,
    guess,
    vary,
    constraints_min,
    constraints_max,
    numfit_min=None,
    numfit_max=None,
    binning=None,
):
    """Made for fitting of ccl data where 1 peak is expected. Allows for a combination of a
    gaussian and a linear model.

    :param scan: scan in the data dict (i.e. M123)
    :param guess: initial guess for the fitting; if None, some values are added automatically in order (see below)
    :param vary: True if a parameter can vary during fitting, False if it is to be fixed
    :param numfit_min: minimal value on x axis for numerical integration - if None, the centre of the gaussian minus 3 sigma
    :param numfit_max: maximal value on x axis for numerical integration - if None, the centre of the gaussian plus 3 sigma
    :param constraints_min: min constraint values for the fit
    :param constraints_max: max constraint values for the fit
    :param binning: binning of the data
    :return: data dict with additional values

    order for guess, vary, constraints_min, constraints_max:
    [Gaussian centre, Gaussian sigma, Gaussian amplitude, background slope, background intercept]
    examples:
    guess = [None, None, 100, 0, None]
    vary = [True, True, True, True, True]
    constraints_min = [23, None, 50, 0, 0]
    constraints_max = [80, None, 1000, 0, 100]
    """
    if len(scan["peak_indexes"]) > 1:
        # return in case of more than 1 peak
        print("More than 1 peak, scan skipped")
        return
    if binning is None or binning == 0 or binning == 1:
        x = list(scan["om"])
        y = list(scan["Counts"])
        y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
        print(scan["peak_indexes"])
        if len(scan["peak_indexes"]) == 0:
            centre = np.mean(x)
        else:
            centre = x[int(scan["peak_indexes"])]
    else:
        x = list(scan["om"])
        if len(scan["peak_indexes"]) == 0:
            centre = np.mean(x)
        else:
            centre = x[int(scan["peak_indexes"])]
        x = bin_data(x, binning)
        y = list(scan["Counts"])
        y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
        combined = bin_data(create_uncertainties(y, y_err), binning)
        y = [combined[i].n for i in range(len(combined))]
        y_err = [combined[i].s for i in range(len(combined))]

    if len(scan["peak_indexes"]) == 0:
        # case for no peak: gaussian in the centre, sigma as 20% of the range
        print("No peak")
        peak_index = find_nearest(x, np.mean(x))
        guess[0] = centre if guess[0] is None else guess[0]
        guess[1] = (x[-1] - x[0]) / 5 if guess[1] is None else guess[1]
        guess[2] = 50 if guess[2] is None else guess[2]
        guess[3] = 0 if guess[3] is None else guess[3]
        guess[4] = np.mean(y) if guess[4] is None else guess[4]
        constraints_min[2] = 0

    elif len(scan["peak_indexes"]) == 1:
        # case for one peak, takes into account the user's guesses
        print("one peak")
        peak_height = scan["peak_heights"]
        guess[0] = centre if guess[0] is None else guess[0]
        guess[1] = 0.1 if guess[1] is None else guess[1]
        guess[2] = float(peak_height / 10) if guess[2] is None else float(guess[2])
        guess[3] = 0 if guess[3] is None else guess[3]
        guess[4] = np.median(x) if guess[4] is None else guess[4]
        constraints_min[0] = np.min(x) if constraints_min[0] is None else constraints_min[0]
        constraints_max[0] = np.max(x) if constraints_max[0] is None else constraints_max[0]

    def gaussian(x, g_cen, g_width, g_amp):
        """1-d gaussian: gaussian(x, amp, cen, wid)"""
        return (g_amp / (np.sqrt(2 * np.pi) * g_width)) * np.exp(
            -((x - g_cen) ** 2) / (2 * g_width ** 2)
        )

    def background(x, slope, intercept):
        """background"""
        return slope * (x - centre) + intercept

    mod = Model(gaussian) + Model(background)
    params = Parameters()
    params.add_many(
        ("g_cen", guess[0], bool(vary[0]), np.min(x), np.max(x), None, None),
        ("g_width", guess[1], bool(vary[1]), constraints_min[1], constraints_max[1], None, None),
        ("g_amp", guess[2], bool(vary[2]), constraints_min[2], constraints_max[2], None, None),
        ("slope", guess[3], bool(vary[3]), constraints_min[3], constraints_max[3], None, None),
        ("intercept", guess[4], bool(vary[4]), constraints_min[4], constraints_max[4], None, None),
    )
    # the weighted fit
    try:
        result = mod.fit(
            y, params, weights=[np.abs(1 / val) for val in y_err], x=x, calc_covar=True,
        )
    except ValueError:
        return

    if result.params["g_amp"].stderr is None:
        result.params["g_amp"].stderr = result.params["g_amp"].value
    elif result.params["g_amp"].stderr > result.params["g_amp"].value:
        result.params["g_amp"].stderr = result.params["g_amp"].value

    # u.ufloat to work with uncertainties
    fit_area = u.ufloat(result.params["g_amp"].value, result.params["g_amp"].stderr)
    comps = result.eval_components()

    if len(scan["peak_indexes"]) == 0:
        # for the case of no peak there is no reason to integrate, therefore fit and int are equal
        int_area = fit_area

    elif len(scan["peak_indexes"]) == 1:
        gauss_3sigmamin = find_nearest(
            x, result.params["g_cen"].value - 3 * result.params["g_width"].value
        )
        gauss_3sigmamax = find_nearest(
            x, result.params["g_cen"].value + 3 * result.params["g_width"].value
        )
        numfit_min = gauss_3sigmamin if numfit_min is None else find_nearest(x, numfit_min)
        numfit_max = gauss_3sigmamax if numfit_max is None else find_nearest(x, numfit_max)

        it = -1
        while abs(numfit_max - numfit_min) < 3:
            # in case the peak is very thin and numerical integration would be on zero omega
            # difference, find the closest values
            it = it + 1
            numfit_min = find_nearest(
                x,
                result.params["g_cen"].value - 3 * (1 + it / 10) * result.params["g_width"].value,
            )
            numfit_max = find_nearest(
                x,
                result.params["g_cen"].value + 3 * (1 + it / 10) * result.params["g_width"].value,
            )

        if x[numfit_min] < np.min(x):
            # makes sure that the values supplied by the user lie in the omega range
            # can be omitted for users who know what they're doing
            numfit_min = gauss_3sigmamin
            print("Minimal integration value outside of x range")
        elif x[numfit_min] >= x[numfit_max]:
            numfit_min = gauss_3sigmamin
            print("Minimal integration value higher than maximal")
        if x[numfit_max] > np.max(x):
            numfit_max = gauss_3sigmamax
            print("Maximal integration value outside of x range")
        elif x[numfit_max] <= x[numfit_min]:
            numfit_max = gauss_3sigmamax
            print("Maximal integration value lower than minimal")

        # create error vector for numerical integration propagation
        count_errors = create_uncertainties(y, y_err)
        num_int_area = simps(count_errors[numfit_min:numfit_max], x[numfit_min:numfit_max])
        # pull the nominal and error values from the fit (slope)
        slope_err = u.ufloat(result.params["slope"].value, result.params["slope"].stderr)
        # pull the nominal and error values from the fit (intercept)
        intercept_err = u.ufloat(
            result.params["intercept"].value, result.params["intercept"].stderr
        )

        background_errors = np.array([])
        for j in range(len(x[numfit_min:numfit_max])):
            # create nominal and error vector for numerical integration of the background
            bg = slope_err * (x[j] - centre) + intercept_err
            background_errors = np.append(background_errors, bg)

        num_int_background = simps(background_errors, x[numfit_min:numfit_max])
        int_area = num_int_area - num_int_background

    d = {}
    for pars in result.params:
        d[str(pars)] = (result.params[str(pars)].value, result.params[str(pars)].vary)
    print(result.fit_report())

    print((result.params["g_amp"].value - int_area.n) / result.params["g_amp"].value)

    d["ratio"] = (result.params["g_amp"].value - int_area.n) / result.params["g_amp"].value
    d["int_area"] = int_area
    d["fit_area"] = u.ufloat(result.params["g_amp"].value, result.params["g_amp"].stderr)
    d["full_report"] = result.fit_report()
    d["result"] = result
    d["comps"] = comps
    d["numfit"] = [numfit_min, numfit_max]
    scan["fit"] = d
@@ -1,167 +0,0 @@
import matplotlib.pyplot as plt
import numpy as np
import uncertainties as u
from lmfit import Model, Parameters
from lmfit.models import GaussianModel, PseudoVoigtModel, VoigtModel
from scipy.integrate import simps


def bin_data(array, binsize):
    if isinstance(binsize, int) and 0 < binsize < len(array):
        return [
            np.mean(array[binsize * i : binsize * i + binsize])
            for i in range(int(np.ceil(len(array) / binsize)))
        ]
    else:
        print("Binsize needs to be a positive integer smaller than the length of the array")
        return array


def create_uncertainties(y, y_err):
    # create array with uncertainties for error propagation
    combined = np.array([])
    for i in range(len(y)):
        part = u.ufloat(y[i], y_err[i])
        combined = np.append(combined, part)
    return combined


def find_nearest(array, value):
    # find the nearest value and return its index
    array = np.asarray(array)
    idx = (np.abs(array - value)).argmin()
    return idx


# predefined peak positions
# peaks = [6.2, 8.1, 9.9, 11.5]
peaks = [23.5, 24.5]
# peaks = [24]


def fitccl(scan, variable="om", peak_type="gauss", binning=None):

    x = list(scan[variable])
    y = list(scan["Counts"])
    peak_centre = np.mean(x)
    if binning is None or binning == 0 or binning == 1:
        x = list(scan["om"])
        y = list(scan["Counts"])
        y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
        print(scan["peak_indexes"])
        if not scan["peak_indexes"]:
            peak_centre = np.mean(x)
        else:
            peak_centre = x[int(scan["peak_indexes"])]
    else:
        x = list(scan["om"])
        if not scan["peak_indexes"]:
            peak_centre = np.mean(x)
        else:
            peak_centre = x[int(scan["peak_indexes"])]
        x = bin_data(x, binning)
        y = list(scan["Counts"])
        y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
        combined = bin_data(create_uncertainties(y, y_err), binning)
        y = [combined[i].n for i in range(len(combined))]
        y_err = [combined[i].s for i in range(len(combined))]

    def background(x, slope, intercept):
        """background"""
        return slope * (x - peak_centre) + intercept

    def gaussian(x, center, g_sigma, amplitude):
        """1-d gaussian: gaussian(x, amp, cen, wid)"""
        return (amplitude / (np.sqrt(2.0 * np.pi) * g_sigma)) * np.exp(
            -((x - center) ** 2) / (2 * g_sigma ** 2)
        )

    def lorentzian(x, center, l_sigma, amplitude):
        """1-d lorentzian"""
        return (amplitude / (1 + ((1 * x - center) / l_sigma) ** 2)) / (np.pi * l_sigma)

    def pseudoVoigt1(x, center, g_sigma, amplitude, l_sigma, fraction):
        """PseudoVoigt peak with different widths of the lorentzian and gaussian"""
        return (1 - fraction) * gaussian(x, center, g_sigma, amplitude) + fraction * (
            lorentzian(x, center, l_sigma, amplitude)
        )

    mod = Model(background)
    params = Parameters()
    params.add_many(
        ("slope", 0, True, None, None, None, None), ("intercept", 0, False, None, None, None, None)
    )
    # add peak models and their parameters
    for i in range(len(peaks)):
        if peak_type == "gauss":
            mod = mod + GaussianModel(prefix="p%d_" % (i + 1))
            params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
            params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
            params.add(str("p%d_" % (i + 1) + "sigma"), 0.2, True, 0, 5, None)
        elif peak_type == "voigt":
            mod = mod + VoigtModel(prefix="p%d_" % (i + 1))
            params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
            params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
            params.add(str("p%d_" % (i + 1) + "sigma"), 0.2, True, 0, 3, None)
            params.add(str("p%d_" % (i + 1) + "gamma"), 0.2, True, 0, 5, None)
        elif peak_type == "pseudovoigt":
            mod = mod + PseudoVoigtModel(prefix="p%d_" % (i + 1))
            params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
            params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
            params.add(str("p%d_" % (i + 1) + "sigma"), 0.2, True, 0, 5, None)
            params.add(str("p%d_" % (i + 1) + "fraction"), 0.5, True, -5, 5, None)
        elif peak_type == "pseudovoigt1":
            mod = mod + Model(pseudoVoigt1, prefix="p%d_" % (i + 1))
            params.add(str("p%d_" % (i + 1) + "amplitude"), 20, True, 0, None, None)
            params.add(str("p%d_" % (i + 1) + "center"), peaks[i], True, None, None, None)
            params.add(str("p%d_" % (i + 1) + "g_sigma"), 0.2, True, 0, 5, None)
            params.add(str("p%d_" % (i + 1) + "l_sigma"), 0.2, True, 0, 5, None)
            params.add(str("p%d_" % (i + 1) + "fraction"), 0.5, True, 0, 1, None)

    result = mod.fit(
        y, params, weights=[np.abs(1 / y_err[i]) for i in range(len(y_err))], x=x, calc_covar=True
    )

    comps = result.eval_components()

    reportstring = list()
    for keys in result.params:
        if result.params[keys].value is not None:
            str2 = np.around(result.params[keys].value, 3)
        else:
            str2 = 0
        if result.params[keys].stderr is not None:
            str3 = np.around(result.params[keys].stderr, 3)
        else:
            str3 = 0
        reportstring.append("%s = %2.3f +/- %2.3f" % (keys, str2, str3))

    reportstring = "\n".join(reportstring)

    plt.figure(figsize=(20, 10))
    plt.plot(x, result.best_fit, "k-", label="Best fit")

    plt.plot(x, y, "b-", label="Original data")
    plt.plot(x, comps["background"], "g--", label="Line component")
    for i in range(len(peaks)):
        plt.plot(
            x,
            comps[str("p%d_" % (i + 1))],
            "r--",
        )
        plt.fill_between(x, comps[str("p%d_" % (i + 1))], alpha=0.4, label=str("p%d_" % (i + 1)))
    plt.legend()
    plt.text(
        np.min(x),
        np.max(y),
        reportstring,
        fontsize=9,
        verticalalignment="top",
    )
    plt.title(str(peak_type))

    plt.xlabel("Omega [deg]")
    plt.ylabel("Counts [a.u.]")
    plt.show()

    print(result.fit_report())
181 pyzebra/h5.py
@@ -1,4 +1,10 @@
 import h5py
+import numpy as np
+from lmfit.models import Gaussian2dModel, GaussianModel
+
+META_MATRIX = ("UB",)
+META_CELL = ("cell",)
+META_STR = ("name",)
 
 
 def read_h5meta(filepath):
@@ -23,49 +29,184 @@ def parse_h5meta(file):
         line = line.strip()
         if line.startswith("#begin "):
             section = line[len("#begin ") :]
-            content[section] = []
+            if section in ("detector parameters", "crystal"):
+                content[section] = {}
+            else:
+                content[section] = []
 
         elif line.startswith("#end"):
             section = None
 
         elif section:
-            content[section].append(line)
+            if section in ("detector parameters", "crystal"):
+                if "=" in line:
+                    variable, value = line.split("=", 1)
+                    variable = variable.strip()
+                    value = value.strip()
+
+                    if variable in META_STR:
+                        pass
+                    elif variable in META_CELL:
+                        value = np.array(value.split(",")[:6], dtype=float)
+                    elif variable in META_MATRIX:
+                        value = np.array(value.split(",")[:9], dtype=float).reshape(3, 3)
+                    else:  # default is a single float number
+                        value = float(value)
+                    content[section][variable] = value
+            else:
+                content[section].append(line)
 
     return content
 
 
-def read_detector_data(filepath):
+def read_detector_data(filepath, cami_meta=None):
     """Read detector data and angles from an h5 file.
 
     Args:
         filepath (str): File path of an h5 file.
 
     Returns:
-        ndarray: A 3D array of data, rot_angle, pol_angle, tilt_angle.
+        ndarray: A 3D array of data, omega, gamma, nu.
     """
     with h5py.File(filepath, "r") as h5f:
-        data = h5f["/entry1/area_detector2/data"][:]
+        counts = h5f["/entry1/area_detector2/data"][:].astype(float)
 
-        # reshape data to a correct shape (2006 issue)
-        n, cols, rows = data.shape
-        data = data.reshape(n, rows, cols)
+        n, cols, rows = counts.shape
+        if "/entry1/experiment_identifier" in h5f:  # old format
+            # reshape images (counts) to a correct shape (2006 issue)
+            counts = counts.reshape(n, rows, cols)
+        else:
+            counts = counts.swapaxes(1, 2)
 
-        det_data = {"data": data}
+        scan = {"counts": counts, "counts_err": np.sqrt(np.maximum(counts, 1))}
+        scan["original_filename"] = filepath
+        scan["export"] = True
 
-        det_data["rot_angle"] = h5f["/entry1/area_detector2/rotation_angle"][:]  # om, sometimes ph
-        det_data["pol_angle"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:]  # gammad
-        det_data["tlt_angle"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:]  # nud
-        det_data["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][:]
-        det_data["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][:]
-        det_data["chi_angle"] = h5f["/entry1/sample/chi"][:]  # ch
-        det_data["phi_angle"] = h5f["/entry1/sample/phi"][:]  # ph
-        det_data["UB"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
+        if "/entry1/zebra_mode" in h5f:
+            scan["zebra_mode"] = h5f["/entry1/zebra_mode"][0].decode()
+        else:
+            scan["zebra_mode"] = "nb"
+
+        # overwrite zebra_mode from cami
+        if cami_meta is not None:
+            if "zebra_mode" in cami_meta:
+                scan["zebra_mode"] = cami_meta["zebra_mode"][0]
+
+        if "/entry1/control/Monitor" in h5f:
+            scan["monitor"] = h5f["/entry1/control/Monitor"][0]
+        else:  # old path
+            scan["monitor"] = h5f["/entry1/control/data"][0]
+
+        scan["idx"] = 1
+
+        if "/entry1/sample/rotation_angle" in h5f:
+            scan["omega"] = h5f["/entry1/sample/rotation_angle"][:]
+        else:
+            scan["omega"] = h5f["/entry1/area_detector2/rotation_angle"][:]
+        if len(scan["omega"]) == 1:
+            scan["omega"] = np.ones(n) * scan["omega"]
+
+        scan["gamma"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:]
+        scan["twotheta"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:]
+        if len(scan["gamma"]) == 1:
+            scan["gamma"] = np.ones(n) * scan["gamma"]
+            scan["twotheta"] = np.ones(n) * scan["twotheta"]
+        scan["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][0]
+        scan["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][0]
+        scan["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][0]
+        if scan["zebra_mode"] == "nb":
+            scan["chi"] = np.array([180])
+            scan["phi"] = np.array([0])
+        elif scan["zebra_mode"] == "bi":
+            scan["chi"] = h5f["/entry1/sample/chi"][:]
+            scan["phi"] = h5f["/entry1/sample/phi"][:]
+            if len(scan["chi"]) == 1:
+                scan["chi"] = np.ones(n) * scan["chi"]
+            if len(scan["phi"]) == 1:
+                scan["phi"] = np.ones(n) * scan["phi"]
+        if h5f["/entry1/sample/UB"].size == 0:
+            scan["ub"] = np.eye(3) * 0.177
+        else:
+            scan["ub"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
+        scan["name"] = h5f["/entry1/sample/name"][0].decode()
+        scan["cell"] = h5f["/entry1/sample/cell"][:]
+
+        if n == 1:
+            # a default motor for a single frame file
+            scan["scan_motor"] = "omega"
+        else:
+            for var in ("omega", "gamma", "chi", "phi"):  # TODO: also nu?
+                if abs(scan[var][0] - scan[var][-1]) > 0.1:
+                    scan["scan_motor"] = var
+                    break
+            else:
+                raise ValueError("No angles that vary")
+
+        scan["scan_motors"] = [scan["scan_motor"]]
 
         # optional parameters
         if "/entry1/sample/magnetic_field" in h5f:
-            det_data["magnetic_field"] = h5f["/entry1/sample/magnetic_field"][:]
+            scan["mf"] = h5f["/entry1/sample/magnetic_field"][:]
+
+        if "mf" in scan:
+            # TODO: NaNs are not JSON compliant, so replace them with None
+            # this is not a great solution, but makes it safe to use the array in bokeh
+            scan["mf"] = np.where(np.isnan(scan["mf"]), None, scan["mf"])
 
         if "/entry1/sample/temperature" in h5f:
-            det_data["temperature"] = h5f["/entry1/sample/temperature"][:]
+            scan["temp"] = h5f["/entry1/sample/temperature"][:]
+        elif "/entry1/sample/Ts/value" in h5f:
+            scan["temp"] = h5f["/entry1/sample/Ts/value"][:]
 
-    return det_data
+        if "temp" in scan:
+            # TODO: NaNs are not JSON compliant, so replace them with None
+            # this is not a great solution, but makes it safe to use the array in bokeh
+            scan["temp"] = np.where(np.isnan(scan["temp"]), None, scan["temp"])
+
+        # overwrite metadata from .cami
+        if cami_meta is not None:
+            if "crystal" in cami_meta:
+                cami_meta_crystal = cami_meta["crystal"]
+                if "name" in cami_meta_crystal:
+                    scan["name"] = cami_meta_crystal["name"]
+                if "UB" in cami_meta_crystal:
+                    scan["ub"] = cami_meta_crystal["UB"]
+                if "cell" in cami_meta_crystal:
+                    scan["cell"] = cami_meta_crystal["cell"]
+                if "lambda" in cami_meta_crystal:
+                    scan["wave"] = cami_meta_crystal["lambda"]
+
+            if "detector parameters" in cami_meta:
+                cami_meta_detparam = cami_meta["detector parameters"]
+                if "dist2" in cami_meta_detparam:
+                    scan["ddist"] = cami_meta_detparam["dist2"]
+
+    return scan
||||
|
||||
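# Usage sketch for read_detector_data (illustrative only; the file path below is
# hypothetical): the returned dict holds the detector stack under "counts" plus the
# per-frame angles, so a scan can be inspected without touching h5py directly.
#
#   scan = read_detector_data("/path/to/some_zebra_scan.hdf")
#   print(scan["zebra_mode"], scan["scan_motor"])
#   print(scan["counts"].shape)                       # (n_frames, rows, cols)
#   per_frame = scan["counts"].sum(axis=(1, 2))       # integrated intensity per frame

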
def fit_event(scan, fr_from, fr_to, y_from, y_to, x_from, x_to):
    data_roi = scan["counts"][fr_from:fr_to, y_from:y_to, x_from:x_to]

    model = GaussianModel()
    fr = np.arange(fr_from, fr_to)
    counts_per_fr = np.sum(data_roi, axis=(1, 2))
    params = model.guess(counts_per_fr, fr)
    result = model.fit(counts_per_fr, x=fr, params=params)
    frC = result.params["center"].value
    intensity = result.params["height"].value

    counts_std = counts_per_fr.std()
    counts_mean = counts_per_fr.mean()
    snr = 0 if counts_std == 0 else counts_mean / counts_std

    model = Gaussian2dModel()
    xs, ys = np.meshgrid(np.arange(x_from, x_to), np.arange(y_from, y_to))
    xs = xs.flatten()
    ys = ys.flatten()
    counts = np.sum(data_roi, axis=0).flatten()
    params = model.guess(counts, xs, ys)
    result = model.fit(counts, x=xs, y=ys, params=params)
    xC = result.params["centerx"].value
    yC = result.params["centery"].value

    scan["fit"] = {"frame": frC, "x_pos": xC, "y_pos": yC, "intensity": intensity, "snr": snr}

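# Illustrative sketch (assumptions: lmfit is installed and provides GaussianModel and
# Gaussian2dModel, as used by fit_event above; the synthetic scan is made up). fit_event
# fits a 1D Gaussian along the frame axis and a 2D Gaussian in the detector plane, then
# stores the result under scan["fit"].
import numpy as np
from lmfit.models import GaussianModel, Gaussian2dModel  # noqa: F401 (used by fit_event)

rng = np.random.default_rng(0)
fr, y, x = np.meshgrid(np.arange(30), np.arange(16), np.arange(16), indexing="ij")
peak = 100 * np.exp(-((fr - 15) ** 2) / 8 - ((y - 8) ** 2) / 4 - ((x - 8) ** 2) / 4)
scan = {"counts": peak + rng.poisson(1, peak.shape)}

fit_event(scan, 5, 25, 2, 14, 2, 14)
print(scan["fit"]["frame"], scan["fit"]["x_pos"], scan["fit"]["y_pos"])
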
@@ -1,221 +0,0 @@
import os
import re
from collections import defaultdict
from decimal import Decimal

import numpy as np

META_VARS_STR = (
    "instrument",
    "title",
    "sample",
    "user",
    "ProposalID",
    "original_filename",
    "date",
    "zebra_mode",
    "proposal",
    "proposal_user",
    "proposal_title",
    "proposal_email",
    "detectorDistance",
)
META_VARS_FLOAT = (
    "omega",
    "mf",
    "2-theta",
    "chi",
    "phi",
    "nu",
    "temp",
    "wavelenght",  # sic: this key matches the misspelling used in the data files
    "a",
    "b",
    "c",
    "alpha",
    "beta",
    "gamma",
    "cex1",
    "cex2",
    "mexz",
    "moml",
    "mcvl",
    "momu",
    "mcvu",
    "snv",
    "snh",
    "snvm",
    "snhm",
    "s1vt",
    "s1vb",
    "s1hr",
    "s1hl",
    "s2vt",
    "s2vb",
    "s2hr",
    "s2hl",
)
META_UB_MATRIX = ("ub1j", "ub2j", "ub3j")

CCL_FIRST_LINE = (
    # the first element is `scan_number`, which we don't save to metadata
    ("h_index", float),
    ("k_index", float),
    ("l_index", float),
)

CCL_FIRST_LINE_BI = (
    *CCL_FIRST_LINE,
    ("twotheta_angle", float),
    ("omega_angle", float),
    ("chi_angle", float),
    ("phi_angle", float),
)

CCL_FIRST_LINE_NB = (
    *CCL_FIRST_LINE,
    ("gamma_angle", float),
    ("omega_angle", float),
    ("nu_angle", float),
    ("unkwn_angle", float),
)

CCL_SECOND_LINE = (
    ("number_of_measurements", int),
    ("angle_step", float),
    ("monitor", float),
    ("temperature", float),
    ("mag_field", float),
    ("date", str),
    ("time", str),
    ("scan_type", str),
)


def load_1D(filepath):
    """Loads a *.ccl or *.dat file (distinguished by the file extension).

    To add more variables to read, extend the elif list. The file must include
    '#data' and the number of points in the right place to work properly.

    :arg filepath
    :returns det_variables
        - dictionary of all detector/scan variables and a dictionary for every scan.
        Names of these dictionaries are M + scan number. They include HKL indices, angles,
        monitors, step size and the array of counts.
    """
    with open(filepath, "r") as infile:
        _, ext = os.path.splitext(filepath)
        det_variables = parse_1D(infile, data_type=ext)

    return det_variables


def parse_1D(fileobj, data_type):
    # read metadata
    metadata = {}
    for line in fileobj:
        if "=" in line:
            variable, value = line.split("=")
            variable = variable.strip()
            if variable in META_VARS_FLOAT:
                metadata[variable] = float(value)
            elif variable in META_VARS_STR:
                metadata[variable] = str(value)[:-1].strip()
            elif variable in META_UB_MATRIX:
                metadata[variable] = re.findall(r"[-+]?\d*\.\d+|\d+", str(value))

        if "#data" in line:
            # this is the end of metadata and the start of data section
            break

    # read data
    scan = {}
    if data_type == ".ccl":
        decimal = list()

        if metadata["zebra_mode"] == "bi":
            ccl_first_line = CCL_FIRST_LINE_BI
        elif metadata["zebra_mode"] == "nb":
            ccl_first_line = CCL_FIRST_LINE_NB
        ccl_second_line = CCL_SECOND_LINE

        for line in fileobj:
            d = {}

            # first line
            scan_number, *params = line.split()
            for param, (param_name, param_type) in zip(params, ccl_first_line):
                d[param_name] = param_type(param)

            decimal.append(bool(Decimal(d["h_index"]) % 1 == 0))
            decimal.append(bool(Decimal(d["k_index"]) % 1 == 0))
            decimal.append(bool(Decimal(d["l_index"]) % 1 == 0))

            # second line
            next_line = next(fileobj)
            params = next_line.split()
            for param, (param_name, param_type) in zip(params, ccl_second_line):
                d[param_name] = param_type(param)

            d["om"] = np.linspace(
                d["omega_angle"] - (d["number_of_measurements"] / 2) * d["angle_step"],
                d["omega_angle"] + (d["number_of_measurements"] / 2) * d["angle_step"],
                d["number_of_measurements"],
            )

            # subsequent lines with counts
            counts = []
            while len(counts) < d["number_of_measurements"]:
                counts.extend(map(int, next(fileobj).split()))
            d["Counts"] = counts

            scan[int(scan_number)] = d

        if all(decimal):
            metadata["indices"] = "hkl"
        else:
            metadata["indices"] = "real"

    elif data_type == ".dat":
        # skip the first 2 rows, the third row contains the column names
        next(fileobj)
        next(fileobj)
        col_names = next(fileobj).split()
        data_cols = defaultdict(list)

        for line in fileobj:
            if "END-OF-DATA" in line:
                # this is the end of data
                break

            for name, val in zip(col_names, line.split()):
                data_cols[name].append(float(val))

        try:
            data_cols["h_index"] = float(metadata["title"].split()[-3])
            data_cols["k_index"] = float(metadata["title"].split()[-2])
            data_cols["l_index"] = float(metadata["title"].split()[-1])
        except (ValueError, IndexError):
            print("seems hkl is not in title")

        data_cols["temperature"] = metadata["temp"]
        data_cols["mag_field"] = metadata["mf"]
        data_cols["omega_angle"] = metadata["omega"]
        data_cols["number_of_measurements"] = len(data_cols["om"])
        data_cols["monitor"] = data_cols["Monitor1"][0]
        data_cols["twotheta_angle"] = metadata["2-theta"]
        data_cols["chi_angle"] = metadata["chi"]
        data_cols["phi_angle"] = metadata["phi"]
        data_cols["nu_angle"] = metadata["nu"]

        scan[1] = dict(data_cols)

    else:
        print("Unknown file extension")

    # utility information
    metadata["data_type"] = data_type
    metadata["area_method"] = "fit"

    return {"meta": metadata, "scan": scan}
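# Worked example of the omega grid computed in parse_1D above (values made up): a scan
# with 5 points, 0.1 deg step, centered on omega_angle = 10.0 spans 9.75..10.25.
import numpy as np

n, step, center = 5, 0.1, 10.0
om = np.linspace(center - (n / 2) * step, center + (n / 2) * step, n)
# om -> [9.75, 9.875, 10.0, 10.125, 10.25]
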
@@ -1,383 +0,0 @@
from load_1D import load_1D
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D  # don't delete, otherwise waterfall won't work
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pickle
import scipy.io as sio
import uncertainties as u


def create_tuples(x, y, y_err):
    """Creates tuples for sorting and merging of the data.

    Counts need to be normalized to monitor beforehand."""
    t = list()
    for i in range(len(x)):
        tup = (x[i], y[i], y_err[i])
        t.append(tup)
    return t


def load_dats(filepath):
    """Reads the txt file, gets headers and data.

    :arg filepath to txt file or list of filepaths to the files
    :return ccl-like dictionary"""
    if isinstance(filepath, str):
        data_type = "txt"
        file_list = list()
        with open(filepath, "r") as infile:
            col_names = next(infile).split(",")
            col_names = [col_names[i].rstrip() for i in range(len(col_names))]
            for line in infile:
                if "END" in line:
                    break
                file_list.append(tuple(line.split(",")))
    elif isinstance(filepath, list):
        data_type = "list"
        file_list = filepath
    dict1 = {}
    for i in range(len(file_list)):
        if not dict1:
            if data_type == "txt":
                dict1 = load_1D(file_list[0][0])
            else:
                dict1 = load_1D(file_list[0])
        else:
            if data_type == "txt":
                dict1 = add_dict(dict1, load_1D(file_list[i][0]))
            else:
                dict1 = add_dict(dict1, load_1D(file_list[i]))
        dict1["scan"][i + 1]["params"] = {}
        if data_type == "txt":
            for x in range(len(col_names) - 1):
                dict1["scan"][i + 1]["params"][col_names[x + 1]] = file_list[i][x + 1]

    return dict1


def create_dataframe(dict1):
    """Creates a pandas dataframe from the dictionary.

    :arg ccl-like dictionary
    :return pandas dataframe"""
    # create dictionary to which we pull only wanted items before transforming it to pd.dataframe
    pull_dict = {}
    pull_dict["filenames"] = list()
    for key in dict1["scan"][1]["params"]:
        pull_dict[key] = list()
    pull_dict["temperature"] = list()
    pull_dict["mag_field"] = list()
    pull_dict["fit_area"] = list()
    pull_dict["int_area"] = list()
    pull_dict["om"] = list()
    pull_dict["Counts"] = list()

    # populate the dict
    for keys in dict1["scan"]:
        if "file_of_origin" in dict1["scan"][keys]:
            pull_dict["filenames"].append(dict1["scan"][keys]["file_of_origin"].split("/")[-1])
        else:
            pull_dict["filenames"].append(dict1["meta"]["original_filename"].split("/")[-1])
        for key in dict1["scan"][keys]["params"]:
            pull_dict[str(key)].append(float(dict1["scan"][keys]["params"][key]))
        pull_dict["temperature"].append(dict1["scan"][keys]["temperature"])
        pull_dict["mag_field"].append(dict1["scan"][keys]["mag_field"])
        pull_dict["fit_area"].append(dict1["scan"][keys]["fit"]["fit_area"])
        pull_dict["int_area"].append(dict1["scan"][keys]["fit"]["int_area"])
        pull_dict["om"].append(dict1["scan"][keys]["om"])
        pull_dict["Counts"].append(dict1["scan"][keys]["Counts"])

    return pd.DataFrame(data=pull_dict)


def sort_dataframe(dataframe, sorting_parameter):
    """Sorts the data frame and resets the index."""
    data = dataframe.sort_values(by=sorting_parameter)
    data = data.reset_index(drop=True)
    return data


def make_graph(data, sorting_parameter, style):
    """Makes the graph from the data based on style and sorting parameter.

    :arg data : pandas dataframe with data after sorting
    :arg sorting_parameter to pull the correct variable and name
    :arg style of the graph - waterfall, scatter, heatmap
    :return matplotlib figure"""
    if style == "waterfall":
        mpl.rcParams["legend.fontsize"] = 10
        fig = plt.figure()
        ax = fig.gca(projection="3d")
        for i in range(len(data)):
            x = data["om"][i]
            z = data["Counts"][i]
            yy = [data[sorting_parameter][i]] * len(x)
            ax.plot(x, yy, z, label=str("%s = %f" % (sorting_parameter, yy[i])))

        ax.legend()
        ax.set_xlabel("Omega")
        ax.set_ylabel(sorting_parameter)
        ax.set_zlabel("counts")

    elif style == "scatter":
        fig = plt.figure()
        plt.errorbar(
            data[sorting_parameter],
            [data["fit_area"][i].n for i in range(len(data["fit_area"]))],
            [data["fit_area"][i].s for i in range(len(data["fit_area"]))],
            capsize=5,
            ecolor="green",
        )
        plt.xlabel(str(sorting_parameter))
        plt.ylabel("Intensity")

    elif style == "heat":
        new_om = list()
        for i in range(len(data)):
            new_om = np.append(new_om, np.around(data["om"][i], 2), axis=0)
        unique_om = np.unique(new_om)
        color_matrix = np.zeros(shape=(len(data), len(unique_om)))
        for i in range(len(data)):
            for j in range(len(data["om"][i])):
                if np.around(data["om"][i][j], 2) in np.unique(new_om):
                    color_matrix[i, j] = data["Counts"][i][j]
                else:
                    continue

        fig = plt.figure()
        plt.pcolormesh(unique_om, data[sorting_parameter], color_matrix, shading="gouraud")
        plt.xlabel("omega")
        plt.ylabel(sorting_parameter)
        plt.colorbar()
        plt.clim(color_matrix.mean(), color_matrix.max())

    return fig


def save_dict(obj, name):
    """Saves dictionary as a pickle file in binary format.

    :arg obj - object to save
    :arg name - name of the file
    NOTE: path should be added later"""
    with open(name + ".pkl", "wb") as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)


def load_dict(name):
    """Loads dictionary from a pickle file.

    :arg name - name of the file to load
    NOTE: expects the file in the same folder, path should be added later
    :return dictionary"""
    with open(name + ".pkl", "rb") as f:
        return pickle.load(f)


# pickle, mat, h5, txt, csv, json
def save_table(data, filetype, name, path=None):
    print("Saving: ", filetype)
    path = "" if path is None else path
    if filetype == "pickle":
        # to work with uncertainties, see the uncertainties module
        with open(path + name + ".pkl", "wb") as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
    if filetype == "mat":
        # matlab doesn't allow some special characters in var names and they can't start
        # with numbers; if needed, add more to the remove_characters list
        data["fit_area_nom"] = [data["fit_area"][i].n for i in range(len(data["fit_area"]))]
        data["fit_area_err"] = [data["fit_area"][i].s for i in range(len(data["fit_area"]))]
        data["int_area_nom"] = [data["int_area"][i].n for i in range(len(data["int_area"]))]
        data["int_area_err"] = [data["int_area"][i].s for i in range(len(data["int_area"]))]
        data = data.drop(columns=["fit_area", "int_area"])
        remove_characters = [" ", "[", "]", "{", "}", "(", ")"]
        for character in remove_characters:
            data.columns = [
                data.columns[i].replace(character, "") for i in range(len(data.columns))
            ]
        sio.savemat((path + name + ".mat"), {name: col.values for name, col in data.items()})
    if filetype in ("csv", "txt"):  # note: the original `filetype == "csv" or "txt"` was always true
        data["fit_area_nom"] = [data["fit_area"][i].n for i in range(len(data["fit_area"]))]
        data["fit_area_err"] = [data["fit_area"][i].s for i in range(len(data["fit_area"]))]
        data["int_area_nom"] = [data["int_area"][i].n for i in range(len(data["int_area"]))]
        data["int_area_err"] = [data["int_area"][i].s for i in range(len(data["int_area"]))]
        data = data.drop(columns=["fit_area", "int_area", "om", "Counts"])
        if filetype == "csv":
            data.to_csv(path + name + ".csv")
        if filetype == "txt":
            with open((path + name + ".txt"), "w") as outfile:
                data.to_string(outfile)
    if filetype == "h5":
        hdf = pd.HDFStore((path + name + ".h5"))
        hdf.put("data", data)
        hdf.close()
    if filetype == "json":
        data.to_json((path + name + ".json"))


def normalize(dict, key, monitor):
    """Normalizes the measurement to monitor; checks if sigma exists, otherwise creates it.

    :arg dict : dictionary from which to take the scan
    :arg key : which scan to normalize from dict1
    :arg monitor : final monitor
    :return counts - normalized counts
    :return sigma - normalized sigma"""

    counts = np.array(dict["scan"][key]["Counts"])
    sigma = np.sqrt(counts) if "sigma" not in dict["scan"][key] else dict["scan"][key]["sigma"]
    monitor_ratio = monitor / dict["scan"][key]["monitor"]
    scaled_counts = counts * monitor_ratio
    scaled_sigma = np.array(sigma) * monitor_ratio

    return scaled_counts, scaled_sigma


def merge(dict1, dict2, scand_dict_result, keep=True, monitor=100000):
    """Merges the two tuples and sorts them; if an om value is the same, the Counts value
    is averaged and the averaging is propagated into sigma; if dict1 == dict2, key[1] is
    deleted after merging.

    :arg dict1 : dictionary to which the measurement will be merged
    :arg dict2 : dictionary from which the measurement will be merged
    :arg scand_dict_result : result of scan_dict after the auto function
    :arg keep : if true, when monitors are the same, does not change it; if false, takes
        the monitor always
    :arg monitor : final monitor after merging
    note: dict1 and dict2 can be the same dict
    :return dict1 with merged scan"""
    for keys in scand_dict_result:
        for j in range(len(scand_dict_result[keys])):
            first, second = scand_dict_result[keys][j][0], scand_dict_result[keys][j][1]
            print(first, second)
            if keep:
                if dict1["scan"][first]["monitor"] == dict2["scan"][second]["monitor"]:
                    monitor = dict1["scan"][first]["monitor"]

            # load om and Counts
            x1, x2 = dict1["scan"][first]["om"], dict2["scan"][second]["om"]
            cor_y1, y_err1 = normalize(dict1, first, monitor=monitor)
            cor_y2, y_err2 = normalize(dict2, second, monitor=monitor)
            # create tuples (om, Counts, sigma) for sorting and further processing
            tuple_list = create_tuples(x1, cor_y1, y_err1) + create_tuples(x2, cor_y2, y_err2)
            # sort the list on om and add a (0, 0, 0) tuple to the last position
            sorted_t = sorted(tuple_list, key=lambda tup: tup[0])
            sorted_t.append((0, 0, 0))
            om, Counts, sigma = [], [], []
            seen = list()
            for i in range(len(sorted_t) - 1):
                if sorted_t[i][0] not in seen:
                    if sorted_t[i][0] != sorted_t[i + 1][0]:
                        om = np.append(om, sorted_t[i][0])
                        Counts = np.append(Counts, sorted_t[i][1])
                        sigma = np.append(sigma, sorted_t[i][2])
                    else:
                        om = np.append(om, sorted_t[i][0])
                        counts1, counts2 = sorted_t[i][1], sorted_t[i + 1][1]
                        sigma1, sigma2 = sorted_t[i][2], sorted_t[i + 1][2]
                        count_err1 = u.ufloat(counts1, sigma1)
                        count_err2 = u.ufloat(counts2, sigma2)
                        avg = (count_err1 + count_err2) / 2
                        Counts = np.append(Counts, avg.n)
                        sigma = np.append(sigma, avg.s)
                        seen.append(sorted_t[i][0])
                else:
                    continue

            if dict1 == dict2:
                del dict1["scan"][second]

            note = (
                f"This measurement was merged with measurement {second} from "
                f'file {dict2["meta"]["original_filename"]} \n'
            )
            if "notes" not in dict1["scan"][first]:
                dict1["scan"][first]["notes"] = note
            else:
                dict1["scan"][first]["notes"] += note

            dict1["scan"][first]["om"] = om
            dict1["scan"][first]["Counts"] = Counts
            dict1["scan"][first]["sigma"] = sigma
            dict1["scan"][first]["monitor"] = monitor
            print("merging done")
    return dict1


def add_dict(dict1, dict2):
    """Adds two dictionaries; meta of the new one is saved as meta+original_filename and
    measurements are shifted to continue the numbering of the first dict.

    :arg dict1 : dictionary to add to
    :arg dict2 : dictionary from which to take the measurements
    :return dict1 : combined dictionary
    Note: dict1 must be made from ccl, otherwise we would have to change the structure of
    a loaded dat file"""
    if dict1["meta"]["zebra_mode"] != dict2["meta"]["zebra_mode"]:
        print("You are trying to add scans measured with different zebra modes")
        return
    max_measurement_dict1 = max([keys for keys in dict1["scan"]])
    new_filenames = np.arange(
        max_measurement_dict1 + 1, max_measurement_dict1 + 1 + len(dict2["scan"])
    )
    new_meta_name = "meta" + str(dict2["meta"]["original_filename"])
    if new_meta_name not in dict1:
        for keys, name in zip(dict2["scan"], new_filenames):
            dict2["scan"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
            dict1["scan"][name] = dict2["scan"][keys]

        dict1[new_meta_name] = dict2["meta"]
    else:
        raise KeyError(
            str(
                "The file %s has already been added to %s"
                % (dict2["meta"]["original_filename"], dict1["meta"]["original_filename"])
            )
        )
    return dict1


def auto(dict):
    """Takes just the unique tuples from all tuples in the dictionary returned by scan_dict.

    Intended for automatic merge if you don't want to specify which scans to merge together.
    args: dict - dictionary from the scan_dict function
    :return dict - dict without repetitions"""
    for keys in dict:
        tuple_list = dict[keys]
        new = list()
        for i in range(len(tuple_list)):
            if tuple_list[0][0] == tuple_list[i][0]:
                new.append(tuple_list[i])
        dict[keys] = new
    return dict


def scan_dict(dict, precision=0.5):
    """Scans dictionary for duplicate angle indices.

    :arg dict : dictionary to scan
    :arg precision : in deg; sometimes angles are zero, so it's easier this way instead of
        checking zero division
    :return dictionary with matching scans; if there are none, the dict is empty
    note: can be checked by "not d", true if empty
    """

    if dict["meta"]["zebra_mode"] == "bi":
        angles = ["twotheta_angle", "omega_angle", "chi_angle", "phi_angle"]
    elif dict["meta"]["zebra_mode"] == "nb":
        angles = ["gamma_angle", "omega_angle", "nu_angle"]
    else:
        print("Unknown zebra mode")
        return

    d = {}
    for i in dict["scan"]:
        for j in dict["scan"]:
            if dict["scan"][i] != dict["scan"][j]:
                itup = list()
                for k in angles:
                    itup.append(abs(abs(dict["scan"][i][k]) - abs(dict["scan"][j][k])))

                if all(i <= precision for i in itup):
                    if str([np.around(dict["scan"][i][k], 1) for k in angles]) not in d:
                        d[str([np.around(dict["scan"][i][k], 1) for k in angles])] = list()
                        d[str([np.around(dict["scan"][i][k], 1) for k in angles])].append((i, j))
                    else:
                        d[str([np.around(dict["scan"][i][k], 1) for k in angles])].append((i, j))

                else:
                    pass

            else:
                continue
    return d
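# Worked example of the monitor scaling done by normalize above (numbers made up):
# counts and sigma are both multiplied by monitor_new / monitor_old, so the relative
# error is preserved.
import numpy as np

counts = np.array([100.0, 400.0])
sigma = np.sqrt(counts)              # [10., 20.]
monitor_old, monitor_new = 50000, 100000
ratio = monitor_new / monitor_old    # 2.0
scaled_counts = counts * ratio       # [200., 800.]
scaled_sigma = sigma * ratio         # [20., 40.]
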
pyzebra/sxtal_refgen.py (new file, 486 lines)
@@ -0,0 +1,486 @@
import io
import logging
import os
import subprocess
import tempfile
from math import ceil, floor

import numpy as np

logger = logging.getLogger(__name__)

SXTAL_REFGEN_PATH = "/afs/psi.ch/project/sinq/rhel8/bin/Sxtal_Refgen"

_zebraBI_default_geom = """GEOM 2 Bissecting - HiCHI
BLFR z-up
DIST_UNITS mm
ANGL_UNITS deg
DET_TYPE Point ipsd 1
DIST_DET 488
DIM_XY 1.0 1.0 1 1
GAPS_DET 0 0

SETTING 1 0 0 0 1 0 0 0 1
NUM_ANG 4
ANG_LIMITS Min Max Offset
        Gamma 0.0 128.0 0.00
        Omega 0.0 64.0 0.00
        Chi 80.0 211.0 0.00
        Phi 0.0 360.0 0.00

DET_OFF 0 0 0
"""

_zebraNB_default_geom = """GEOM 3 Normal Beam
BLFR z-up
DIST_UNITS mm
ANGL_UNITS deg
DET_TYPE Point ipsd 1
DIST_DET 448
DIM_XY 1.0 1.0 1 1
GAPS_DET 0 0

SETTING 1 0 0 0 1 0 0 0 1
NUM_ANG 3
ANG_LIMITS Min Max Offset
        Gamma 0.0 128.0 0.00
        Omega -180.0 180.0 0.00
        Nu -15.0 15.0 0.00

DET_OFF 0 0 0
"""

_zebra_default_cfl = """TITLE mymaterial
SPGR P 63 2 2
CELL 5.73 5.73 11.89 90 90 120

WAVE 1.383

UBMAT
0.000000 0.000000 0.084104
0.000000 0.174520 -0.000000
0.201518 0.100759 0.000000

INSTR zebra.geom

ORDER 1 2 3

ANGOR gamma

HLIM -25 25 -25 25 -25 25
SRANG 0.0 0.7

Mag_Structure
lattiCE P 1
kvect 0.0 0.0 0.0
magcent
symm x,y,z
msym u,v,w, 0.0
End_Mag_Structure
"""


def get_zebraBI_default_geom_file():
    return io.StringIO(_zebraBI_default_geom)


def get_zebraNB_default_geom_file():
    return io.StringIO(_zebraNB_default_geom)


def get_zebra_default_cfl_file():
    return io.StringIO(_zebra_default_cfl)


def read_geom_file(fileobj):
    ang_lims = dict()
    for line in fileobj:
        if "!" in line:  # remove comments that start with ! sign
            line, _ = line.split(sep="!", maxsplit=1)

        if line.startswith("GEOM"):
            _, val = line.split(maxsplit=1)
            if val.startswith("2"):
                ang_lims["geom"] = "bi"
            else:  # val.startswith("3")
                ang_lims["geom"] = "nb"

        elif line.startswith("ANG_LIMITS"):
            # read angular limits
            for line in fileobj:
                if not line or line.isspace():
                    break

                ang, ang_min, ang_max, ang_offset = line.split()
                ang_lims[ang.lower()] = [ang_min, ang_max, ang_offset]

    if "2theta" in ang_lims:  # treat 2theta as gamma
        ang_lims["gamma"] = ang_lims.pop("2theta")

    return ang_lims


def export_geom_file(path, ang_lims, template=None):
    if ang_lims["geom"] == "bi":
        template_file = get_zebraBI_default_geom_file()
        n_ang = 4
    else:  # ang_lims["geom"] == "nb"
        template_file = get_zebraNB_default_geom_file()
        n_ang = 3

    if template is not None:
        template_file = template

    with open(path, "w") as out_file:
        for line in template_file:
            out_file.write(line)

            if line.startswith("ANG_LIMITS"):
                for _ in range(n_ang):
                    next_line = next(template_file)
                    ang, _, _, _ = next_line.split()

                    if ang == "2theta":  # treat 2theta as gamma
                        ang = "Gamma"
                    vals = ang_lims[ang.lower()]

                    out_file.write(f"{'':<8}{ang:<10}{vals[0]:<10}{vals[1]:<10}{vals[2]:<10}\n")


def calc_ub_matrix(params, log=logger):
    with tempfile.TemporaryDirectory() as temp_dir:
        cfl_file = os.path.join(temp_dir, "ub_matrix.cfl")

        with open(cfl_file, "w") as fileobj:
            for key, value in params.items():
                fileobj.write(f"{key} {value}\n")

        comp_proc = subprocess.run(
            [SXTAL_REFGEN_PATH, cfl_file],
            cwd=temp_dir,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
        )
        log.info(" ".join(comp_proc.args))
        log.info(comp_proc.stdout)

        sfa_file = os.path.join(temp_dir, "ub_matrix.sfa")
        ub_matrix = []
        with open(sfa_file, "r") as fileobj:
            for line in fileobj:
                if "BL_M" in line:  # next 3 lines contain the matrix
                    for _ in range(3):
                        next_line = next(fileobj)
                        *vals, _ = next_line.split(maxsplit=3)
                        ub_matrix.extend(vals)

    return ub_matrix


def read_cfl_file(fileobj):
    params = {
        "SPGR": None,
        "CELL": None,
        "WAVE": None,
        "UBMAT": None,
        "HLIM": None,
        "SRANG": None,
        "lattiCE": None,
        "kvect": None,
    }
    param_names = tuple(params)

    for line in fileobj:
        line = line.strip()
        if "!" in line:  # remove comments that start with ! sign
            line, _ = line.split(sep="!", maxsplit=1)

        if line.startswith(param_names):
            if line.startswith("UBMAT"):  # next 3 lines contain the matrix
                param, val = "UBMAT", []
                for _ in range(3):
                    next_line = next(fileobj).strip()
                    val.extend(next_line.split(maxsplit=2))
            else:
                param, val = line.split(maxsplit=1)

            params[param] = val

    return params


def read_cif_file(fileobj):
    params = {"SPGR": None, "CELL": None, "ATOM": []}

    cell_params = {
        "_cell_length_a": None,
        "_cell_length_b": None,
        "_cell_length_c": None,
        "_cell_angle_alpha": None,
        "_cell_angle_beta": None,
        "_cell_angle_gamma": None,
    }
    cell_param_names = tuple(cell_params)

    atom_param_pos = {
        "_atom_site_label": 0,
        "_atom_site_type_symbol": None,
        "_atom_site_fract_x": None,
        "_atom_site_fract_y": None,
        "_atom_site_fract_z": None,
        "_atom_site_U_iso_or_equiv": None,
        "_atom_site_occupancy": None,
    }
    atom_param_names = tuple(atom_param_pos)

    for line in fileobj:
        line = line.strip()
        if line.startswith("_space_group_name_H-M_alt"):
            _, val = line.split(maxsplit=1)
            params["SPGR"] = val.strip("'")

        elif line.startswith(cell_param_names):
            param, val = line.split(maxsplit=1)
            cell_params[param] = val

        elif line.startswith("_atom_site_label"):  # assume this is the start of atom data
            for ind, line in enumerate(fileobj, start=1):
                line = line.strip()

                # read fields
                if line.startswith("_atom_site"):
                    if line.startswith(atom_param_names):
                        atom_param_pos[line] = ind
                    continue

                # read data till an empty line
                if not line:
                    break
                vals = line.split()
                params["ATOM"].append(" ".join([vals[ind] for ind in atom_param_pos.values()]))

    if None not in cell_params.values():
        params["CELL"] = " ".join(cell_params.values())

    return params


def export_cfl_file(path, params, template=None):
    param_names = tuple(params)
    if template is None:
        template_file = get_zebra_default_cfl_file()
    else:
        template_file = template

    atom_done = False
    with open(path, "w") as out_file:
        for line in template_file:
            if line.startswith(param_names):
                if line.startswith("UBMAT"):  # only UBMAT values are not on the same line
                    out_file.write(line)
                    for i in range(3):
                        next(template_file)
                        out_file.write(" ".join(params["UBMAT"][3 * i : 3 * (i + 1)]) + "\n")

                elif line.startswith("ATOM"):
                    if "ATOM" in params:
                        # replace all ATOM with values in params
                        while line.startswith("ATOM"):
                            line = next(template_file)
                        for atom_line in params["ATOM"]:
                            out_file.write(f"ATOM {atom_line}\n")
                        atom_done = True

                else:
                    param, _ = line.split(maxsplit=1)
                    out_file.write(f"{param} {params[param]}\n")

            elif line.startswith("INSTR"):
                # replace it with a default name
                out_file.write("INSTR zebra.geom\n")

            else:
                out_file.write(line)

        # append ATOM data if it's present and a template did not contain it
        if "ATOM" in params and not atom_done:
            out_file.write("\n")
            for atom_line in params["ATOM"]:
                out_file.write(f"ATOM {atom_line}\n")


def sort_hkl_file_bi(file_in, file_out, priority, chunks):
    with open(file_in) as fileobj:
        file_in_data = fileobj.readlines()

    data = np.genfromtxt(file_in, skip_header=3)
    stt = data[:, 4]
    omega = data[:, 5]
    chi = data[:, 6]
    phi = data[:, 7]

    lines = file_in_data[3:]
    lines_update = []

    angles = {"2theta": stt, "omega": omega, "chi": chi, "phi": phi}

    # Reverse flags
    to_reverse = False
    to_reverse_p2 = False
    to_reverse_p3 = False

    # Get indices within first priority
    ang_p1 = angles[priority[0]]
    begin_p1 = floor(min(ang_p1))
    end_p1 = ceil(max(ang_p1))
    delta_p1 = chunks[0]
    for p1 in range(begin_p1, end_p1, delta_p1):
        ind_p1 = [j for j, x in enumerate(ang_p1) if p1 <= x and x < p1 + delta_p1]

        stt_new = [stt[x] for x in ind_p1]
        omega_new = [omega[x] for x in ind_p1]
        chi_new = [chi[x] for x in ind_p1]
        phi_new = [phi[x] for x in ind_p1]
        lines_new = [lines[x] for x in ind_p1]

        angles_p2 = {"stt": stt_new, "omega": omega_new, "chi": chi_new, "phi": phi_new}

        # Get indices for second priority
        ang_p2 = angles_p2[priority[1]]
        if len(ang_p2) > 0 and to_reverse_p2:
            begin_p2 = ceil(max(ang_p2))
            end_p2 = floor(min(ang_p2))
            delta_p2 = -chunks[1]
        elif len(ang_p2) > 0 and not to_reverse_p2:
            end_p2 = ceil(max(ang_p2))
            begin_p2 = floor(min(ang_p2))
            delta_p2 = chunks[1]
        else:
            end_p2 = 0
            begin_p2 = 0
            delta_p2 = 1

        to_reverse_p2 = not to_reverse_p2

        for p2 in range(begin_p2, end_p2, delta_p2):
            min_p2 = min([p2, p2 + delta_p2])
            max_p2 = max([p2, p2 + delta_p2])
            ind_p2 = [j for j, x in enumerate(ang_p2) if min_p2 <= x and x < max_p2]

            stt_new2 = [stt_new[x] for x in ind_p2]
            omega_new2 = [omega_new[x] for x in ind_p2]
            chi_new2 = [chi_new[x] for x in ind_p2]
            phi_new2 = [phi_new[x] for x in ind_p2]
            lines_new2 = [lines_new[x] for x in ind_p2]

            angles_p3 = {"stt": stt_new2, "omega": omega_new2, "chi": chi_new2, "phi": phi_new2}

            # Get indices for third priority
            ang_p3 = angles_p3[priority[2]]
            if len(ang_p3) > 0 and to_reverse_p3:
                begin_p3 = ceil(max(ang_p3)) + chunks[2]
                end_p3 = floor(min(ang_p3)) - chunks[2]
                delta_p3 = -chunks[2]
            elif len(ang_p3) > 0 and not to_reverse_p3:
                end_p3 = ceil(max(ang_p3)) + chunks[2]
                begin_p3 = floor(min(ang_p3)) - chunks[2]
                delta_p3 = chunks[2]
            else:
                end_p3 = 0
                begin_p3 = 0
                delta_p3 = 1

            to_reverse_p3 = not to_reverse_p3

            for p3 in range(begin_p3, end_p3, delta_p3):
                min_p3 = min([p3, p3 + delta_p3])
                max_p3 = max([p3, p3 + delta_p3])
                ind_p3 = [j for j, x in enumerate(ang_p3) if min_p3 <= x and x < max_p3]

                angle_new3 = [angles_p3[priority[3]][x] for x in ind_p3]

                ind_final = [x for _, x in sorted(zip(angle_new3, ind_p3), reverse=to_reverse)]

                to_reverse = not to_reverse

                for i in ind_final:
                    lines_update.append(lines_new2[i])

    with open(file_out, "w") as fileobj:
        for _ in range(3):
            fileobj.write(file_in_data.pop(0))

        fileobj.writelines(lines_update)


def sort_hkl_file_nb(file_in, file_out, priority, chunks):
    with open(file_in) as fileobj:
        file_in_data = fileobj.readlines()

    data = np.genfromtxt(file_in, skip_header=3)
    gamma = data[:, 4]
    omega = data[:, 5]
    nu = data[:, 6]

    lines = file_in_data[3:]
    lines_update = []

    angles = {"gamma": gamma, "omega": omega, "nu": nu}

    to_reverse = False
    to_reverse_p2 = False

    # Get indices within first priority
    ang_p1 = angles[priority[0]]
    begin_p1 = floor(min(ang_p1))
    end_p1 = ceil(max(ang_p1))
    delta_p1 = chunks[0]
    for p1 in range(begin_p1, end_p1, delta_p1):
        ind_p1 = [j for j, x in enumerate(ang_p1) if p1 <= x and x < p1 + delta_p1]

        # Get angles from within nu range
        lines_new = [lines[x] for x in ind_p1]
        gamma_new = [gamma[x] for x in ind_p1]
        omega_new = [omega[x] for x in ind_p1]
        nu_new = [nu[x] for x in ind_p1]

        angles_p2 = {"gamma": gamma_new, "omega": omega_new, "nu": nu_new}

        # Get indices for second priority
        ang_p2 = angles_p2[priority[1]]
        if len(gamma_new) > 0 and to_reverse_p2:
            begin_p2 = ceil(max(ang_p2))
            end_p2 = floor(min(ang_p2))
            delta_p2 = -chunks[1]
        elif len(gamma_new) > 0 and not to_reverse_p2:
            end_p2 = ceil(max(ang_p2))
            begin_p2 = floor(min(ang_p2))
            delta_p2 = chunks[1]
        else:
            end_p2 = 0
            begin_p2 = 0
            delta_p2 = 1

        to_reverse_p2 = not to_reverse_p2

        for p2 in range(begin_p2, end_p2, delta_p2):
            min_p2 = min([p2, p2 + delta_p2])
            max_p2 = max([p2, p2 + delta_p2])
            ind_p2 = [j for j, x in enumerate(ang_p2) if min_p2 <= x and x < max_p2]

            angle_new2 = [angles_p2[priority[2]][x] for x in ind_p2]

            ind_final = [x for _, x in sorted(zip(angle_new2, ind_p2), reverse=to_reverse)]

            to_reverse = not to_reverse

            for i in ind_final:
                lines_update.append(lines_new[i])

    with open(file_out, "w") as fileobj:
        for _ in range(3):
            fileobj.write(file_in_data.pop(0))

        fileobj.writelines(lines_update)
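# Illustrative sketch: read_geom_file can be exercised directly on the built-in template
# returned by get_zebraBI_default_geom_file, no files needed. The parsed limits keep the
# string values from the file.
ang_lims = read_geom_file(get_zebraBI_default_geom_file())
print(ang_lims["geom"])   # "bi"
print(ang_lims["gamma"])  # ["0.0", "128.0", "0.00"]
# export_geom_file("zebra.geom", ang_lims) would write them back in the same layout.
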
pyzebra/utils.py (new file, 36 lines)
@@ -0,0 +1,36 @@
import os

import numpy as np

SINQ_PATH = "/afs/psi.ch/project/sinqdata"
ZEBRA_PROPOSALS_PATH = os.path.join(SINQ_PATH, "{year}/zebra/{proposal}")


def find_proposal_path(proposal):
    for entry in os.scandir(SINQ_PATH):
        if entry.is_dir() and len(entry.name) == 4 and entry.name.isdigit():
            proposal_path = ZEBRA_PROPOSALS_PATH.format(year=entry.name, proposal=proposal)
            if os.path.isdir(proposal_path):
                # found it
                break
    else:
        raise ValueError(f"Can not find data for proposal '{proposal}'")

    return proposal_path


def parse_hkl(fileobj, data_type):
    next(fileobj)
    fields = map(str.lower, next(fileobj).strip("!").strip().split())
    next(fileobj)
    data = np.loadtxt(fileobj, unpack=True)
    res = dict(zip(fields, data))

    # adapt to .ccl/.dat files naming convention
    res["counts"] = res.pop("f2")

    if data_type == ".hkl":
        for ind in ("h", "k", "l"):
            res[ind] = res[ind].astype(int)

    return res
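# Illustrative sketch for parse_hkl (the header lines and data rows below are made up,
# following the "skip line / field names after '!' / skip line" layout the function
# expects). "f2" is renamed to "counts", and h, k, l become ints for .hkl data.
import io

text = (
    "! some title line\n"
    "!   h   k   l   f2   s\n"
    "! ---------------------\n"
    "  1   0   0   123.4   5.6\n"
    "  2   1   0    50.0   2.0\n"
)
res = parse_hkl(io.StringIO(text), ".hkl")
print(res["h"], res["counts"])  # [1 2] [123.4  50. ]
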
pyzebra/xtal.py (modified, 295 lines)
@@ -1,19 +1,17 @@
-import math
-
import numpy as np
from numba import njit
-from scipy.optimize import curve_fit
-
-import pyzebra
-
-try:
-    from matplotlib import pyplot as plt
-except ImportError:
-    print("matplotlib is not available")

pi_r = 180 / np.pi
+IMAGE_W = 256
+IMAGE_H = 128
+
+XNORM = 128
+YNORM = 64
+XPIX = 0.734
+YPIX = 1.4809


@njit(cache=True)
def z4frgn(wave, ga, nu):
    """CALCULATES DIFFRACTION VECTOR IN LAB SYSTEM FROM GA AND NU

@@ -25,36 +23,34 @@ def z4frgn(wave, ga, nu):
    """
    ga_r = ga / pi_r
    nu_r = nu / pi_r
-    z4 = [0.0, 0.0, 0.0]
-    z4[0] = (np.sin(ga_r) * np.cos(nu_r)) / wave
-    z4[1] = (np.cos(ga_r) * np.cos(nu_r) - 1.0) / wave
-    z4[2] = (np.sin(nu_r)) / wave
+    z4 = [np.sin(ga_r) * np.cos(nu_r), np.cos(ga_r) * np.cos(nu_r) - 1.0, np.sin(nu_r)]

-    return z4
+    return np.array(z4) / wave


@njit(cache=True)
-def phimat(phi):
-    """BUSING AND LEVY CONVENTION ROTATION MATRIX FOR PHI OR OMEGA
+def phimat_T(phi):
+    """TRANSPOSED BUSING AND LEVY CONVENTION ROTATION MATRIX FOR PHI OR OMEGA

    Args:
        PHI

    Returns:
-        DUM
+        DUM_T
    """
    ph_r = phi / pi_r

-    dum = np.zeros(9).reshape(3, 3)
+    dum = np.zeros((3, 3))
    dum[0, 0] = np.cos(ph_r)
-    dum[0, 1] = np.sin(ph_r)
-    dum[1, 0] = -dum[0, 1]
+    dum[1, 0] = np.sin(ph_r)
+    dum[0, 1] = -dum[1, 0]
    dum[1, 1] = dum[0, 0]
    dum[2, 2] = 1

    return dum


@njit(cache=True)
def z1frnb(wave, ga, nu, om):
    """CALCULATE DIFFRACTION VECTOR Z1 FROM GA, OM, NU, ASSUMING CH=PH=0

@@ -65,30 +61,28 @@ def z1frnb(wave, ga, nu, om):
        Z1
    """
    z4 = z4frgn(wave, ga, nu)
-    dum = phimat(phi=om)
-    dumt = np.transpose(dum)
-    z3 = dumt.dot(z4)
+    z3 = phimat_T(phi=om).dot(z4)

    return z3


@njit(cache=True)
-def chimat(chi):
-    """BUSING AND LEVY CONVENTION ROTATION MATRIX FOR CHI
+def chimat_T(chi):
+    """TRANSPOSED BUSING AND LEVY CONVENTION ROTATION MATRIX FOR CHI

    Args:
        CHI

    Returns:
-        DUM
+        DUM_T
    """
    ch_r = chi / pi_r

-    dum = np.zeros(9).reshape(3, 3)
+    dum = np.zeros((3, 3))
    dum[0, 0] = np.cos(ch_r)
-    dum[0, 2] = np.sin(ch_r)
+    dum[2, 0] = np.sin(ch_r)
    dum[1, 1] = 1
-    dum[2, 0] = -dum[0, 2]
+    dum[0, 2] = -dum[2, 0]
    dum[2, 2] = dum[0, 0]

    return dum

@@ -104,13 +98,8 @@ def z1frz3(z3, chi, phi):
    Returns:
        Z1
    """
-    dum1 = chimat(chi)
-    dum2 = np.transpose(dum1)
-    z2 = dum2.dot(z3)
-
-    dum1 = phimat(phi)
-    dum2 = np.transpose(dum1)
-    z1 = dum2.dot(z2)
+    z2 = chimat_T(chi).dot(z3)
+    z1 = phimat_T(phi).dot(z2)

    return z1

@@ -292,185 +281,81 @@ def fixdnu(wave, z1, ch2, ph2, nu):
    return ch, ph, ga, om


-# for test run:
-# angtohkl(wave=1.18,ddist=616,gammad=48.66,om=-22.80,ch=0,ph=0,nud=0,x=128,y=64)
-
-
-def angtohkl(wave, ddist, gammad, om, ch, ph, nud, x, y):
-    """finds hkl-indices of a reflection from its position (x,y,angles) at the 2d-detector
-
-    Args:
-        gammad, om, ch, ph, nud, xobs, yobs
-
-    Returns:
-
-    """
-    # define ub matrix if testing angtohkl(wave=1.18,ddist=616,gammad=48.66,om=-22.80,ch=0,ph=0,nud=0,x=128,y=64) against f90:
-    # ub = np.array([-0.0178803,-0.0749231,0.0282804,-0.0070082,-0.0368001,-0.0577467,0.1609116,-0.0099281,0.0006274]).reshape(3,3)
-    ub = np.array(
-        [0.04489, 0.02045, -0.2334, -0.06447, 0.00129, -0.16356, -0.00328, 0.2542, 0.0196]
-    ).reshape(3, 3)
-    print("The input values are: ga=", gammad, ", om=", om, ", ch=", ch, ", ph=", ph, ", nu=", nud, ", x=", x, ", y=", y)
-
-    ga, nu = det2pol(ddist, gammad, nud, x, y)
-
-    print("The calculated actual angles are: ga=", ga, ", om=", om, ", ch=", ch, ", ph=", ph, ", nu=", nu)
-
-    z1 = z1frmd(wave, ga, om, ch, ph, nu)
-
-    print("The diffraction vector is:", z1[0], z1[1], z1[2])
-
-    ubinv = np.linalg.inv(ub)
-
-    h = ubinv[0, 0] * z1[0] + ubinv[0, 1] * z1[1] + ubinv[0, 2] * z1[2]
-    k = ubinv[1, 0] * z1[0] + ubinv[1, 1] * z1[1] + ubinv[1, 2] * z1[2]
-    l = ubinv[2, 0] * z1[0] + ubinv[2, 1] * z1[1] + ubinv[2, 2] * z1[2]
-
-    print("The Miller indexes are:", h, k, l)
-
-    ch2, ph2 = eqchph(z1)
-    ch, ph, ga, om = fixdnu(wave, z1, ch2, ph2, nu)
-
-    print("Bisecting angles to put reflection into the detector center: ga=", ga, ", om=", om, ", ch=", ch, ", ph=", ph, ", nu=", nu)
-
-
-def ang2hkl(wave, ddist, gammad, om, ch, ph, nud, ub, x, y):
-    """Calculate hkl-indices of a reflection from its position (x,y,angles) at the 2d-detector
-    """
+def ang2hkl(wave, ddist, gammad, om, ch, ph, nud, ub_inv, x, y):
+    """Calculate hkl-indices of a reflection from its position (x,y,angles) at the 2d-detector"""
    ga, nu = det2pol(ddist, gammad, nud, x, y)
    z1 = z1frmd(wave, ga, om, ch, ph, nu)
-    ubinv = np.linalg.inv(ub)
-    hkl = ubinv @ z1
+    hkl = ub_inv @ z1

    return hkl


-def gauss(x, *p):
-    """Defines Gaussian function
-
-    Args:
-        A - amplitude, mu - position of the center, sigma - width
-
-    Returns:
-        Gaussian function
-    """
-    A, mu, sigma = p
-    return A * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))
+def ang2hkl_det(wave, ddist, gammad, om, chi, phi, nud, ub_inv):
+    """Calculate hkl-indices of a reflection from its position (x,y,angles) at the 2d-detector"""
+    xv, yv = np.meshgrid(range(IMAGE_W), range(IMAGE_H))
+    xobs = (xv.ravel() - XNORM) * XPIX
+    yobs = (yv.ravel() - YNORM) * YPIX
+
+    a = xobs
+    b = ddist * np.cos(yobs / ddist)
+    z = ddist * np.sin(yobs / ddist)
+    d = np.sqrt(a * a + b * b)
+
+    gamma = gammad + np.arctan2(a, b) * pi_r
+    nu = nud + np.arctan2(z, d) * pi_r
+
+    gamma_r = gamma / pi_r
+    nu_r = nu / pi_r
+    z4 = np.vstack(
+        (
+            np.sin(gamma_r) * np.cos(nu_r) / wave,
+            (np.cos(gamma_r) * np.cos(nu_r) - 1) / wave,
+            np.sin(nu_r) / wave,
+        )
+    )
+
+    om_r = om / pi_r
+    dum3 = np.zeros((3, 3))
+    dum3[0, 0] = np.cos(om_r)
+    dum3[1, 0] = np.sin(om_r)
+    dum3[0, 1] = -dum3[1, 0]
+    dum3[1, 1] = dum3[0, 0]
+    dum3[2, 2] = 1
+
+    chi_r = chi / pi_r
+    dum2 = np.zeros((3, 3))
+    dum2[0, 0] = np.cos(chi_r)
+    dum2[2, 0] = np.sin(chi_r)
+    dum2[1, 1] = 1
+    dum2[0, 2] = -dum2[2, 0]
+    dum2[2, 2] = dum2[0, 0]
+
+    phi_r = phi / pi_r
+    dum1 = np.zeros((3, 3))
+    dum1[0, 0] = np.cos(phi_r)
+    dum1[1, 0] = np.sin(phi_r)
+    dum1[0, 1] = -dum1[1, 0]
+    dum1[1, 1] = dum1[0, 0]
+    dum1[2, 2] = 1
+
+    hkl = (ub_inv @ dum1 @ dum2 @ dum3 @ z4).reshape(3, IMAGE_H, IMAGE_W)
+
+    return hkl


-def box_int(file, box):
-    """Calculates center of the peak in the NB-geometry angles and Intensity of the peak
-
-    Args:
-        file name, box size [x0:xN, y0:yN, fr0:frN]
-
-    Returns:
-        gamma, omPeak, nu polar angles, Int and data for 3 fit plots
-    """
-
-    dat = pyzebra.read_detector_data(file)
-
-    sttC = dat["pol_angle"][0]
-    om = dat["rot_angle"]
-    nuC = dat["tlt_angle"][0]
-    ddist = dat["ddist"]
-
-    # defining indices
-    x0, xN, y0, yN, fr0, frN = box
-
-    # omega fit
-    om = dat["rot_angle"][fr0:frN]
-    cnts = np.sum(dat["data"][fr0:frN, y0:yN, x0:xN], axis=(1, 2))
-
-    p0 = [1.0, 0.0, 1.0]
-    coeff, var_matrix = curve_fit(gauss, range(len(cnts)), cnts, p0=p0)
-
-    frC = fr0 + coeff[1]
-    omF = dat["rot_angle"][math.floor(frC)]
-    omC = dat["rot_angle"][math.ceil(frC)]
-    frStep = frC - math.floor(frC)
-    omStep = omC - omF
-    omP = omF + omStep * frStep
-    Int = coeff[1] * abs(coeff[2] * omStep) * math.sqrt(2) * math.sqrt(np.pi)
-    # omega plot
-    x_fit = np.linspace(0, len(cnts), 100)
-    y_fit = gauss(x_fit, *coeff)
-    plt.figure()
-    plt.subplot(131)
-    plt.plot(range(len(cnts)), cnts)
-    plt.plot(x_fit, y_fit)
-    plt.ylabel("Intensity in the box")
-    plt.xlabel("Frame N of the box")
-    label = "om"
-    # gamma fit
-    sliceXY = dat["data"][fr0:frN, y0:yN, x0:xN]
-    sliceXZ = np.sum(sliceXY, axis=1)
-    sliceYZ = np.sum(sliceXY, axis=2)
-
-    projX = np.sum(sliceXZ, axis=0)
-    p0 = [1.0, 0.0, 1.0]
-    coeff, var_matrix = curve_fit(gauss, range(len(projX)), projX, p0=p0)
-    x = x0 + coeff[1]
-    # gamma plot
-    x_fit = np.linspace(0, len(projX), 100)
-    y_fit = gauss(x_fit, *coeff)
-    plt.subplot(132)
-    plt.plot(range(len(projX)), projX)
-    plt.plot(x_fit, y_fit)
-    plt.ylabel("Intensity in the box")
-    plt.xlabel("X-pixel of the box")
-
-    # nu fit
-    projY = np.sum(sliceYZ, axis=0)
-    p0 = [1.0, 0.0, 1.0]
-    coeff, var_matrix = curve_fit(gauss, range(len(projY)), projY, p0=p0)
-    y = y0 + coeff[1]
-    # nu plot
-    x_fit = np.linspace(0, len(projY), 100)
-    y_fit = gauss(x_fit, *coeff)
-    plt.subplot(133)
-    plt.plot(range(len(projY)), projY)
-    plt.plot(x_fit, y_fit)
-    plt.ylabel("Intensity in the box")
-    plt.xlabel("Y-pixel of the box")
-
-    ga, nu = pyzebra.det2pol(ddist, sttC, nuC, x, y)
-
-    return ga[0], omP, nu[0], Int
+def ang2hkl_1d(wave, ga, om, ch, ph, nu, ub_inv):
+    """Calculate hkl-indices of a reflection from its position (angles) at the 1d-detector"""
+    z1 = z1frmd(wave, ga, om, ch, ph, nu)
+    hkl = ub_inv @ z1
+
+    return hkl
+
+
+def ang_proc(wave, ddist, gammad, om, ch, ph, nud, x, y):
+    """Utility function to calculate ch, ph, ga, om"""
+    ga, nu = det2pol(ddist, gammad, nud, x, y)
+    z1 = z1frmd(wave, ga, om, ch, ph, nu)
+    ch2, ph2 = eqchph(z1)
+    ch, ph, ga, om = fixdnu(wave, z1, ch2, ph2, nu)
+
+    return ch, ph, ga, om
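# Design note as a sketch (hypothetical variable values): after this change the ang2hkl
# family takes a precomputed inverse UB matrix, so callers invert once per scan instead
# of once per pixel or frame:
#
#   import numpy as np
#   ub_inv = np.linalg.inv(scan["ub"])  # once per scan
#   hkl = ang2hkl(wave, ddist, gammad, om, ch, ph, nud, ub_inv, x, y)
#   hkl_map = ang2hkl_det(wave, ddist, gammad, om, chi, phi, nud, ub_inv)  # whole detector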