Compare commits
201 Commits
| SHA1 |
| --- |
| 0c620d4a08 |
| 5d65afa194 |
| d312c99f96 |
| 526821e073 |
| d606230feb |
| ce5cff05a7 |
| a738f74f88 |
| 4785c97f33 |
| e68fedc7e5 |
| 2581996625 |
| d014849abd |
| aa6e668f97 |
| 85c74c39f0 |
| 0e97f44cc7 |
| 9c5f7e6284 |
| 167e5136b1 |
| 21d642b7d7 |
| fe68b1de7e |
| 3291a67e7d |
| 041c5c0e8b |
| 566cebb01a |
| a70b4fae57 |
| 97836b2906 |
| 7ee8eba007 |
| 5169e08777 |
| ed8e6d262f |
| 426bb16792 |
| 554716fc9a |
| 69767da850 |
| 3e174f22e5 |
| 7c1e2fdf0c |
| b61327b5f2 |
| 0e3aea142d |
| c7b05d252f |
| a65708004b |
| af1336df78 |
| 966d6349df |
| 5dbb208e92 |
| af70302bf3 |
| 8727e25a2d |
| fac3592ab2 |
| 599e2e1e74 |
| 11fae5d47a |
| 7ee4b1c86a |
| e76623e55e |
| 794d5c49d4 |
| 0b4f4b1ce9 |
| 15bd970a72 |
| 89ff0b15f5 |
| 9e3466fcaa |
| 55f3198f9d |
| 747b008d5e |
| 1a7cd2a4b5 |
| eb4e30b7e0 |
| 82f3c53380 |
| ac76d4b2c3 |
| dab52d9508 |
| b63ef90e11 |
| d5ac2c6d56 |
| 36faad1d9d |
| 9140798c74 |
| 96eaa27e55 |
| b418fb8300 |
| 699c5f3d11 |
| 6a822c4c85 |
| c2e1f1def1 |
| 2b9775faed |
| ea7d611819 |
| 9ef07cff19 |
| 12077dd5f3 |
| 3b1a2b1a0b |
| 8eee968344 |
| 97936d6c2a |
| 91d3a0ac9e |
| 4f1f03224c |
| 16a47cf3b3 |
| e3de0f7217 |
| 983e0dab42 |
| a6f97f59e8 |
| bf3b44405d |
| 93a557fea9 |
| b31c0b413c |
| 20527e8d2b |
| e09538eaeb |
| 239949b7c0 |
| 7e6df95c49 |
| e38993e69d |
| 6bf401aba8 |
| 0c4fffff0d |
| 2c727d34bd |
| 5e979eb9e3 |
| ba6a99b912 |
| 315b025341 |
| d0b67b8565 |
| 1761003d8a |
| d42a53ed47 |
| cd1e5c42c0 |
| c2215e9b6d |
| c5ec09a5e3 |
| b0a4e35d3d |
| db85eee329 |
| 67d0af292b |
| 241c2a9779 |
| 8c265027bf |
| f5c405bee8 |
| ae33ee825e |
| f694249298 |
| b7ac6d4359 |
| e27ba7c711 |
| ddaeda35c7 |
| dac1237009 |
| 999ceba3b7 |
| 0a634ca8da |
| 1f1762063d |
| fdb2027fd4 |
| 559e18ed5d |
| d6202d4a6b |
| 4b3b14e4ac |
| b4db41d672 |
| 16174f5cc4 |
| b50783f6be |
| f830acf70d |
| ea41301605 |
| 6a341e5001 |
| 9bbed3b55d |
| 6bd2398f5e |
| 768dd77ef5 |
| cf6f7a8506 |
| 654d281c49 |
| 950f76d4be |
| 238f3e4fbc |
| 826363a0f5 |
| 4822121b3b |
| 56609ad5ff |
| a9b0a8a01d |
| aa6bcb6c6b |
| 216de442a5 |
| 9507339c2a |
| 0c158db48f |
| 9bd959e656 |
| 65b28fffc6 |
| 0d8a30b995 |
| c602a3df2e |
| b19b70caae |
| b08f3c27db |
| e15f9c9c3e |
| b62573fa09 |
| f7f016cf1c |
| 8c8715b041 |
| 008761e661 |
| 4343d6e2b6 |
| 11e1a6b60c |
| 8be637a7f3 |
| 4fbfe21e99 |
| b5b77d165a |
| 0e176cb2f3 |
| 2ba0964e07 |
| b31f359ee7 |
| 63150a4b19 |
| 8d779b11f6 |
| b2d603b3c5 |
| 2ddb0a668a |
| de81f2fd9f |
| b2fc2d604a |
| 78096efcef |
| 5b0f97959e |
| 8fb1c5f247 |
| 58641ab94f |
| a6bcb8ffa1 |
| 7b6e6bf396 |
| 45f295fcf8 |
| 3c58fd2102 |
| abbaded278 |
| fbe992c901 |
| dec282d1b7 |
| 4429823629 |
| 80fddb514a |
| cfe9832c1e |
| fd942672df |
| 60cb733ca7 |
| 7c2ecef56d |
| 468f33e606 |
| dbc643aba9 |
| 0856705024 |
| ce608f1b49 |
| 3eaf54eda3 |
| a496267a9d |
| 1a3ebfbcbd |
| 7bcb23c1bd |
| b28fe39bbb |
| 42c6e6b921 |
| dba2dc6149 |
| a0c9b0162b |
| 00b0c2d708 |
| 4ae8890bb8 |
| 430ffc2caa |
| aee5c82925 |
| 7e16ea0fea |
| 6ff1b2b54f |
| 6099df650b |
| 0347566aeb |
.github/workflows/deployment.yaml (vendored, new file, 25 lines)

@@ -0,0 +1,25 @@
+name: Deployment
+
+on:
+  push:
+    tags:
+      - '*'
+
+jobs:
+  publish-conda-package:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Prepare
+        run: |
+          $CONDA/bin/conda install --quiet --yes conda-build anaconda-client
+          $CONDA/bin/conda config --append channels conda-forge
+          $CONDA/bin/conda config --set anaconda_upload yes
+
+      - name: Build and upload
+        env:
+          ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }}
+        run: |
+          $CONDA/bin/conda build --token $ANACONDA_TOKEN conda-recipe
.travis.yml (deleted, 33 lines)

@@ -1,33 +0,0 @@
-language: python
-python:
-  - 3.6
-  - 3.7
-  - 3.8
-
-# Build only tagged commits
-if: tag IS present
-
-before_install:
-  - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
-  - bash miniconda.sh -b -p $HOME/miniconda
-  - export PATH="$HOME/miniconda/bin:$PATH"
-  - conda config --append channels conda-forge
-  - conda config --set always_yes yes
-  - conda config --set anaconda_upload no
-
-install:
-  - conda update -q conda
-  - conda install -q python=$TRAVIS_PYTHON_VERSION conda-build anaconda-client
-
-script:
-  - conda build conda-recipe
-
-deploy:
-  provider: script
-  script: anaconda -t $ANACONDA_TOKEN upload $HOME/miniconda/conda-bld/**/pyzebra-*.tar.bz2
-  on:
-    branch: master
-    tags: true
-
-notifications:
-  email: false
.vscode/launch.json (vendored, 2 changes)

@@ -5,7 +5,7 @@
             "name": "pyzebra",
             "type": "python",
             "request": "launch",
-            "program": "${workspaceFolder}/pyzebra/cli.py",
+            "program": "${workspaceFolder}/pyzebra/app/cli.py",
             "console": "internalConsole",
            "env": {},
         },
conda-recipe/bld.bat (new file, 2 lines)

@@ -0,0 +1,2 @@
+"%PYTHON%" setup.py install --single-version-externally-managed --record=record.txt
+if errorlevel 1 exit 1
conda-recipe/meta.yaml

@@ -8,20 +8,22 @@ source:
   path: ..
 
 build:
+  noarch: python
   number: 0
   entry_points:
-    - pyzebra = pyzebra.cli:main
+    - pyzebra = pyzebra.app.cli:main
 
 requirements:
   build:
-    - python
+    - python >=3.7
     - setuptools
   run:
-    - python
+    - python >=3.7
     - numpy
     - scipy
     - h5py
-    - bokeh
+    - bokeh =2.3
+    - matplotlib
     - numba
     - lmfit
     - uncertainties
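The recipe's `entry_points` entry mirrors the usual `console_scripts` declaration that `bld.bat` installs via `setup.py`. A hedged sketch of the matching stanza (the repository's actual `setup.py` is not shown in this diff, so the fields below are illustrative):

```python
from setuptools import find_packages, setup

setup(
    name="pyzebra",
    packages=find_packages(),
    # Same mapping as the conda recipe's entry_points:
    # the `pyzebra` command invokes pyzebra.app.cli:main
    entry_points={"console_scripts": ["pyzebra = pyzebra.app.cli:main"]},
)
```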
make_release.py (mode changed: Normal file → Executable file, 9 changes)

@@ -3,14 +3,19 @@
 import argparse
 import os
 import re
+import subprocess
 
 
 def main():
+    branch = subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True).decode().strip()
+    if branch != "master":
+        print("Aborting, not on 'master' branch.")
+        return
+
     filepath = "pyzebra/__init__.py"
 
     parser = argparse.ArgumentParser()
     parser.add_argument("level", type=str, choices=["patch", "minor", "major"])
-    parser.add_argument("tag_msg", type=str, help="tag message")
     args = parser.parse_args()
 
     with open(filepath) as f:
@@ -35,7 +40,7 @@ def main():
     f.write(re.sub(r'__version__ = "(.*?)"', f'__version__ = "{new_version}"', file_content))
 
     os.system(f"git commit {filepath} -m 'Updating for version {new_version}'")
-    os.system(f"git tag -a {new_version} -m '{args.tag_msg}'")
+    os.system(f"git tag -a {new_version} -m 'Release {new_version}'")
 
 
 if __name__ == "__main__":
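The whole version bump hinges on a single `re.sub` over `pyzebra/__init__.py`. A minimal sketch of that mechanism, using the exact regex from the script (the `bump` helper itself is illustrative, not part of the repository):

```python
import re

def bump(file_content: str, new_version: str) -> str:
    # Replace the quoted version in a line like: __version__ = "0.1.0"
    return re.sub(r'__version__ = "(.*?)"', f'__version__ = "{new_version}"', file_content)

print(bump('__version__ = "0.1.0"', "0.3.0"))  # __version__ = "0.3.0"
```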
pyzebra/__init__.py

@@ -1,10 +1,7 @@
-import pyzebra.ccl_dict_operation
 from pyzebra.anatric import *
-from pyzebra.ccl_findpeaks import ccl_findpeaks
-from pyzebra.comm_export import export_comm
-from pyzebra.fit2 import fitccl
+from pyzebra.ccl_io import *
 from pyzebra.h5 import *
-from pyzebra.load_1D import load_1D, parse_1D
 from pyzebra.xtal import *
+from pyzebra.ccl_process import *
 
-__version__ = "0.1.0"
+__version__ = "0.3.0"
pyzebra/anatric.py

@@ -2,7 +2,6 @@ import subprocess
 import xml.etree.ElementTree as ET
 
 
-ANATRIC_PATH = "/afs/psi.ch/project/sinq/rhel7/bin/anatric"
 DATA_FACTORY_IMPLEMENTATION = [
     "trics",
     "morph",
@@ -24,8 +23,8 @@ REFLECTION_PRINTER_FORMATS = [
 ALGORITHMS = ["adaptivemaxcog", "adaptivedynamic"]
 
 
-def anatric(config_file):
-    subprocess.run([ANATRIC_PATH, config_file], check=True)
+def anatric(config_file, anatric_path="/afs/psi.ch/project/sinq/rhel7/bin/anatric"):
+    subprocess.run([anatric_path, config_file], check=True)
 
 
 class AnatricConfig:
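With this change the executable location becomes a keyword argument instead of a module-level constant, so callers can point at a non-PSI installation. A short usage sketch, assuming the function is re-exported at package level (it is imported via `from pyzebra.anatric import *`); the override path is an example:

```python
import pyzebra

# Default: the PSI AFS binary baked into the signature
pyzebra.anatric("experiment.xml")

# Override for a local build (example path)
pyzebra.anatric("experiment.xml", anatric_path="/usr/local/bin/anatric")
```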
pyzebra/app/app.py

@@ -1,4 +1,3 @@
-import argparse
 import logging
 import sys
 from io import StringIO
@@ -10,15 +9,11 @@ from bokeh.models import Tabs, TextAreaInput
 import panel_ccl_integrate
 import panel_hdf_anatric
 import panel_hdf_viewer
+import panel_param_study
+import panel_spind
 
-parser = argparse.ArgumentParser(
-    prog="pyzebra", formatter_class=argparse.ArgumentDefaultsHelpFormatter
-)
-
-args = parser.parse_args()
 
 doc = curdoc()
-doc.title = "pyzebra"
 
 sys.stdout = StringIO()
 stdout_textareainput = TextAreaInput(title="print output:", height=150)
@@ -26,7 +21,7 @@ stdout_textareainput = TextAreaInput(title="print output:", height=150)
 bokeh_stream = StringIO()
 bokeh_handler = logging.StreamHandler(bokeh_stream)
 bokeh_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
-bokeh_logger = logging.getLogger('bokeh')
+bokeh_logger = logging.getLogger("bokeh")
 bokeh_logger.addHandler(bokeh_handler)
 bokeh_log_textareainput = TextAreaInput(title="server output:", height=150)
 
@@ -34,10 +29,12 @@ bokeh_log_textareainput = TextAreaInput(title="server output:", height=150)
 tab_hdf_viewer = panel_hdf_viewer.create()
 tab_hdf_anatric = panel_hdf_anatric.create()
 tab_ccl_integrate = panel_ccl_integrate.create()
+tab_param_study = panel_param_study.create()
+tab_spind = panel_spind.create()
 
 doc.add_root(
     column(
-        Tabs(tabs=[tab_hdf_viewer, tab_hdf_anatric, tab_ccl_integrate]),
+        Tabs(tabs=[tab_hdf_viewer, tab_hdf_anatric, tab_ccl_integrate, tab_param_study, tab_spind]),
         row(stdout_textareainput, bokeh_log_textareainput, sizing_mode="scale_both"),
     )
 )
pyzebra/app/cli.py

@@ -6,6 +6,8 @@ from bokeh.application.application import Application
 from bokeh.application.handlers import ScriptHandler
 from bokeh.server.server import Server
 
+from pyzebra.app.handler import PyzebraHandler
+
 
 logging.basicConfig(format="%(asctime)s %(message)s", level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -16,7 +18,7 @@ def main():
     This is a wrapper around a bokeh server that provides an interface to launch the application,
     bundled with the pyzebra package.
     """
-    app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "app", "app.py")
+    app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "app.py")
 
     parser = argparse.ArgumentParser(
         prog="pyzebra", formatter_class=argparse.ArgumentDefaultsHelpFormatter
@@ -35,6 +37,13 @@ def main():
         help="hostname that can connect to the server websocket",
     )
 
+    parser.add_argument(
+        "--anatric-path",
+        type=str,
+        default=None,
+        help="path to anatric executable",
+    )
+
     parser.add_argument(
         "--args",
         nargs=argparse.REMAINDER,
@@ -46,9 +55,10 @@ def main():
 
     logger.info(app_path)
 
+    pyzebra_handler = PyzebraHandler(args.anatric_path)
     handler = ScriptHandler(filename=app_path, argv=args.args)
     server = Server(
-        {"/": Application(handler)},
+        {"/": Application(pyzebra_handler, handler)},
         port=args.port,
         allow_websocket_origin=args.allow_websocket_origin,
     )
pyzebra/app/handler.py (new file, 30 lines)

@@ -0,0 +1,30 @@
+from bokeh.application.handlers import Handler
+
+
+class PyzebraHandler(Handler):
+    """Provides a mechanism for generic bokeh applications to build up new streamvis documents.
+    """
+
+    def __init__(self, anatric_path):
+        """Initialize a pyzebra handler for bokeh applications.
+
+        Args:
+            args (Namespace): Command line parsed arguments.
+        """
+        super().__init__()  # no-op
+
+        self.anatric_path = anatric_path
+
+    def modify_document(self, doc):
+        """Modify an application document with pyzebra specific features.
+
+        Args:
+            doc (Document) : A bokeh Document to update in-place
+
+        Returns:
+            Document
+        """
+        doc.title = "pyzebra"
+        doc.anatric_path = self.anatric_path
+
+        return doc
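As the `cli.py` change above shows, this handler is passed to the Bokeh `Application` ahead of the `ScriptHandler`; Bokeh applies each handler's `modify_document` in order, so `doc.anatric_path` is already set by the time `app.py` executes and can read it from `curdoc()`. A minimal sketch of that composition (the anatric path is an example value):

```python
from bokeh.application.application import Application
from bokeh.application.handlers import ScriptHandler

from pyzebra.app.handler import PyzebraHandler

# Handlers run in order: PyzebraHandler first attaches anatric_path
# to the document, then ScriptHandler executes app.py, which can
# read the value via curdoc().anatric_path.
pyzebra_handler = PyzebraHandler("/afs/psi.ch/project/sinq/rhel7/bin/anatric")
script_handler = ScriptHandler(filename="pyzebra/app/app.py")
app = Application(pyzebra_handler, script_handler)
```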
@ -2,25 +2,33 @@ import base64
|
|||||||
import io
|
import io
|
||||||
import os
|
import os
|
||||||
import tempfile
|
import tempfile
|
||||||
|
import types
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from bokeh.layouts import column, row
|
from bokeh.layouts import column, row
|
||||||
from bokeh.models import (
|
from bokeh.models import (
|
||||||
Asterisk,
|
|
||||||
BasicTicker,
|
BasicTicker,
|
||||||
Button,
|
Button,
|
||||||
|
CheckboxEditor,
|
||||||
ColumnDataSource,
|
ColumnDataSource,
|
||||||
CustomJS,
|
CustomJS,
|
||||||
DataRange1d,
|
DataRange1d,
|
||||||
DataTable,
|
DataTable,
|
||||||
Div,
|
Div,
|
||||||
|
Dropdown,
|
||||||
FileInput,
|
FileInput,
|
||||||
Grid,
|
Grid,
|
||||||
|
Legend,
|
||||||
Line,
|
Line,
|
||||||
LinearAxis,
|
LinearAxis,
|
||||||
|
MultiLine,
|
||||||
|
MultiSelect,
|
||||||
|
NumberEditor,
|
||||||
Panel,
|
Panel,
|
||||||
|
PanTool,
|
||||||
Plot,
|
Plot,
|
||||||
RadioButtonGroup,
|
RadioButtonGroup,
|
||||||
|
ResetTool,
|
||||||
Scatter,
|
Scatter,
|
||||||
Select,
|
Select,
|
||||||
Spacer,
|
Spacer,
|
||||||
@ -30,25 +38,28 @@ from bokeh.models import (
|
|||||||
TextAreaInput,
|
TextAreaInput,
|
||||||
TextInput,
|
TextInput,
|
||||||
Toggle,
|
Toggle,
|
||||||
|
WheelZoomTool,
|
||||||
Whisker,
|
Whisker,
|
||||||
)
|
)
|
||||||
|
|
||||||
import pyzebra
|
import pyzebra
|
||||||
|
from pyzebra.ccl_io import AREA_METHODS
|
||||||
|
|
||||||
|
|
||||||
javaScript = """
|
javaScript = """
|
||||||
setTimeout(function() {
|
for (let i = 0; i < js_data.data['fname'].length; i++) {
|
||||||
const filename = 'output' + js_data.data['ext']
|
if (js_data.data['content'][i] === "") continue;
|
||||||
const blob = new Blob([js_data.data['cont']], {type: 'text/plain'})
|
|
||||||
|
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
|
||||||
const link = document.createElement('a');
|
const link = document.createElement('a');
|
||||||
document.body.appendChild(link);
|
document.body.appendChild(link);
|
||||||
const url = window.URL.createObjectURL(blob);
|
const url = window.URL.createObjectURL(blob);
|
||||||
link.href = url;
|
link.href = url;
|
||||||
link.download = filename;
|
link.download = js_data.data['fname'][i];
|
||||||
link.click();
|
link.click();
|
||||||
window.URL.revokeObjectURL(url);
|
window.URL.revokeObjectURL(url);
|
||||||
document.body.removeChild(link);
|
document.body.removeChild(link);
|
||||||
}, 500);
|
}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
|
PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
|
||||||
@ -56,388 +67,404 @@ PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
|
|||||||
|
|
||||||
def create():
|
def create():
|
||||||
det_data = {}
|
det_data = {}
|
||||||
peak_pos_textinput_lock = False
|
fit_params = {}
|
||||||
js_data = ColumnDataSource(data=dict(cont=[], ext=[]))
|
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""]))
|
||||||
|
|
||||||
def proposal_textinput_callback(_attr, _old, new):
|
def proposal_textinput_callback(_attr, _old, new):
|
||||||
ccl_path = os.path.join(PROPOSAL_PATH, new)
|
ccl_path = os.path.join(PROPOSAL_PATH, new.strip())
|
||||||
ccl_file_list = []
|
ccl_file_list = []
|
||||||
for file in os.listdir(ccl_path):
|
for file in os.listdir(ccl_path):
|
||||||
if file.endswith(".ccl"):
|
if file.endswith((".ccl", ".dat")):
|
||||||
ccl_file_list.append((os.path.join(ccl_path, file), file))
|
ccl_file_list.append((os.path.join(ccl_path, file), file))
|
||||||
ccl_file_select.options = ccl_file_list
|
file_select.options = ccl_file_list
|
||||||
ccl_file_select.value = ccl_file_list[0][0]
|
|
||||||
|
|
||||||
proposal_textinput = TextInput(title="Enter proposal number:", default_size=145)
|
proposal_textinput = TextInput(title="Proposal number:", default_size=145)
|
||||||
proposal_textinput.on_change("value", proposal_textinput_callback)
|
proposal_textinput.on_change("value", proposal_textinput_callback)
|
||||||
|
|
||||||
def ccl_file_select_callback(_attr, _old, new):
|
def _init_datatable():
|
||||||
nonlocal det_data
|
scan_list = [s["idx"] for s in det_data]
|
||||||
with open(new) as file:
|
hkl = [f'{s["h"]} {s["k"]} {s["l"]}' for s in det_data]
|
||||||
_, ext = os.path.splitext(new)
|
export = [s.get("active", True) for s in det_data]
|
||||||
det_data = pyzebra.parse_1D(file, ext)
|
|
||||||
|
|
||||||
scan_list = list(det_data["scan"].keys())
|
|
||||||
hkl = [
|
|
||||||
f'{int(m["h_index"])} {int(m["k_index"])} {int(m["l_index"])}'
|
|
||||||
for m in det_data["scan"].values()
|
|
||||||
]
|
|
||||||
scan_table_source.data.update(
|
scan_table_source.data.update(
|
||||||
scan=scan_list, hkl=hkl, peaks=[0] * len(scan_list), fit=[0] * len(scan_list)
|
scan=scan_list, hkl=hkl, fit=[0] * len(scan_list), export=export,
|
||||||
)
|
)
|
||||||
scan_table_source.selected.indices = []
|
scan_table_source.selected.indices = []
|
||||||
scan_table_source.selected.indices = [0]
|
scan_table_source.selected.indices = [0]
|
||||||
|
|
||||||
ccl_file_select = Select(title="Available .ccl files")
|
merge_options = [(str(i), f"{i} ({idx})") for i, idx in enumerate(scan_list)]
|
||||||
ccl_file_select.on_change("value", ccl_file_select_callback)
|
merge_source_select.options = merge_options
|
||||||
|
merge_source_select.value = merge_options[0][0]
|
||||||
|
merge_dest_select.options = merge_options
|
||||||
|
merge_dest_select.value = merge_options[0][0]
|
||||||
|
|
||||||
|
def ccl_file_select_callback(_attr, _old, _new):
|
||||||
|
pass
|
||||||
|
|
||||||
|
file_select = MultiSelect(title="Available .ccl/.dat files:", default_size=200, height=250)
|
||||||
|
file_select.on_change("value", ccl_file_select_callback)
|
||||||
|
|
||||||
|
def file_open_button_callback():
|
||||||
|
nonlocal det_data
|
||||||
|
det_data = []
|
||||||
|
for f_name in file_select.value:
|
||||||
|
with open(f_name) as file:
|
||||||
|
base, ext = os.path.splitext(f_name)
|
||||||
|
if det_data:
|
||||||
|
append_data = pyzebra.parse_1D(file, ext)
|
||||||
|
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
|
||||||
|
pyzebra.merge_datasets(det_data, append_data)
|
||||||
|
else:
|
||||||
|
det_data = pyzebra.parse_1D(file, ext)
|
||||||
|
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
|
||||||
|
pyzebra.merge_duplicates(det_data)
|
||||||
|
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
|
||||||
|
|
||||||
|
_init_datatable()
|
||||||
|
|
||||||
|
file_open_button = Button(label="Open New", default_size=100)
|
||||||
|
file_open_button.on_click(file_open_button_callback)
|
||||||
|
|
||||||
|
def file_append_button_callback():
|
||||||
|
for f_name in file_select.value:
|
||||||
|
with open(f_name) as file:
|
||||||
|
_, ext = os.path.splitext(f_name)
|
||||||
|
append_data = pyzebra.parse_1D(file, ext)
|
||||||
|
|
||||||
|
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
|
||||||
|
pyzebra.merge_datasets(det_data, append_data)
|
||||||
|
|
||||||
|
_init_datatable()
|
||||||
|
|
||||||
|
file_append_button = Button(label="Append", default_size=100)
|
||||||
|
file_append_button.on_click(file_append_button_callback)
|
||||||
|
|
||||||
def upload_button_callback(_attr, _old, new):
|
def upload_button_callback(_attr, _old, new):
|
||||||
nonlocal det_data
|
nonlocal det_data
|
||||||
with io.StringIO(base64.b64decode(new).decode()) as file:
|
det_data = []
|
||||||
_, ext = os.path.splitext(upload_button.filename)
|
for f_str, f_name in zip(new, upload_button.filename):
|
||||||
det_data = pyzebra.parse_1D(file, ext)
|
with io.StringIO(base64.b64decode(f_str).decode()) as file:
|
||||||
|
base, ext = os.path.splitext(f_name)
|
||||||
|
if det_data:
|
||||||
|
append_data = pyzebra.parse_1D(file, ext)
|
||||||
|
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
|
||||||
|
pyzebra.merge_datasets(det_data, append_data)
|
||||||
|
else:
|
||||||
|
det_data = pyzebra.parse_1D(file, ext)
|
||||||
|
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
|
||||||
|
pyzebra.merge_duplicates(det_data)
|
||||||
|
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
|
||||||
|
|
||||||
scan_list = list(det_data["scan"].keys())
|
_init_datatable()
|
||||||
hkl = [
|
|
||||||
f'{int(m["h_index"])} {int(m["k_index"])} {int(m["l_index"])}'
|
|
||||||
for m in det_data["scan"].values()
|
|
||||||
]
|
|
||||||
scan_table_source.data.update(
|
|
||||||
scan=scan_list, hkl=hkl, peaks=[0] * len(scan_list), fit=[0] * len(scan_list)
|
|
||||||
)
|
|
||||||
scan_table_source.selected.indices = []
|
|
||||||
scan_table_source.selected.indices = [0]
|
|
||||||
|
|
||||||
upload_button = FileInput(accept=".ccl")
|
upload_div = Div(text="or upload new .ccl/.dat files:", margin=(5, 5, 0, 5))
|
||||||
|
upload_button = FileInput(accept=".ccl,.dat", multiple=True, default_size=200)
|
||||||
upload_button.on_change("value", upload_button_callback)
|
upload_button.on_change("value", upload_button_callback)
|
||||||
|
|
||||||
|
def append_upload_button_callback(_attr, _old, new):
|
||||||
|
for f_str, f_name in zip(new, append_upload_button.filename):
|
||||||
|
with io.StringIO(base64.b64decode(f_str).decode()) as file:
|
||||||
|
_, ext = os.path.splitext(f_name)
|
||||||
|
append_data = pyzebra.parse_1D(file, ext)
|
||||||
|
|
||||||
|
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
|
||||||
|
pyzebra.merge_datasets(det_data, append_data)
|
||||||
|
|
||||||
|
_init_datatable()
|
||||||
|
|
||||||
|
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
|
||||||
|
append_upload_button = FileInput(accept=".ccl,.dat", multiple=True, default_size=200)
|
||||||
|
append_upload_button.on_change("value", append_upload_button_callback)
|
||||||
|
|
||||||
|
def monitor_spinner_callback(_attr, old, new):
|
||||||
|
if det_data:
|
||||||
|
pyzebra.normalize_dataset(det_data, new)
|
||||||
|
_update_plot(_get_selected_scan())
|
||||||
|
|
||||||
|
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
|
||||||
|
monitor_spinner.on_change("value", monitor_spinner_callback)
|
||||||
|
|
||||||
def _update_table():
|
def _update_table():
|
||||||
num_of_peaks = [scan.get("num_of_peaks", 0) for scan in det_data["scan"].values()]
|
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
|
||||||
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data["scan"].values()]
|
scan_table_source.data.update(fit=fit_ok)
|
||||||
scan_table_source.data.update(peaks=num_of_peaks, fit=fit_ok)
|
|
||||||
|
|
||||||
def _update_plot(ind):
|
def _update_plot(scan):
|
||||||
nonlocal peak_pos_textinput_lock
|
scan_motor = scan["scan_motor"]
|
||||||
peak_pos_textinput_lock = True
|
|
||||||
|
|
||||||
scan = det_data["scan"][ind]
|
|
||||||
y = scan["Counts"]
|
y = scan["Counts"]
|
||||||
x = scan["om"]
|
x = scan[scan_motor]
|
||||||
|
|
||||||
|
plot.axis[0].axis_label = scan_motor
|
||||||
plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))
|
plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))
|
||||||
|
|
||||||
num_of_peaks = scan.get("num_of_peaks")
|
|
||||||
if num_of_peaks is not None and num_of_peaks > 0:
|
|
||||||
peak_indexes = scan["peak_indexes"]
|
|
||||||
if len(peak_indexes) == 1:
|
|
||||||
peak_pos_textinput.value = str(scan["om"][peak_indexes[0]])
|
|
||||||
else:
|
|
||||||
peak_pos_textinput.value = str([scan["om"][ind] for ind in peak_indexes])
|
|
||||||
|
|
||||||
plot_peak_source.data.update(x=scan["om"][peak_indexes], y=scan["peak_heights"])
|
|
||||||
plot_line_smooth_source.data.update(x=x, y=scan["smooth_peaks"])
|
|
||||||
else:
|
|
||||||
peak_pos_textinput.value = None
|
|
||||||
plot_peak_source.data.update(x=[], y=[])
|
|
||||||
plot_line_smooth_source.data.update(x=[], y=[])
|
|
||||||
|
|
||||||
peak_pos_textinput_lock = False
|
|
||||||
|
|
||||||
fit = scan.get("fit")
|
fit = scan.get("fit")
|
||||||
if fit is not None:
|
if fit is not None:
|
||||||
plot_gauss_source.data.update(x=x, y=scan["fit"]["comps"]["gaussian"])
|
x_fit = np.linspace(x[0], x[-1], 100)
|
||||||
plot_bkg_source.data.update(x=x, y=scan["fit"]["comps"]["background"])
|
plot_fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
|
||||||
params = fit["result"].params
|
|
||||||
fit_output_textinput.value = (
|
|
||||||
"%s \n"
|
|
||||||
"Gaussian: centre = %9.4f, sigma = %9.4f, area = %9.4f \n"
|
|
||||||
"background: slope = %9.4f, intercept = %9.4f \n"
|
|
||||||
"Int. area = %9.4f +/- %9.4f \n"
|
|
||||||
"fit area = %9.4f +/- %9.4f \n"
|
|
||||||
"ratio((fit-int)/fit) = %9.4f"
|
|
||||||
% (
|
|
||||||
ind,
|
|
||||||
params["g_cen"].value,
|
|
||||||
params["g_width"].value,
|
|
||||||
params["g_amp"].value,
|
|
||||||
params["slope"].value,
|
|
||||||
params["intercept"].value,
|
|
||||||
fit["int_area"].n,
|
|
||||||
fit["int_area"].s,
|
|
||||||
params["g_amp"].value,
|
|
||||||
params["g_amp"].stderr,
|
|
||||||
(params["g_amp"].value - fit["int_area"].n) / params["g_amp"].value,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
numfit_min, numfit_max = fit["numfit"]
|
|
||||||
if numfit_min is None:
|
|
||||||
numfit_min_span.location = None
|
|
||||||
else:
|
|
||||||
numfit_min_span.location = x[numfit_min]
|
|
||||||
|
|
||||||
if numfit_max is None:
|
x_bkg = []
|
||||||
numfit_max_span.location = None
|
y_bkg = []
|
||||||
else:
|
xs_peak = []
|
||||||
numfit_max_span.location = x[numfit_max]
|
ys_peak = []
|
||||||
|
comps = fit.eval_components(x=x_fit)
|
||||||
|
for i, model in enumerate(fit_params):
|
||||||
|
if "linear" in model:
|
||||||
|
x_bkg = x_fit
|
||||||
|
y_bkg = comps[f"f{i}_"]
|
||||||
|
|
||||||
|
elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
|
||||||
|
xs_peak.append(x_fit)
|
||||||
|
ys_peak.append(comps[f"f{i}_"])
|
||||||
|
|
||||||
|
plot_bkg_source.data.update(x=x_bkg, y=y_bkg)
|
||||||
|
plot_peak_source.data.update(xs=xs_peak, ys=ys_peak)
|
||||||
|
|
||||||
|
fit_output_textinput.value = fit.fit_report()
|
||||||
|
|
||||||
else:
|
else:
|
||||||
plot_gauss_source.data.update(x=[], y=[])
|
plot_fit_source.data.update(x=[], y=[])
|
||||||
plot_bkg_source.data.update(x=[], y=[])
|
plot_bkg_source.data.update(x=[], y=[])
|
||||||
|
plot_peak_source.data.update(xs=[], ys=[])
|
||||||
fit_output_textinput.value = ""
|
fit_output_textinput.value = ""
|
||||||
numfit_min_span.location = None
|
|
||||||
numfit_max_span.location = None
|
|
||||||
|
|
||||||
# Main plot
|
# Main plot
|
||||||
plot = Plot(
|
plot = Plot(
|
||||||
x_range=DataRange1d(),
|
x_range=DataRange1d(),
|
||||||
y_range=DataRange1d(),
|
y_range=DataRange1d(only_visible=True),
|
||||||
plot_height=400,
|
plot_height=470,
|
||||||
plot_width=700,
|
plot_width=700,
|
||||||
toolbar_location=None,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
|
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
|
||||||
plot.add_layout(LinearAxis(axis_label="Omega"), place="below")
|
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
|
||||||
|
|
||||||
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
|
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
|
||||||
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
|
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
|
||||||
|
|
||||||
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
|
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
|
||||||
plot.add_glyph(plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue"))
|
plot_scatter = plot.add_glyph(
|
||||||
|
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue")
|
||||||
|
)
|
||||||
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
|
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
|
||||||
|
|
||||||
plot_line_smooth_source = ColumnDataSource(dict(x=[0], y=[0]))
|
plot_fit_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||||
plot.add_glyph(
|
plot_fit = plot.add_glyph(plot_fit_source, Line(x="x", y="y"))
|
||||||
plot_line_smooth_source, Line(x="x", y="y", line_color="steelblue", line_dash="dashed")
|
|
||||||
)
|
|
||||||
|
|
||||||
plot_gauss_source = ColumnDataSource(dict(x=[0], y=[0]))
|
|
||||||
plot.add_glyph(plot_gauss_source, Line(x="x", y="y", line_color="red", line_dash="dashed"))
|
|
||||||
|
|
||||||
plot_bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
|
plot_bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||||
plot.add_glyph(plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed"))
|
plot_bkg = plot.add_glyph(
|
||||||
|
plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed")
|
||||||
|
)
|
||||||
|
|
||||||
plot_peak_source = ColumnDataSource(dict(x=[], y=[]))
|
plot_peak_source = ColumnDataSource(dict(xs=[0], ys=[0]))
|
||||||
plot.add_glyph(plot_peak_source, Asterisk(x="x", y="y", size=10, line_color="red"))
|
plot_peak = plot.add_glyph(
|
||||||
|
plot_peak_source, MultiLine(xs="xs", ys="ys", line_color="red", line_dash="dashed")
|
||||||
|
)
|
||||||
|
|
||||||
numfit_min_span = Span(location=None, dimension="height", line_dash="dashed")
|
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
|
||||||
plot.add_layout(numfit_min_span)
|
plot.add_layout(fit_from_span)
|
||||||
|
|
||||||
numfit_max_span = Span(location=None, dimension="height", line_dash="dashed")
|
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
|
||||||
plot.add_layout(numfit_max_span)
|
plot.add_layout(fit_to_span)
|
||||||
|
|
||||||
|
plot.add_layout(
|
||||||
|
Legend(
|
||||||
|
items=[
|
||||||
|
("data", [plot_scatter]),
|
||||||
|
("best fit", [plot_fit]),
|
||||||
|
("peak", [plot_peak]),
|
||||||
|
("linear", [plot_bkg]),
|
||||||
|
],
|
||||||
|
location="top_left",
|
||||||
|
click_policy="hide",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
|
||||||
|
plot.toolbar.logo = None
|
||||||
|
|
||||||
# Scan select
|
# Scan select
|
||||||
def scan_table_callback(_attr, _old, new):
|
def scan_table_select_callback(_attr, old, new):
|
||||||
if new:
|
if not new:
|
||||||
_update_plot(scan_table_source.data["scan"][new[-1]])
|
# skip empty selections
|
||||||
|
return
|
||||||
|
|
||||||
scan_table_source = ColumnDataSource(dict(scan=[], hkl=[], peaks=[], fit=[]))
|
# Avoid selection of multiple indicies (via Shift+Click or Ctrl+Click)
|
||||||
|
if len(new) > 1:
|
||||||
|
# drop selection to the previous one
|
||||||
|
scan_table_source.selected.indices = old
|
||||||
|
return
|
||||||
|
|
||||||
|
if len(old) > 1:
|
||||||
|
# skip unnecessary update caused by selection drop
|
||||||
|
return
|
||||||
|
|
||||||
|
_update_plot(det_data[new[0]])
|
||||||
|
|
||||||
|
scan_table_source = ColumnDataSource(dict(scan=[], hkl=[], fit=[], export=[]))
|
||||||
scan_table = DataTable(
|
scan_table = DataTable(
|
||||||
source=scan_table_source,
|
source=scan_table_source,
|
||||||
columns=[
|
columns=[
|
||||||
TableColumn(field="scan", title="scan"),
|
TableColumn(field="scan", title="Scan", width=50),
|
||||||
TableColumn(field="hkl", title="hkl"),
|
TableColumn(field="hkl", title="hkl", width=100),
|
||||||
TableColumn(field="peaks", title="Peaks"),
|
TableColumn(field="fit", title="Fit", width=50),
|
||||||
TableColumn(field="fit", title="Fit"),
|
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
|
||||||
],
|
],
|
||||||
width=200,
|
width=310, # +60 because of the index column
|
||||||
index_position=None,
|
height=350,
|
||||||
|
autosize_mode="none",
|
||||||
|
editable=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
scan_table_source.selected.on_change("indices", scan_table_callback)
|
scan_table_source.selected.on_change("indices", scan_table_select_callback)
|
||||||
|
|
||||||
def peak_pos_textinput_callback(_attr, _old, new):
|
def _get_selected_scan():
|
||||||
if new is not None and not peak_pos_textinput_lock:
|
return det_data[scan_table_source.selected.indices[0]]
|
||||||
sel_ind = scan_table_source.selected.indices[-1]
|
|
||||||
scan_name = scan_table_source.data["scan"][sel_ind]
|
|
||||||
scan = det_data["scan"][scan_name]
|
|
||||||
|
|
||||||
scan["num_of_peaks"] = 1
|
merge_dest_select = Select(title="destination:", width=100)
|
||||||
peak_ind = (np.abs(scan["om"] - float(new))).argmin()
|
merge_source_select = Select(title="source:", width=100)
|
||||||
scan["peak_indexes"] = np.array([peak_ind], dtype=np.int64)
|
|
||||||
scan["peak_heights"] = np.array([scan["smooth_peaks"][peak_ind]])
|
|
||||||
_update_table()
|
|
||||||
_update_plot(scan_name)
|
|
||||||
|
|
||||||
peak_pos_textinput = TextInput(title="Peak position:", default_size=145)
|
def merge_button_callback():
|
||||||
peak_pos_textinput.on_change("value", peak_pos_textinput_callback)
|
scan_dest_ind = int(merge_dest_select.value)
|
||||||
|
scan_source_ind = int(merge_source_select.value)
|
||||||
|
|
||||||
peak_int_ratio_spinner = Spinner(
|
if scan_dest_ind == scan_source_ind:
|
||||||
title="Peak intensity ratio:", value=0.8, step=0.01, low=0, high=1, default_size=145
|
print("WARNING: Selected scans for merging are identical")
|
||||||
|
return
|
||||||
|
|
||||||
|
pyzebra.merge_scans(det_data[scan_dest_ind], det_data[scan_source_ind])
|
||||||
|
_update_plot(_get_selected_scan())
|
||||||
|
|
||||||
|
merge_button = Button(label="Merge scans", width=145)
|
||||||
|
merge_button.on_click(merge_button_callback)
|
||||||
|
|
||||||
|
def fit_from_spinner_callback(_attr, _old, new):
|
||||||
|
fit_from_span.location = new
|
||||||
|
|
||||||
|
fit_from_spinner = Spinner(title="Fit from:", default_size=145)
|
||||||
|
fit_from_spinner.on_change("value", fit_from_spinner_callback)
|
||||||
|
|
||||||
|
def fit_to_spinner_callback(_attr, _old, new):
|
||||||
|
fit_to_span.location = new
|
||||||
|
|
||||||
|
fit_to_spinner = Spinner(title="to:", default_size=145)
|
||||||
|
fit_to_spinner.on_change("value", fit_to_spinner_callback)
|
||||||
|
|
||||||
|
def fitparams_add_dropdown_callback(click):
|
||||||
|
# bokeh requires (str, str) for MultiSelect options
|
||||||
|
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
|
||||||
|
fitparams_select.options.append((new_tag, click.item))
|
||||||
|
fit_params[new_tag] = fitparams_factory(click.item)
|
||||||
|
fitparams_select.tags[0] += 1
|
||||||
|
|
||||||
|
fitparams_add_dropdown = Dropdown(
|
||||||
|
label="Add fit function",
|
||||||
|
menu=[
|
||||||
|
("Linear", "linear"),
|
||||||
|
("Gaussian", "gaussian"),
|
||||||
|
("Voigt", "voigt"),
|
||||||
|
("Pseudo Voigt", "pvoigt"),
|
||||||
|
# ("Pseudo Voigt1", "pseudovoigt1"),
|
||||||
|
],
|
||||||
|
default_size=145,
|
||||||
|
disabled=True,
|
||||||
)
|
)
|
||||||
peak_prominence_spinner = Spinner(title="Peak prominence:", value=50, low=0, default_size=145)
|
fitparams_add_dropdown.on_click(fitparams_add_dropdown_callback)
|
||||||
smooth_toggle = Toggle(label="Smooth curve", default_size=145)
|
|
||||||
window_size_spinner = Spinner(title="Window size:", value=7, step=2, low=1, default_size=145)
|
|
||||||
poly_order_spinner = Spinner(title="Poly order:", value=3, low=0, default_size=145)
|
|
||||||
|
|
||||||
centre_guess = Spinner(default_size=100)
|
def fitparams_select_callback(_attr, old, new):
|
||||||
centre_vary = Toggle(default_size=100, active=True)
|
# Avoid selection of multiple indicies (via Shift+Click or Ctrl+Click)
|
||||||
centre_min = Spinner(default_size=100)
|
if len(new) > 1:
|
||||||
centre_max = Spinner(default_size=100)
|
# drop selection to the previous one
|
||||||
sigma_guess = Spinner(default_size=100)
|
fitparams_select.value = old
|
||||||
sigma_vary = Toggle(default_size=100, active=True)
|
return
|
||||||
sigma_min = Spinner(default_size=100)
|
|
||||||
sigma_max = Spinner(default_size=100)
|
|
||||||
ampl_guess = Spinner(default_size=100)
|
|
||||||
ampl_vary = Toggle(default_size=100, active=True)
|
|
||||||
ampl_min = Spinner(default_size=100)
|
|
||||||
ampl_max = Spinner(default_size=100)
|
|
||||||
slope_guess = Spinner(default_size=100)
|
|
||||||
slope_vary = Toggle(default_size=100, active=True)
|
|
||||||
slope_min = Spinner(default_size=100)
|
|
||||||
slope_max = Spinner(default_size=100)
|
|
||||||
offset_guess = Spinner(default_size=100)
|
|
||||||
offset_vary = Toggle(default_size=100, active=True)
|
|
||||||
offset_min = Spinner(default_size=100)
|
|
||||||
offset_max = Spinner(default_size=100)
|
|
||||||
integ_from = Spinner(title="Integrate from:", default_size=145)
|
|
||||||
integ_to = Spinner(title="to:", default_size=145)
|
|
||||||
|
|
||||||
def fitparam_reset_button_callback():
|
if len(old) > 1:
|
||||||
centre_guess.value = None
|
# skip unnecessary update caused by selection drop
|
||||||
centre_vary.active = True
|
return
|
||||||
centre_min.value = None
|
|
||||||
centre_max.value = None
|
|
||||||
sigma_guess.value = None
|
|
||||||
sigma_vary.active = True
|
|
||||||
sigma_min.value = None
|
|
||||||
sigma_max.value = None
|
|
||||||
ampl_guess.value = None
|
|
||||||
ampl_vary.active = True
|
|
||||||
ampl_min.value = None
|
|
||||||
ampl_max.value = None
|
|
||||||
slope_guess.value = None
|
|
||||||
slope_vary.active = True
|
|
||||||
slope_min.value = None
|
|
||||||
slope_max.value = None
|
|
||||||
offset_guess.value = None
|
|
||||||
offset_vary.active = True
|
|
||||||
offset_min.value = None
|
|
||||||
offset_max.value = None
|
|
||||||
integ_from.value = None
|
|
||||||
integ_to.value = None
|
|
||||||
|
|
||||||
fitparam_reset_button = Button(label="Reset to defaults", default_size=145)
|
if new:
|
||||||
fitparam_reset_button.on_click(fitparam_reset_button_callback)
|
fitparams_table_source.data.update(fit_params[new[0]])
|
||||||
|
else:
|
||||||
|
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
|
||||||
|
|
||||||
fit_output_textinput = TextAreaInput(title="Fit results:", width=450, height=400)
|
fitparams_select = MultiSelect(options=[], height=120, default_size=145)
|
||||||
|
fitparams_select.tags = [0]
|
||||||
|
fitparams_select.on_change("value", fitparams_select_callback)
|
||||||
|
|
||||||
def peakfind_all_button_callback():
|
def fitparams_remove_button_callback():
|
||||||
for scan in det_data["scan"].values():
|
if fitparams_select.value:
|
||||||
pyzebra.ccl_findpeaks(
|
sel_tag = fitparams_select.value[0]
|
||||||
scan,
|
del fit_params[sel_tag]
|
||||||
int_threshold=peak_int_ratio_spinner.value,
|
for elem in fitparams_select.options:
|
||||||
prominence=peak_prominence_spinner.value,
|
if elem[0] == sel_tag:
|
||||||
smooth=smooth_toggle.active,
|
fitparams_select.options.remove(elem)
|
||||||
window_size=window_size_spinner.value,
|
break
|
||||||
poly_order=poly_order_spinner.value,
|
|
||||||
)
|
|
||||||
|
|
||||||
_update_table()
|
fitparams_select.value = []
|
||||||
|
|
||||||
sel_ind = scan_table_source.selected.indices[-1]
|
fitparams_remove_button = Button(label="Remove fit function", default_size=145, disabled=True)
|
||||||
_update_plot(scan_table_source.data["scan"][sel_ind])
|
fitparams_remove_button.on_click(fitparams_remove_button_callback)
|
||||||
|
|
||||||
peakfind_all_button = Button(label="Peak Find All", button_type="primary", default_size=145)
|
def fitparams_factory(function):
|
||||||
peakfind_all_button.on_click(peakfind_all_button_callback)
|
if function == "linear":
|
||||||
|
params = ["slope", "intercept"]
|
||||||
|
elif function == "gaussian":
|
||||||
|
params = ["amplitude", "center", "sigma"]
|
||||||
|
elif function == "voigt":
|
||||||
|
params = ["amplitude", "center", "sigma", "gamma"]
|
||||||
|
elif function == "pvoigt":
|
||||||
|
params = ["amplitude", "center", "sigma", "fraction"]
|
||||||
|
elif function == "pseudovoigt1":
|
||||||
|
params = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
|
||||||
|
else:
|
||||||
|
raise ValueError("Unknown fit function")
|
||||||
|
|
||||||
def peakfind_button_callback():
|
n = len(params)
|
||||||
sel_ind = scan_table_source.selected.indices[-1]
|
fitparams = dict(
|
||||||
scan = scan_table_source.data["scan"][sel_ind]
|
param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
|
||||||
pyzebra.ccl_findpeaks(
|
|
||||||
det_data["scan"][scan],
|
|
||||||
int_threshold=peak_int_ratio_spinner.value,
|
|
||||||
prominence=peak_prominence_spinner.value,
|
|
||||||
smooth=smooth_toggle.active,
|
|
||||||
window_size=window_size_spinner.value,
|
|
||||||
poly_order=poly_order_spinner.value,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
_update_table()
|
return fitparams
|
||||||
_update_plot(scan)
|
|
||||||
|
|
||||||
peakfind_button = Button(label="Peak Find Current", default_size=145)
|
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
|
||||||
peakfind_button.on_click(peakfind_button_callback)
|
fitparams_table = DataTable(
|
||||||
|
source=fitparams_table_source,
|
||||||
|
columns=[
|
||||||
|
TableColumn(field="param", title="Parameter"),
|
||||||
|
TableColumn(field="value", title="Value", editor=NumberEditor()),
|
||||||
|
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
|
||||||
|
TableColumn(field="min", title="Min", editor=NumberEditor()),
|
||||||
|
TableColumn(field="max", title="Max", editor=NumberEditor()),
|
||||||
|
],
|
||||||
|
height=200,
|
||||||
|
width=350,
|
||||||
|
index_position=None,
|
||||||
|
editable=True,
|
||||||
|
auto_edit=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# start with `background` and `gauss` fit functions added
|
||||||
|
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
|
||||||
|
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
|
||||||
|
fitparams_select.value = ["gaussian-1"] # add selection to gauss
|
||||||
|
|
||||||
|
fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)
|
||||||
|
|
||||||
def fit_all_button_callback():
|
def fit_all_button_callback():
|
||||||
for scan in det_data["scan"].values():
|
for scan, export in zip(det_data, scan_table_source.data["export"]):
|
||||||
pyzebra.fitccl(
|
if export:
|
||||||
scan,
|
pyzebra.fit_scan(
|
||||||
guess=[
|
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
|
||||||
centre_guess.value,
|
)
|
||||||
sigma_guess.value,
|
|
||||||
ampl_guess.value,
|
|
||||||
slope_guess.value,
|
|
||||||
offset_guess.value,
|
|
||||||
],
|
|
||||||
vary=[
|
|
||||||
centre_vary.active,
|
|
||||||
sigma_vary.active,
|
|
||||||
ampl_vary.active,
|
|
||||||
slope_vary.active,
|
|
||||||
offset_vary.active,
|
|
||||||
],
|
|
||||||
constraints_min=[
|
|
||||||
centre_min.value,
|
|
||||||
sigma_min.value,
|
|
||||||
ampl_min.value,
|
|
||||||
slope_min.value,
|
|
||||||
offset_min.value,
|
|
||||||
],
|
|
||||||
constraints_max=[
|
|
||||||
centre_max.value,
|
|
||||||
sigma_max.value,
|
|
||||||
ampl_max.value,
|
|
||||||
slope_max.value,
|
|
||||||
offset_max.value,
|
|
||||||
],
|
|
||||||
numfit_min=integ_from.value,
|
|
||||||
numfit_max=integ_to.value,
|
|
||||||
)
|
|
||||||
|
|
||||||
sel_ind = scan_table_source.selected.indices[-1]
|
_update_plot(_get_selected_scan())
|
||||||
_update_plot(scan_table_source.data["scan"][sel_ind])
|
|
||||||
_update_table()
|
_update_table()
|
||||||
|
|
||||||
fit_all_button = Button(label="Fit All", button_type="primary", default_size=145)
|
fit_all_button = Button(label="Fit All", button_type="primary", default_size=145)
|
||||||
fit_all_button.on_click(fit_all_button_callback)
|
fit_all_button.on_click(fit_all_button_callback)
|
||||||
|
|
||||||
def fit_button_callback():
|
def fit_button_callback():
|
||||||
sel_ind = scan_table_source.selected.indices[-1]
|
scan = _get_selected_scan()
|
||||||
scan = scan_table_source.data["scan"][sel_ind]
|
pyzebra.fit_scan(
|
||||||
|
scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
|
||||||
pyzebra.fitccl(
|
|
||||||
det_data["scan"][scan],
|
|
||||||
guess=[
|
|
||||||
centre_guess.value,
|
|
||||||
sigma_guess.value,
|
|
||||||
ampl_guess.value,
|
|
||||||
slope_guess.value,
|
|
||||||
offset_guess.value,
|
|
||||||
],
|
|
||||||
vary=[
|
|
||||||
centre_vary.active,
|
|
||||||
sigma_vary.active,
|
|
||||||
ampl_vary.active,
|
|
||||||
slope_vary.active,
|
|
||||||
offset_vary.active,
|
|
||||||
],
|
|
||||||
constraints_min=[
|
|
||||||
centre_min.value,
|
|
||||||
sigma_min.value,
|
|
||||||
ampl_min.value,
|
|
||||||
slope_min.value,
|
|
||||||
offset_min.value,
|
|
||||||
],
|
|
||||||
constraints_max=[
|
|
||||||
centre_max.value,
|
|
||||||
sigma_max.value,
|
|
||||||
ampl_max.value,
|
|
||||||
slope_max.value,
|
|
||||||
offset_max.value,
|
|
||||||
],
|
|
||||||
numfit_min=integ_from.value,
|
|
||||||
numfit_max=integ_to.value,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
_update_plot(scan)
|
_update_plot(scan)
|
||||||
@ -446,103 +473,95 @@ def create():
|
|||||||
fit_button = Button(label="Fit Current", default_size=145)
|
fit_button = Button(label="Fit Current", default_size=145)
|
||||||
fit_button.on_click(fit_button_callback)
|
fit_button.on_click(fit_button_callback)
|
||||||
|
|
||||||
def area_method_radiobutton_callback(_attr, _old, new):
|
|
||||||
det_data["meta"]["area_method"] = ("fit", "integ")[new]
|
|
||||||
|
|
||||||
area_method_radiobutton = RadioButtonGroup(
|
area_method_radiobutton = RadioButtonGroup(
|
||||||
labels=["Fit", "Integral"], active=0, default_size=145
|
labels=["Fit area", "Int area"], active=0, default_size=145, disabled=True
|
||||||
)
|
)
|
||||||
area_method_radiobutton.on_change("active", area_method_radiobutton_callback)
|
|
||||||
|
|
||||||
preview_output_textinput = TextAreaInput(title="Export file preview:", width=450, height=400)
|
bin_size_spinner = Spinner(
|
||||||
|
title="Bin size:", value=1, low=1, step=1, default_size=145, disabled=True
|
||||||
|
)
|
||||||
|
|
||||||
def preview_output_button_callback():
|
lorentz_toggle = Toggle(label="Lorentz Correction", default_size=145)
|
||||||
if det_data["meta"]["indices"] == "hkl":
|
|
||||||
ext = ".comm"
|
|
||||||
elif det_data["meta"]["indices"] == "real":
|
|
||||||
ext = ".incomm"
|
|
||||||
|
|
||||||
|
export_preview_textinput = TextAreaInput(title="Export preview:", width=500, height=400)
|
||||||
|
|
||||||
|
def preview_button_callback():
|
||||||
with tempfile.TemporaryDirectory() as temp_dir:
|
with tempfile.TemporaryDirectory() as temp_dir:
|
||||||
temp_file = temp_dir + "/temp"
|
temp_file = temp_dir + "/temp"
|
||||||
pyzebra.export_comm(det_data, temp_file)
|
export_data = []
|
||||||
|
for s, export in zip(det_data, scan_table_source.data["export"]):
|
||||||
|
if export:
|
||||||
|
export_data.append(s)
|
||||||
|
|
||||||
with open(f"{temp_file}{ext}") as f:
|
pyzebra.export_1D(
|
||||||
preview_output_textinput.value = f.read()
|
export_data,
|
||||||
|
temp_file,
|
||||||
|
area_method=AREA_METHODS[int(area_method_radiobutton.active)],
|
||||||
|
lorentz=lorentz_toggle.active,
|
||||||
|
hkl_precision=int(hkl_precision_select.value),
|
||||||
|
)
|
||||||
|
|
||||||
preview_output_button = Button(label="Preview file", default_size=220)
|
exported_content = ""
|
||||||
preview_output_button.on_click(preview_output_button_callback)
|
file_content = []
|
||||||
|
for ext in (".comm", ".incomm"):
|
||||||
|
fname = temp_file + ext
|
||||||
|
if os.path.isfile(fname):
|
||||||
|
with open(fname) as f:
|
||||||
|
content = f.read()
|
||||||
|
exported_content += f"{ext} file:\n" + content
|
||||||
|
else:
|
||||||
|
content = ""
|
||||||
|
file_content.append(content)
|
||||||
|
|
||||||
def export_results(det_data):
|
js_data.data.update(content=file_content)
|
||||||
if det_data["meta"]["indices"] == "hkl":
|
export_preview_textinput.value = exported_content
|
||||||
ext = ".comm"
|
|
||||||
elif det_data["meta"]["indices"] == "real":
|
|
||||||
ext = ".incomm"
|
|
||||||
|
|
||||||
with tempfile.TemporaryDirectory() as temp_dir:
|
preview_button = Button(label="Preview", default_size=200)
|
||||||
temp_file = temp_dir + "/temp"
|
preview_button.on_click(preview_button_callback)
|
||||||
pyzebra.export_comm(det_data, temp_file)
|
|
||||||
|
|
||||||
with open(f"{temp_file}{ext}") as f:
|
hkl_precision_select = Select(
|
||||||
output_content = f.read()
|
title="hkl precision:", options=["2", "3", "4"], value="2", default_size=80
|
||||||
|
)
|
||||||
|
|
||||||
return output_content, ext
|
save_button = Button(label="Download preview", button_type="success", default_size=200)
|
||||||
|
|
||||||
def save_button_callback():
|
|
||||||
cont, ext = export_results(det_data)
|
|
||||||
-        js_data.data.update(cont=[cont], ext=[ext])
 
-    save_button = Button(label="Download file", button_type="success", default_size=220)
-    save_button.on_click(save_button_callback)
     save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))
 
-    findpeak_controls = column(
-        row(peak_pos_textinput, column(Spacer(height=19), smooth_toggle)),
-        row(peak_int_ratio_spinner, peak_prominence_spinner),
-        row(window_size_spinner, poly_order_spinner),
-        row(peakfind_button, peakfind_all_button),
-    )
 
-    div_1 = Div(text="Guess:")
-    div_2 = Div(text="Vary:")
-    div_3 = Div(text="Min:")
-    div_4 = Div(text="Max:")
-    div_5 = Div(text="Gauss Centre:", margin=[5, 5, -5, 5])
-    div_6 = Div(text="Gauss Sigma:", margin=[5, 5, -5, 5])
-    div_7 = Div(text="Gauss Ampl.:", margin=[5, 5, -5, 5])
-    div_8 = Div(text="Slope:", margin=[5, 5, -5, 5])
-    div_9 = Div(text="Offset:", margin=[5, 5, -5, 5])
     fitpeak_controls = row(
-        column(
-            Spacer(height=36),
-            div_1,
-            Spacer(height=12),
-            div_2,
-            Spacer(height=12),
-            div_3,
-            Spacer(height=12),
-            div_4,
-        ),
-        column(div_5, centre_guess, centre_vary, centre_min, centre_max),
-        column(div_6, sigma_guess, sigma_vary, sigma_min, sigma_max),
-        column(div_7, ampl_guess, ampl_vary, ampl_min, ampl_max),
-        column(div_8, slope_guess, slope_vary, slope_min, slope_max),
-        column(div_9, offset_guess, offset_vary, offset_min, offset_max),
+        column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
+        fitparams_table,
         Spacer(width=20),
         column(
-            row(integ_from, integ_to),
-            row(fitparam_reset_button, area_method_radiobutton),
+            row(fit_from_spinner, fit_to_spinner),
+            row(bin_size_spinner, column(Spacer(height=19), lorentz_toggle)),
+            row(area_method_radiobutton),
            row(fit_button, fit_all_button),
         ),
     )
 
-    export_layout = column(preview_output_textinput, row(preview_output_button, save_button))
+    scan_layout = column(
+        scan_table,
+        monitor_spinner,
+        row(column(Spacer(height=19), merge_button), merge_dest_select, merge_source_select),
+    )
+
+    import_layout = column(
+        proposal_textinput,
+        file_select,
+        row(file_open_button, file_append_button),
+        upload_div,
+        upload_button,
+        append_upload_div,
+        append_upload_button,
+    )
+
+    export_layout = column(
+        export_preview_textinput,
+        row(hkl_precision_select, column(Spacer(height=19), row(preview_button, save_button))),
+    )
 
-    upload_div = Div(text="Or upload .ccl file:")
     tab_layout = column(
-        row(proposal_textinput, ccl_file_select),
-        row(column(Spacer(height=5), upload_div), upload_button),
-        row(scan_table, plot, Spacer(width=30), fit_output_textinput, export_layout),
-        row(findpeak_controls, Spacer(width=30), fitpeak_controls),
+        row(import_layout, scan_layout, plot, Spacer(width=30), export_layout),
+        row(fitpeak_controls, fit_output_textinput),
     )
 
     return Panel(child=tab_layout, title="ccl integrate")
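The export flow above stages file contents in a ColumnDataSource and hands the actual download to the browser via CustomJS. A minimal standalone sketch of that pattern (the JS body is the one used in this commit; widget names are illustrative; serve with `bokeh serve`):

from bokeh.io import curdoc
from bokeh.models import Button, ColumnDataSource, CustomJS

js_code = """
for (let i = 0; i < js_data.data['fname'].length; i++) {
    if (js_data.data['content'][i] === "") continue;
    const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
    const link = document.createElement('a');
    document.body.appendChild(link);
    const url = window.URL.createObjectURL(blob);
    link.href = url;
    link.download = js_data.data['fname'][i];
    link.click();
    window.URL.revokeObjectURL(url);
    document.body.removeChild(link);
}
"""

# Server-side code fills "content"/"fname"; clicking then downloads each
# non-empty entry as a text file, entirely in the browser.
js_data = ColumnDataSource(data=dict(content=["hello\n"], fname=["demo.comm"]))
save_button = Button(label="Download file", button_type="success")
save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=js_code))
curdoc().add_root(save_button)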
@@ -12,6 +12,7 @@ from bokeh.models import (
     Panel,
     RadioButtonGroup,
     Select,
+    Spacer,
     TextAreaInput,
     TextInput,
 )
@@ -21,6 +22,7 @@ from pyzebra.anatric import DATA_FACTORY_IMPLEMENTATION, REFLECTION_PRINTER_FORM
 
 
 def create():
+    doc = curdoc()
     config = pyzebra.AnatricConfig()
 
     def _load_config_file(file):
@@ -97,12 +99,11 @@ def create():
         minPeakCount_textinput.disabled = disable_adaptivedynamic
         displacementCurve_textinput.disabled = disable_adaptivedynamic
 
-    upload_div = Div(text="Open XML configuration file:")
-
     def upload_button_callback(_attr, _old, new):
         with io.BytesIO(base64.b64decode(new)) as file:
             _load_config_file(file)
 
+    upload_div = Div(text="Open XML configuration file:")
     upload_button = FileInput(accept=".xml")
     upload_button.on_change("value", upload_button_callback)
 
@@ -111,7 +112,7 @@ def create():
     def logfile_textinput_callback(_attr, _old, new):
         config.logfile = new
 
-    logfile_textinput = TextInput(title="Logfile:", value="logfile.log", width=520)
+    logfile_textinput = TextInput(title="Logfile:", value="logfile.log", width=320)
     logfile_textinput.on_change("value", logfile_textinput_callback)
 
     def logfile_verbosity_select_callback(_attr, _old, new):
@@ -132,7 +133,7 @@ def create():
     def filelist_format_textinput_callback(_attr, _old, new):
         config.filelist_format = new
 
-    filelist_format_textinput = TextInput(title="format:", width=490)
+    filelist_format_textinput = TextInput(title="format:", width=290)
     filelist_format_textinput.on_change("value", filelist_format_textinput_callback)
 
     def filelist_datapath_textinput_callback(_attr, _old, new):
@@ -160,7 +161,7 @@ def create():
     def lambda_textinput_callback(_attr, _old, new):
         config.crystal_lambda = new
 
-    lambda_textinput = TextInput(title="lambda:", width=140)
+    lambda_textinput = TextInput(title="lambda:", width=145)
     lambda_textinput.on_change("value", lambda_textinput_callback)
 
     def ub_textareainput_callback(_attr, _old, new):
@@ -172,19 +173,19 @@ def create():
     def zeroOM_textinput_callback(_attr, _old, new):
         config.crystal_zeroOM = new
 
-    zeroOM_textinput = TextInput(title="zeroOM:", width=140)
+    zeroOM_textinput = TextInput(title="zeroOM:", width=145)
     zeroOM_textinput.on_change("value", zeroOM_textinput_callback)
 
     def zeroSTT_textinput_callback(_attr, _old, new):
         config.crystal_zeroSTT = new
 
-    zeroSTT_textinput = TextInput(title="zeroSTT:", width=140)
+    zeroSTT_textinput = TextInput(title="zeroSTT:", width=145)
     zeroSTT_textinput.on_change("value", zeroSTT_textinput_callback)
 
     def zeroCHI_textinput_callback(_attr, _old, new):
         config.crystal_zeroCHI = new
 
-    zeroCHI_textinput = TextInput(title="zeroCHI:", width=140)
+    zeroCHI_textinput = TextInput(title="zeroCHI:", width=145)
     zeroCHI_textinput.on_change("value", zeroCHI_textinput_callback)
 
     # ---- DataFactory
@@ -192,14 +193,14 @@ def create():
         config.dataFactory_implementation = new
 
     dataFactory_implementation_select = Select(
-        title="DataFactory implementation:", options=DATA_FACTORY_IMPLEMENTATION, width=300,
+        title="DataFactory implement.:", options=DATA_FACTORY_IMPLEMENTATION, width=145,
     )
     dataFactory_implementation_select.on_change("value", dataFactory_implementation_select_callback)
 
     def dataFactory_dist1_textinput_callback(_attr, _old, new):
         config.dataFactory_dist1 = new
 
-    dataFactory_dist1_textinput = TextInput(title="dist1:", width=290)
+    dataFactory_dist1_textinput = TextInput(title="dist1:", width=145)
     dataFactory_dist1_textinput.on_change("value", dataFactory_dist1_textinput_callback)
 
     # ---- BackgroundProcessor
@@ -211,7 +212,7 @@ def create():
         config.reflectionPrinter_format = new
 
     reflectionPrinter_format_select = Select(
-        title="ReflectionPrinter format:", options=REFLECTION_PRINTER_FORMATS, width=300,
+        title="ReflectionPrinter format:", options=REFLECTION_PRINTER_FORMATS, width=145,
     )
     reflectionPrinter_format_select.on_change("value", reflectionPrinter_format_select_callback)
 
@@ -345,7 +346,10 @@ def create():
         with tempfile.TemporaryDirectory() as temp_dir:
             temp_file = temp_dir + "/temp.xml"
             config.save_as(temp_file)
-            pyzebra.anatric(temp_file)
+            if doc.anatric_path:
+                pyzebra.anatric(temp_file, anatric_path=doc.anatric_path)
+            else:
+                pyzebra.anatric(temp_file)
 
             with open(config.logfile) as f_log:
                 output_log.value = f_log.read()
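The hunk above makes the anatric executable location overridable per server instance through an attribute stashed on the Bokeh document. A minimal sketch of the same pattern, assuming `doc.anatric_path` is attached elsewhere at startup (for example from a CLI flag) and may be absent:

import pyzebra
from bokeh.io import curdoc

def run_anatric(temp_file):
    # Use the per-document override when present; otherwise fall back to
    # the default executable lookup inside pyzebra.anatric.
    doc = curdoc()
    if getattr(doc, "anatric_path", None):
        pyzebra.anatric(temp_file, anatric_path=doc.anatric_path)
    else:
        pyzebra.anatric(temp_file)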
@@ -353,57 +357,63 @@ def create():
     process_button = Button(label="Process", button_type="primary")
     process_button.on_click(process_button_callback)
 
-    output_log = TextAreaInput(title="Logfile output:", height=700, disabled=True)
-    output_config = TextAreaInput(title="Current config:", height=700, width=400, disabled=True)
+    output_log = TextAreaInput(title="Logfile output:", height=600, disabled=True)
+    output_config = TextAreaInput(title="Current config:", height=600, width=400, disabled=True)
 
-    tab_layout = row(
-        column(
-            upload_div,
-            upload_button,
-            row(logfile_textinput, logfile_verbosity_select),
-            row(filelist_type, filelist_format_textinput),
-            filelist_datapath_textinput,
-            filelist_ranges_textareainput,
-            crystal_sample_textinput,
-            row(lambda_textinput, zeroOM_textinput, zeroSTT_textinput, zeroCHI_textinput),
-            ub_textareainput,
-            row(dataFactory_implementation_select, dataFactory_dist1_textinput),
-            reflectionPrinter_format_select,
-            process_button,
-        ),
-        column(
-            mode_radio_button_group,
-            row(
-                column(
-                    threshold_textinput,
-                    shell_textinput,
-                    steepness_textinput,
-                    duplicateDistance_textinput,
-                    maxequal_textinput,
-                    aps_window_textinput,
-                ),
-                column(
-                    adm_window_textinput,
-                    border_textinput,
-                    minWindow_textinput,
-                    reflectionFile_textinput,
-                    targetMonitor_textinput,
-                    smoothSize_textinput,
-                    loop_textinput,
-                    minPeakCount_textinput,
-                    displacementCurve_textinput,
-                ),
-            ),
-        ),
-        output_config,
-        output_log,
+    general_params_layout = column(
+        row(logfile_textinput, logfile_verbosity_select),
+        row(filelist_type, filelist_format_textinput),
+        filelist_datapath_textinput,
+        filelist_ranges_textareainput,
+        crystal_sample_textinput,
+        row(lambda_textinput, zeroOM_textinput),
+        row(zeroSTT_textinput, zeroCHI_textinput),
+        ub_textareainput,
+        row(dataFactory_implementation_select, dataFactory_dist1_textinput),
+        reflectionPrinter_format_select,
+    )
+
+    algorithm_params_layout = column(
+        mode_radio_button_group,
+        row(
+            column(
+                threshold_textinput,
+                shell_textinput,
+                steepness_textinput,
+                duplicateDistance_textinput,
+                maxequal_textinput,
+                aps_window_textinput,
+            ),
+            column(
+                adm_window_textinput,
+                border_textinput,
+                minWindow_textinput,
+                reflectionFile_textinput,
+                targetMonitor_textinput,
+                smoothSize_textinput,
+                loop_textinput,
+                minPeakCount_textinput,
+                displacementCurve_textinput,
+            ),
+        ),
+    )
+
+    tab_layout = column(
+        row(column(Spacer(height=2), upload_div), upload_button),
+        row(
+            general_params_layout,
+            algorithm_params_layout,
+            column(row(output_config, output_log), row(process_button)),
+        ),
     )
 
     async def update_config():
-        config.save_as("debug.xml")
-        with open("debug.xml") as f_config:
-            output_config.value = f_config.read()
+        with tempfile.TemporaryDirectory() as temp_dir:
+            temp_file = temp_dir + "/debug.xml"
+            config.save_as(temp_file)
+            with open(temp_file) as f_config:
+                output_config.value = f_config.read()
 
-    curdoc().add_periodic_callback(update_config, 1000)
+    doc.add_periodic_callback(update_config, 1000)
 
     return Panel(child=tab_layout, title="hdf anatric")
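update_config above drops the hard-coded debug.xml in the working directory in favor of a throwaway file in a temp dir, refreshed once a second. A self-contained sketch of that polling pattern using the AnatricConfig API shown in this diff:

import tempfile
import pyzebra
from bokeh.io import curdoc
from bokeh.models import TextAreaInput

config = pyzebra.AnatricConfig()
output_config = TextAreaInput(title="Current config:", height=600, disabled=True)

async def update_config():
    # Serialize into a temp dir so nothing is left behind in the CWD,
    # then mirror the file into the read-only preview widget.
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_file = temp_dir + "/debug.xml"
        config.save_as(temp_file)
        with open(temp_file) as f_config:
            output_config.value = f_config.read()

curdoc().add_periodic_callback(update_config, 1000)  # period in milliseconds
curdoc().add_root(output_config)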
@@ -30,6 +30,7 @@ from bokeh.models import (
     Spacer,
     Spinner,
     TextAreaInput,
+    TextInput,
     Title,
     Toggle,
     WheelZoomTool,
@@ -40,12 +41,28 @@ import pyzebra
 
 IMAGE_W = 256
 IMAGE_H = 128
+IMAGE_PLOT_W = int(IMAGE_W * 2.5)
+IMAGE_PLOT_H = int(IMAGE_H * 2.5)
+
+PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
 
 
 def create():
     det_data = {}
     roi_selection = {}
 
+    def proposal_textinput_callback(_attr, _old, new):
+        full_proposal_path = os.path.join(PROPOSAL_PATH, new.strip())
+        file_list = []
+        for file in os.listdir(full_proposal_path):
+            if file.endswith(".hdf"):
+                file_list.append((os.path.join(full_proposal_path, file), file))
+        filelist.options = file_list
+        filelist.value = file_list[0][0]
+
+    proposal_textinput = TextInput(title="Enter proposal number:", default_size=145)
+    proposal_textinput.on_change("value", proposal_textinput_callback)
+
     def upload_button_callback(_attr, _old, new):
         with io.StringIO(base64.b64decode(new).decode()) as file:
             h5meta_list = pyzebra.parse_h5meta(file)
@@ -53,6 +70,7 @@ def create():
         filelist.options = [(entry, os.path.basename(entry)) for entry in file_list]
         filelist.value = file_list[0]
 
+    upload_div = Div(text="or upload .cami file:", margin=(5, 5, 0, 5))
     upload_button = FileInput(accept=".cami")
     upload_button.on_change("value", upload_button_callback)
 
@@ -74,8 +92,8 @@ def create():
         image_source.data.update(image=[current_image])
 
         if auto_toggle.active:
-            im_max = int(np.max(current_image))
-            im_min = int(np.min(current_image))
+            im_min = np.min(current_image)
+            im_max = np.max(current_image)
 
             display_min_spinner.value = im_min
             display_max_spinner.value = im_max
@@ -83,11 +101,18 @@ def create():
             image_glyph.color_mapper.low = im_min
             image_glyph.color_mapper.high = im_max
 
-        magnetic_field_spinner.value = det_data["magnetic_field"][index]
-        temperature_spinner.value = det_data["temperature"][index]
+        if "mf" in det_data:
+            mf_spinner.value = det_data["mf"][index]
+        else:
+            mf_spinner.value = None
+
+        if "temp" in det_data:
+            temp_spinner.value = det_data["temp"][index]
+        else:
+            temp_spinner.value = None
 
         gamma, nu = calculate_pol(det_data, index)
-        omega = np.ones((IMAGE_H, IMAGE_W)) * det_data["rot_angle"][index]
+        omega = np.ones((IMAGE_H, IMAGE_W)) * det_data["omega"][index]
         image_source.data.update(gamma=[gamma], nu=[nu], omega=[omega])
 
     def update_overview_plot():
@@ -99,6 +124,18 @@ def create():
         overview_plot_x_image_source.data.update(image=[overview_x], dw=[n_x])
         overview_plot_y_image_source.data.update(image=[overview_y], dw=[n_y])
 
+        if proj_auto_toggle.active:
+            im_min = min(np.min(overview_x), np.min(overview_y))
+            im_max = max(np.max(overview_x), np.max(overview_y))
+
+            proj_display_min_spinner.value = im_min
+            proj_display_max_spinner.value = im_max
+
+            overview_plot_x_image_glyph.color_mapper.low = im_min
+            overview_plot_y_image_glyph.color_mapper.low = im_min
+            overview_plot_x_image_glyph.color_mapper.high = im_max
+            overview_plot_y_image_glyph.color_mapper.high = im_max
+
         if frame_button_group.active == 0:  # Frame
             overview_plot_x.axis[1].axis_label = "Frame"
             overview_plot_y.axis[1].axis_label = "Frame"
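The proj auto-range block added above keeps both overview projections on one color scale by taking the min and max across the two images together. The same logic as a small helper (a sketch; the glyph and spinner objects are assumed to exist as in the diff):

import numpy as np

def autoscale_projections(overview_x, overview_y, glyphs, min_spinner, max_spinner):
    # One global range across both projections, so identical intensities
    # map to identical colors in the x and y overview plots.
    im_min = min(np.min(overview_x), np.min(overview_y))
    im_max = max(np.max(overview_x), np.max(overview_y))

    min_spinner.value = im_min
    max_spinner.value = im_max

    for glyph in glyphs:
        glyph.color_mapper.low = im_min
        glyph.color_mapper.high = im_max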
@@ -106,15 +143,16 @@ def create():
             overview_plot_x_image_source.data.update(y=[0], dh=[n_im])
             overview_plot_y_image_source.data.update(y=[0], dh=[n_im])
 
-        elif frame_button_group.active == 1:  # Omega
-            overview_plot_x.axis[1].axis_label = "Omega"
-            overview_plot_y.axis[1].axis_label = "Omega"
+        elif frame_button_group.active == 1:  # Variable angle
+            scan_motor = det_data["scan_motor"]
+            overview_plot_x.axis[1].axis_label = scan_motor
+            overview_plot_y.axis[1].axis_label = scan_motor
 
-            om = det_data["rot_angle"]
-            om_start = om[0]
-            om_end = (om[-1] - om[0]) * n_im / (n_im - 1)
-            overview_plot_x_image_source.data.update(y=[om_start], dh=[om_end])
-            overview_plot_y_image_source.data.update(y=[om_start], dh=[om_end])
+            var = det_data[scan_motor]
+            var_start = var[0]
+            var_end = (var[-1] - var[0]) * n_im / (n_im - 1)
+            overview_plot_x_image_source.data.update(y=[var_start], dh=[var_end])
+            overview_plot_y_image_source.data.update(y=[var_start], dh=[var_end])
 
     def filelist_callback(_attr, _old, new):
         nonlocal det_data
@@ -122,10 +160,17 @@ def create():
 
         index_spinner.value = 0
         index_spinner.high = det_data["data"].shape[0] - 1
 
+        zebra_mode = det_data["zebra_mode"]
+        if zebra_mode == "nb":
+            geometry_textinput.value = "normal beam"
+        else:  # zebra_mode == "bi"
+            geometry_textinput.value = "bisecting"
+
         update_image(0)
         update_overview_plot()
 
-    filelist = Select()
+    filelist = Select(title="Available .hdf files:")
     filelist.on_change("value", filelist_callback)
 
     def index_spinner_callback(_attr, _old, new):
@@ -137,8 +182,8 @@ def create():
     plot = Plot(
         x_range=Range1d(0, IMAGE_W, bounds=(0, IMAGE_W)),
         y_range=Range1d(0, IMAGE_H, bounds=(0, IMAGE_H)),
-        plot_height=IMAGE_H * 3,
-        plot_width=IMAGE_W * 3,
+        plot_height=IMAGE_PLOT_H,
+        plot_width=IMAGE_PLOT_W,
         toolbar_location="left",
     )
 
@@ -191,8 +236,8 @@ def create():
     proj_v = Plot(
         x_range=plot.x_range,
         y_range=DataRange1d(),
-        plot_height=200,
-        plot_width=IMAGE_W * 3,
+        plot_height=150,
+        plot_width=IMAGE_PLOT_W,
         toolbar_location=None,
     )
 
@@ -208,8 +253,8 @@ def create():
     proj_h = Plot(
         x_range=DataRange1d(),
         y_range=plot.y_range,
-        plot_height=IMAGE_H * 3,
-        plot_width=200,
+        plot_height=IMAGE_PLOT_H,
+        plot_width=150,
         toolbar_location=None,
     )
 
@@ -272,8 +317,8 @@ def create():
         title=Title(text="Projections on X-axis"),
         x_range=det_x_range,
         y_range=frame_range,
-        plot_height=500,
-        plot_width=IMAGE_W * 3,
+        plot_height=400,
+        plot_width=IMAGE_PLOT_W,
     )
 
     # ---- tools
@@ -309,8 +354,8 @@ def create():
         title=Title(text="Projections on Y-axis"),
         x_range=det_y_range,
         y_range=frame_range,
-        plot_height=500,
-        plot_width=IMAGE_H * 3,
+        plot_height=400,
+        plot_width=IMAGE_PLOT_H,
     )
 
     # ---- tools
@@ -344,14 +389,14 @@ def create():
     def frame_button_group_callback(_active):
         update_overview_plot()
 
-    frame_button_group = RadioButtonGroup(labels=["Frames", "Omega"], active=0)
+    frame_button_group = RadioButtonGroup(labels=["Frames", "Variable Angle"], active=0)
     frame_button_group.on_click(frame_button_group_callback)
 
     roi_avg_plot = Plot(
         x_range=DataRange1d(),
         y_range=DataRange1d(),
         plot_height=200,
-        plot_width=IMAGE_W * 3,
+        plot_width=IMAGE_PLOT_W,
         toolbar_location="left",
     )
 
@@ -385,10 +430,7 @@ def create():
     colormap.on_change("value", colormap_callback)
     colormap.value = "plasma"
 
-    radio_button_group = RadioButtonGroup(labels=["nb", "nb_bi"], active=0)
-
     STEP = 1
 
     # ---- colormap auto toggle button
     def auto_toggle_callback(state):
         if state:
@@ -400,7 +442,9 @@ def create():
 
         update_image()
 
-    auto_toggle = Toggle(label="Auto Range", active=True, button_type="default", default_size=145)
+    auto_toggle = Toggle(
+        label="Main Auto Range", active=True, button_type="default", default_size=125
+    )
     auto_toggle.on_click(auto_toggle_callback)
 
     # ---- colormap display max value
@@ -409,12 +453,12 @@ def create():
         image_glyph.color_mapper.high = new_value
 
     display_max_spinner = Spinner(
-        title="Maximal Display Value:",
+        title="Max Value:",
         low=0 + STEP,
         value=1,
         step=STEP,
         disabled=auto_toggle.active,
-        default_size=145,
+        default_size=80,
     )
     display_max_spinner.on_change("value", display_max_spinner_callback)
 
@@ -424,19 +468,69 @@ def create():
         image_glyph.color_mapper.low = new_value
 
     display_min_spinner = Spinner(
-        title="Minimal Display Value:",
+        title="Min Value:",
+        low=0,
         high=1 - STEP,
         value=0,
         step=STEP,
        disabled=auto_toggle.active,
-        default_size=145,
+        default_size=80,
     )
     display_min_spinner.on_change("value", display_min_spinner_callback)
 
+    PROJ_STEP = 0.1
+
+    # ---- proj colormap auto toggle button
+    def proj_auto_toggle_callback(state):
+        if state:
+            proj_display_min_spinner.disabled = True
+            proj_display_max_spinner.disabled = True
+        else:
+            proj_display_min_spinner.disabled = False
+            proj_display_max_spinner.disabled = False
+
+        update_overview_plot()
+
+    proj_auto_toggle = Toggle(
+        label="Proj Auto Range", active=True, button_type="default", default_size=125
+    )
+    proj_auto_toggle.on_click(proj_auto_toggle_callback)
+
+    # ---- proj colormap display max value
+    def proj_display_max_spinner_callback(_attr, _old_value, new_value):
+        proj_display_min_spinner.high = new_value - PROJ_STEP
+        overview_plot_x_image_glyph.color_mapper.high = new_value
+        overview_plot_y_image_glyph.color_mapper.high = new_value
+
+    proj_display_max_spinner = Spinner(
+        title="Max Value:",
+        low=0 + PROJ_STEP,
+        value=1,
+        step=PROJ_STEP,
+        disabled=proj_auto_toggle.active,
+        default_size=80,
+    )
+    proj_display_max_spinner.on_change("value", proj_display_max_spinner_callback)
+
+    # ---- proj colormap display min value
+    def proj_display_min_spinner_callback(_attr, _old_value, new_value):
+        proj_display_max_spinner.low = new_value + PROJ_STEP
+        overview_plot_x_image_glyph.color_mapper.low = new_value
+        overview_plot_y_image_glyph.color_mapper.low = new_value
+
+    proj_display_min_spinner = Spinner(
+        title="Min Value:",
+        low=0,
+        high=1 - PROJ_STEP,
+        value=0,
+        step=PROJ_STEP,
+        disabled=proj_auto_toggle.active,
+        default_size=80,
+    )
+    proj_display_min_spinner.on_change("value", proj_display_min_spinner_callback)
+
     def hkl_button_callback():
         index = index_spinner.value
-        setup_type = "nb_bi" if radio_button_group.active else "nb"
-        h, k, l = calculate_hkl(det_data, index, setup_type)
+        h, k, l = calculate_hkl(det_data, index)
         image_source.data.update(h=[h], k=[k], l=[l])
 
     hkl_button = Button(label="Calculate hkl (slow)")
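The proj min/max spinners added above are cross-linked: each callback tightens the other spinner's bound so the pair can never invert. A self-contained sketch of that coupling:

from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import Spinner

PROJ_STEP = 0.1

proj_max = Spinner(title="Max Value:", low=0 + PROJ_STEP, value=1, step=PROJ_STEP)
proj_min = Spinner(title="Min Value:", low=0, high=1 - PROJ_STEP, value=0, step=PROJ_STEP)

def max_spinner_callback(_attr, _old, new):
    # Raising the max loosens the ceiling on the min spinner.
    proj_min.high = new - PROJ_STEP

def min_spinner_callback(_attr, _old, new):
    # Raising the min lifts the floor of the max spinner.
    proj_max.low = new + PROJ_STEP

proj_max.on_change("value", max_spinner_callback)
proj_min.on_change("value", min_spinner_callback)

curdoc().add_root(row(proj_min, proj_max))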
@@ -466,19 +560,25 @@ def create():
     selection_button = Button(label="Add selection")
     selection_button.on_click(selection_button_callback)
 
-    magnetic_field_spinner = Spinner(
+    mf_spinner = Spinner(
         title="Magnetic field:", format="0.00", width=145, disabled=True
     )
-    temperature_spinner = Spinner(title="Temperature:", format="0.00", width=145, disabled=True)
+    temp_spinner = Spinner(title="Temperature:", format="0.00", width=145, disabled=True)
+    geometry_textinput = TextInput(title="Geometry:", disabled=True)
 
     # Final layout
     layout_image = column(gridplot([[proj_v, None], [plot, proj_h]], merge_tools=False))
     colormap_layout = column(
-        row(colormap, column(Spacer(height=19), auto_toggle)),
-        row(display_max_spinner, display_min_spinner),
+        row(colormap),
+        row(column(Spacer(height=19), auto_toggle), display_max_spinner, display_min_spinner),
+        row(
+            column(Spacer(height=19), proj_auto_toggle),
+            proj_display_max_spinner,
+            proj_display_min_spinner,
+        ),
     )
-    hkl_layout = column(radio_button_group, hkl_button)
-    params_layout = row(magnetic_field_spinner, temperature_spinner)
+    hkl_layout = column(geometry_textinput, hkl_button)
+    params_layout = row(mf_spinner, temp_spinner)
 
     layout_controls = row(
         column(selection_button, selection_list),
@@ -497,10 +597,11 @@ def create():
         ),
     )
 
-    upload_div = Div(text="Upload .cami file:")
     tab_layout = row(
         column(
-            row(column(Spacer(height=5), upload_div), upload_button, filelist),
+            row(
+                proposal_textinput, filelist, Spacer(width=100), column(upload_div, upload_button),
+            ),
             layout_overview,
             layout_controls,
         ),
@@ -510,31 +611,32 @@ def create():
     return Panel(child=tab_layout, title="hdf viewer")
 
 
-def calculate_hkl(det_data, index, setup_type="nb_bi"):
+def calculate_hkl(det_data, index):
     h = np.empty(shape=(IMAGE_H, IMAGE_W))
     k = np.empty(shape=(IMAGE_H, IMAGE_W))
     l = np.empty(shape=(IMAGE_H, IMAGE_W))
 
     wave = det_data["wave"]
     ddist = det_data["ddist"]
-    gammad = det_data["pol_angle"][index]
-    om = det_data["rot_angle"][index]
-    nud = det_data["tlt_angle"]
-    ub = det_data["UB"]
+    gammad = det_data["gamma"][index]
+    om = det_data["omega"][index]
+    nud = det_data["nu"]
+    ub = det_data["ub"]
+    geometry = det_data["zebra_mode"]
 
-    if setup_type == "nb_bi":
-        ch = det_data["chi_angle"][index]
-        ph = det_data["phi_angle"][index]
-    elif setup_type == "nb":
-        ch = 0
-        ph = 0
+    if geometry == "bi":
+        chi = det_data["chi"][index]
+        phi = det_data["phi"][index]
+    elif geometry == "nb":
+        chi = 0
+        phi = 0
     else:
-        raise ValueError(f"Unknown setup type '{setup_type}'")
+        raise ValueError(f"Unknown geometry type '{geometry}'")
 
     for xi in np.arange(IMAGE_W):
         for yi in np.arange(IMAGE_H):
             h[yi, xi], k[yi, xi], l[yi, xi] = pyzebra.ang2hkl(
-                wave, ddist, gammad, om, ch, ph, nud, ub, xi, yi
+                wave, ddist, gammad, om, chi, phi, nud, ub, xi, yi
             )
 
     return h, k, l
@@ -545,8 +647,8 @@ def calculate_pol(det_data, index):
     nu = np.empty(shape=(IMAGE_H, IMAGE_W))
 
     ddist = det_data["ddist"]
-    gammad = det_data["pol_angle"][index]
-    nud = det_data["tlt_angle"]
+    gammad = det_data["gamma"][index]
+    nud = det_data["nu"]
 
     for xi in np.arange(IMAGE_W):
         for yi in np.arange(IMAGE_H):
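calculate_hkl above now resolves chi and phi from the zebra_mode flag instead of a caller-supplied setup_type. A usage sketch for a single detector pixel, with det_data keys following the renamed scheme in this diff (the helper name is illustrative):

import pyzebra

def pixel_hkl(det_data, index, xi, yi):
    # Normal-beam geometry fixes chi and phi at zero; bisecting reads
    # them from the scan data, mirroring calculate_hkl above.
    if det_data["zebra_mode"] == "bi":
        chi, phi = det_data["chi"][index], det_data["phi"][index]
    else:  # "nb"
        chi, phi = 0, 0

    return pyzebra.ang2hkl(
        det_data["wave"],
        det_data["ddist"],
        det_data["gamma"][index],
        det_data["omega"][index],
        chi,
        phi,
        det_data["nu"],
        det_data["ub"],
        xi,
        yi,
    )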
|
649
pyzebra/app/panel_param_study.py
Normal file
649
pyzebra/app/panel_param_study.py
Normal file
@ -0,0 +1,649 @@
|
|||||||
|
import base64
|
||||||
|
import io
|
||||||
|
import itertools
|
||||||
|
import os
|
||||||
|
import tempfile
|
||||||
|
import types
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from bokeh.layouts import column, row
|
||||||
|
from bokeh.models import (
|
||||||
|
BasicTicker,
|
||||||
|
Button,
|
||||||
|
CheckboxEditor,
|
||||||
|
ColumnDataSource,
|
||||||
|
CustomJS,
|
||||||
|
DataRange1d,
|
||||||
|
DataTable,
|
||||||
|
Div,
|
||||||
|
Dropdown,
|
||||||
|
FileInput,
|
||||||
|
Grid,
|
||||||
|
HoverTool,
|
||||||
|
Legend,
|
||||||
|
Line,
|
||||||
|
LinearAxis,
|
||||||
|
MultiLine,
|
||||||
|
MultiSelect,
|
||||||
|
NumberEditor,
|
||||||
|
Panel,
|
||||||
|
PanTool,
|
||||||
|
Plot,
|
||||||
|
RadioButtonGroup,
|
||||||
|
ResetTool,
|
||||||
|
Scatter,
|
||||||
|
Select,
|
||||||
|
Spacer,
|
||||||
|
Span,
|
||||||
|
Spinner,
|
||||||
|
TableColumn,
|
||||||
|
Tabs,
|
||||||
|
TextAreaInput,
|
||||||
|
TextInput,
|
||||||
|
Toggle,
|
||||||
|
WheelZoomTool,
|
||||||
|
Whisker,
|
||||||
|
)
|
||||||
|
from bokeh.palettes import Category10, Turbo256
|
||||||
|
from bokeh.transform import linear_cmap
|
||||||
|
|
||||||
|
import pyzebra
|
||||||
|
from pyzebra.ccl_io import AREA_METHODS
|
||||||
|
|
||||||
|
javaScript = """
|
||||||
|
for (let i = 0; i < js_data.data['fname'].length; i++) {
|
||||||
|
if (js_data.data['content'][i] === "") continue;
|
||||||
|
|
||||||
|
const blob = new Blob([js_data.data['content'][i]], {type: 'text/plain'})
|
||||||
|
const link = document.createElement('a');
|
||||||
|
document.body.appendChild(link);
|
||||||
|
const url = window.URL.createObjectURL(blob);
|
||||||
|
link.href = url;
|
||||||
|
link.download = js_data.data['fname'][i];
|
||||||
|
link.click();
|
||||||
|
window.URL.revokeObjectURL(url);
|
||||||
|
document.body.removeChild(link);
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
PROPOSAL_PATH = "/afs/psi.ch/project/sinqdata/2020/zebra/"
|
||||||
|
|
||||||
|
|
||||||
|
def color_palette(n_colors):
|
||||||
|
palette = itertools.cycle(Category10[10])
|
||||||
|
return list(itertools.islice(palette, n_colors))
|
||||||
|
|
||||||
|
|
||||||
|
def create():
|
||||||
|
det_data = []
|
||||||
|
fit_params = {}
|
||||||
|
js_data = ColumnDataSource(data=dict(content=["", ""], fname=["", ""]))
|
||||||
|
|
||||||
|
def proposal_textinput_callback(_attr, _old, new):
|
||||||
|
full_proposal_path = os.path.join(PROPOSAL_PATH, new.strip())
|
||||||
|
dat_file_list = []
|
||||||
|
for file in os.listdir(full_proposal_path):
|
||||||
|
if file.endswith(".dat"):
|
||||||
|
dat_file_list.append((os.path.join(full_proposal_path, file), file))
|
||||||
|
file_select.options = dat_file_list
|
||||||
|
|
||||||
|
proposal_textinput = TextInput(title="Proposal number:", default_size=200)
|
||||||
|
proposal_textinput.on_change("value", proposal_textinput_callback)
|
||||||
|
|
||||||
|
def _init_datatable():
|
||||||
|
scan_list = [s["idx"] for s in det_data]
|
||||||
|
file_list = []
|
||||||
|
for scan in det_data:
|
||||||
|
file_list.append(os.path.basename(scan["original_filename"]))
|
||||||
|
|
||||||
|
scan_table_source.data.update(
|
||||||
|
file=file_list,
|
||||||
|
scan=scan_list,
|
||||||
|
param=[None] * len(scan_list),
|
||||||
|
fit=[0] * len(scan_list),
|
||||||
|
export=[True] * len(scan_list),
|
||||||
|
)
|
||||||
|
scan_table_source.selected.indices = []
|
||||||
|
scan_table_source.selected.indices = [0]
|
||||||
|
|
||||||
|
param_select.value = "user defined"
|
||||||
|
|
||||||
|
def file_select_callback(_attr, _old, _new):
|
||||||
|
pass
|
||||||
|
|
||||||
|
file_select = MultiSelect(title="Available .dat files:", default_size=200, height=250)
|
||||||
|
file_select.on_change("value", file_select_callback)
|
||||||
|
|
||||||
|
def file_open_button_callback():
|
||||||
|
nonlocal det_data
|
||||||
|
det_data = []
|
||||||
|
for f_name in file_select.value:
|
||||||
|
with open(f_name) as file:
|
||||||
|
base, ext = os.path.splitext(f_name)
|
||||||
|
if det_data:
|
||||||
|
append_data = pyzebra.parse_1D(file, ext)
|
||||||
|
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
|
||||||
|
det_data.extend(append_data)
|
||||||
|
else:
|
||||||
|
det_data = pyzebra.parse_1D(file, ext)
|
||||||
|
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
|
||||||
|
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
|
||||||
|
|
||||||
|
_init_datatable()
|
||||||
|
|
||||||
|
file_open_button = Button(label="Open New", default_size=100)
|
||||||
|
file_open_button.on_click(file_open_button_callback)
|
||||||
|
|
||||||
|
def file_append_button_callback():
|
||||||
|
for f_name in file_select.value:
|
||||||
|
with open(f_name) as file:
|
||||||
|
_, ext = os.path.splitext(f_name)
|
||||||
|
append_data = pyzebra.parse_1D(file, ext)
|
||||||
|
|
||||||
|
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
|
||||||
|
det_data.extend(append_data)
|
||||||
|
|
||||||
|
_init_datatable()
|
||||||
|
|
||||||
|
file_append_button = Button(label="Append", default_size=100)
|
||||||
|
file_append_button.on_click(file_append_button_callback)
|
||||||
|
|
||||||
|
def upload_button_callback(_attr, _old, new):
|
||||||
|
nonlocal det_data
|
||||||
|
det_data = []
|
||||||
|
for f_str, f_name in zip(new, upload_button.filename):
|
||||||
|
with io.StringIO(base64.b64decode(f_str).decode()) as file:
|
||||||
|
base, ext = os.path.splitext(f_name)
|
||||||
|
if det_data:
|
||||||
|
append_data = pyzebra.parse_1D(file, ext)
|
||||||
|
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
|
||||||
|
det_data.extend(append_data)
|
||||||
|
else:
|
||||||
|
det_data = pyzebra.parse_1D(file, ext)
|
||||||
|
pyzebra.normalize_dataset(det_data, monitor_spinner.value)
|
||||||
|
js_data.data.update(fname=[base + ".comm", base + ".incomm"])
|
||||||
|
|
||||||
|
_init_datatable()
|
||||||
|
|
||||||
|
upload_div = Div(text="or upload new .dat files:", margin=(5, 5, 0, 5))
|
||||||
|
upload_button = FileInput(accept=".dat", multiple=True, default_size=200)
|
||||||
|
upload_button.on_change("value", upload_button_callback)
|
||||||
|
|
||||||
|
def append_upload_button_callback(_attr, _old, new):
|
||||||
|
for f_str, f_name in zip(new, append_upload_button.filename):
|
||||||
|
with io.StringIO(base64.b64decode(f_str).decode()) as file:
|
||||||
|
_, ext = os.path.splitext(f_name)
|
||||||
|
append_data = pyzebra.parse_1D(file, ext)
|
||||||
|
|
||||||
|
pyzebra.normalize_dataset(append_data, monitor_spinner.value)
|
||||||
|
det_data.extend(append_data)
|
||||||
|
|
||||||
|
_init_datatable()
|
||||||
|
|
||||||
|
append_upload_div = Div(text="append extra files:", margin=(5, 5, 0, 5))
|
||||||
|
append_upload_button = FileInput(accept=".dat", multiple=True, default_size=200)
|
||||||
|
append_upload_button.on_change("value", append_upload_button_callback)
|
||||||
|
|
||||||
|
def monitor_spinner_callback(_attr, _old, new):
|
||||||
|
if det_data:
|
||||||
|
pyzebra.normalize_dataset(det_data, new)
|
||||||
|
_update_plot()
|
||||||
|
|
||||||
|
monitor_spinner = Spinner(title="Monitor:", mode="int", value=100_000, low=1, width=145)
|
||||||
|
monitor_spinner.on_change("value", monitor_spinner_callback)
|
||||||
|
|
||||||
|
def _update_table():
|
||||||
|
fit_ok = [(1 if "fit" in scan else 0) for scan in det_data]
|
||||||
|
scan_table_source.data.update(fit=fit_ok)
|
||||||
|
|
||||||
|
def _update_plot():
|
||||||
|
_update_single_scan_plot(_get_selected_scan())
|
||||||
|
_update_overview()
|
||||||
|
|
||||||
|
def _update_single_scan_plot(scan):
|
||||||
|
scan_motor = scan["scan_motor"]
|
||||||
|
|
||||||
|
y = scan["Counts"]
|
||||||
|
x = scan[scan_motor]
|
||||||
|
|
||||||
|
plot.axis[0].axis_label = scan_motor
|
||||||
|
plot_scatter_source.data.update(x=x, y=y, y_upper=y + np.sqrt(y), y_lower=y - np.sqrt(y))
|
||||||
|
|
||||||
|
fit = scan.get("fit")
|
||||||
|
if fit is not None:
|
||||||
|
x_fit = np.linspace(x[0], x[-1], 100)
|
||||||
|
plot_fit_source.data.update(x=x_fit, y=fit.eval(x=x_fit))
|
||||||
|
|
||||||
|
x_bkg = []
|
||||||
|
y_bkg = []
|
||||||
|
xs_peak = []
|
||||||
|
ys_peak = []
|
||||||
|
comps = fit.eval_components(x=x_fit)
|
||||||
|
for i, model in enumerate(fit_params):
|
||||||
|
if "linear" in model:
|
||||||
|
x_bkg = x_fit
|
||||||
|
y_bkg = comps[f"f{i}_"]
|
||||||
|
|
||||||
|
elif any(val in model for val in ("gaussian", "voigt", "pvoigt")):
|
||||||
|
xs_peak.append(x_fit)
|
||||||
|
ys_peak.append(comps[f"f{i}_"])
|
||||||
|
|
||||||
|
plot_bkg_source.data.update(x=x_bkg, y=y_bkg)
|
||||||
|
plot_peak_source.data.update(xs=xs_peak, ys=ys_peak)
|
||||||
|
|
||||||
|
fit_output_textinput.value = fit.fit_report()
|
||||||
|
|
||||||
|
else:
|
||||||
|
plot_fit_source.data.update(x=[], y=[])
|
||||||
|
plot_bkg_source.data.update(x=[], y=[])
|
||||||
|
plot_peak_source.data.update(xs=[], ys=[])
|
||||||
|
fit_output_textinput.value = ""
|
||||||
|
|
||||||
|
def _update_overview():
|
||||||
|
xs = []
|
||||||
|
ys = []
|
||||||
|
param = []
|
||||||
|
x = []
|
||||||
|
y = []
|
||||||
|
par = []
|
||||||
|
for s, p in enumerate(scan_table_source.data["param"]):
|
||||||
|
if p is not None:
|
||||||
|
scan = det_data[s]
|
||||||
|
scan_motor = scan["scan_motor"]
|
||||||
|
xs.append(scan[scan_motor])
|
||||||
|
x.extend(scan[scan_motor])
|
||||||
|
ys.append(scan["Counts"])
|
||||||
|
y.extend([float(p)] * len(scan[scan_motor]))
|
||||||
|
param.append(float(p))
|
||||||
|
par.extend(scan["Counts"])
|
||||||
|
|
||||||
|
if det_data:
|
||||||
|
scan_motor = det_data[0]["scan_motor"]
|
||||||
|
ov_plot.axis[0].axis_label = scan_motor
|
||||||
|
ov_param_plot.axis[0].axis_label = scan_motor
|
||||||
|
|
||||||
|
ov_plot_mline_source.data.update(xs=xs, ys=ys, param=param, color=color_palette(len(xs)))
|
||||||
|
|
||||||
|
if y:
|
||||||
|
mapper["transform"].low = np.min([np.min(y) for y in ys])
|
||||||
|
mapper["transform"].high = np.max([np.max(y) for y in ys])
|
||||||
|
ov_param_plot_scatter_source.data.update(x=x, y=y, param=par)
|
||||||
|
|
||||||
|
# Main plot
|
||||||
|
plot = Plot(
|
||||||
|
x_range=DataRange1d(),
|
||||||
|
y_range=DataRange1d(only_visible=True),
|
||||||
|
plot_height=450,
|
||||||
|
plot_width=700,
|
||||||
|
)
|
||||||
|
|
||||||
|
plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
|
||||||
|
plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
|
||||||
|
|
||||||
|
plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
|
||||||
|
plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
|
||||||
|
|
||||||
|
plot_scatter_source = ColumnDataSource(dict(x=[0], y=[0], y_upper=[0], y_lower=[0]))
|
||||||
|
plot_scatter = plot.add_glyph(
|
||||||
|
plot_scatter_source, Scatter(x="x", y="y", line_color="steelblue")
|
||||||
|
)
|
||||||
|
plot.add_layout(Whisker(source=plot_scatter_source, base="x", upper="y_upper", lower="y_lower"))
|
||||||
|
|
||||||
|
plot_fit_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||||
|
plot_fit = plot.add_glyph(plot_fit_source, Line(x="x", y="y"))
|
||||||
|
|
||||||
|
plot_bkg_source = ColumnDataSource(dict(x=[0], y=[0]))
|
||||||
|
plot_bkg = plot.add_glyph(
|
||||||
|
plot_bkg_source, Line(x="x", y="y", line_color="green", line_dash="dashed")
|
||||||
|
)
|
||||||
|
|
||||||
|
plot_peak_source = ColumnDataSource(dict(xs=[0], ys=[0]))
|
||||||
|
plot_peak = plot.add_glyph(
|
||||||
|
plot_peak_source, MultiLine(xs="xs", ys="ys", line_color="red", line_dash="dashed")
|
||||||
|
)
|
||||||
|
|
||||||
|
fit_from_span = Span(location=None, dimension="height", line_dash="dashed")
|
||||||
|
plot.add_layout(fit_from_span)
|
||||||
|
|
||||||
|
fit_to_span = Span(location=None, dimension="height", line_dash="dashed")
|
||||||
|
plot.add_layout(fit_to_span)
|
||||||
|
|
||||||
|
plot.add_layout(
|
||||||
|
Legend(
|
||||||
|
items=[
|
||||||
|
("data", [plot_scatter]),
|
||||||
|
("best fit", [plot_fit]),
|
||||||
|
("peak", [plot_peak]),
|
||||||
|
("linear", [plot_bkg]),
|
||||||
|
],
|
||||||
|
location="top_left",
|
||||||
|
click_policy="hide",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
|
||||||
|
plot.toolbar.logo = None
|
||||||
|
|
||||||
|
# Overview multilines plot
|
||||||
|
ov_plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700)
|
||||||
|
|
||||||
|
ov_plot.add_layout(LinearAxis(axis_label="Counts"), place="left")
|
||||||
|
ov_plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
|
||||||
|
|
||||||
|
ov_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
|
||||||
|
ov_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
|
||||||
|
|
||||||
|
ov_plot_mline_source = ColumnDataSource(dict(xs=[], ys=[], param=[], color=[]))
|
||||||
|
ov_plot.add_glyph(ov_plot_mline_source, MultiLine(xs="xs", ys="ys", line_color="color"))
|
||||||
|
|
||||||
|
hover_tool = HoverTool(tooltips=[("param", "@param")])
|
||||||
|
ov_plot.add_tools(PanTool(), WheelZoomTool(), hover_tool, ResetTool())
|
||||||
|
|
||||||
|
ov_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
|
||||||
|
ov_plot.toolbar.logo = None
|
||||||
|
|
||||||
|
# Overview perams plot
|
||||||
|
ov_param_plot = Plot(
|
||||||
|
x_range=DataRange1d(), y_range=DataRange1d(), plot_height=400, plot_width=700
|
||||||
|
)
|
||||||
|
|
||||||
|
ov_param_plot.add_layout(LinearAxis(axis_label="Param"), place="left")
|
||||||
|
ov_param_plot.add_layout(LinearAxis(axis_label="Scan motor"), place="below")
|
||||||
|
|
||||||
|
ov_param_plot.add_layout(Grid(dimension=0, ticker=BasicTicker()))
|
||||||
|
ov_param_plot.add_layout(Grid(dimension=1, ticker=BasicTicker()))
|
||||||
|
|
||||||
|
ov_param_plot_scatter_source = ColumnDataSource(dict(x=[], y=[], param=[]))
|
||||||
|
mapper = linear_cmap(field_name="param", palette=Turbo256, low=0, high=50)
|
||||||
|
ov_param_plot.add_glyph(
|
||||||
|
ov_param_plot_scatter_source,
|
||||||
|
Scatter(x="x", y="y", line_color=mapper, fill_color=mapper, size=10),
|
||||||
|
)
|
||||||
|
|
||||||
|
ov_param_plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
|
||||||
|
ov_param_plot.toolbar.logo = None
|
||||||
|
|
||||||
|
# Plot tabs
|
||||||
|
plots = Tabs(
|
||||||
|
tabs=[
|
||||||
|
Panel(child=plot, title="single scan"),
|
||||||
|
Panel(child=ov_plot, title="overview"),
|
||||||
|
Panel(child=ov_param_plot, title="overview map"),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
# Scan select
|
||||||
|
def scan_table_select_callback(_attr, old, new):
|
||||||
|
if not new:
|
||||||
|
# skip empty selections
|
||||||
|
return
|
||||||
|
|
||||||
|
# Avoid selection of multiple indicies (via Shift+Click or Ctrl+Click)
|
||||||
|
if len(new) > 1:
|
||||||
|
# drop selection to the previous one
|
||||||
|
scan_table_source.selected.indices = old
|
||||||
|
return
|
||||||
|
|
||||||
|
if len(old) > 1:
|
||||||
|
# skip unnecessary update caused by selection drop
|
||||||
|
return
|
||||||
|
|
||||||
|
_update_plot()
|
||||||
|
|
||||||
|
scan_table_source = ColumnDataSource(dict(file=[], scan=[], param=[], fit=[], export=[]))
|
||||||
|
scan_table = DataTable(
|
||||||
|
source=scan_table_source,
|
||||||
|
columns=[
|
||||||
|
TableColumn(field="file", title="file", width=150),
|
||||||
|
TableColumn(field="scan", title="scan", width=50),
|
||||||
|
TableColumn(field="param", title="param", editor=NumberEditor(), width=50),
|
||||||
|
TableColumn(field="fit", title="Fit", width=50),
|
||||||
|
TableColumn(field="export", title="Export", editor=CheckboxEditor(), width=50),
|
||||||
|
],
|
||||||
|
width=410, # +60 because of the index column
|
||||||
|
editable=True,
|
||||||
|
autosize_mode="none",
|
||||||
|
)
|
||||||
|
|
||||||
|
def scan_table_source_callback(_attr, _old, _new):
|
||||||
|
if scan_table_source.selected.indices:
|
||||||
|
_update_plot()
|
||||||
|
|
||||||
|
scan_table_source.selected.on_change("indices", scan_table_select_callback)
|
||||||
|
scan_table_source.on_change("data", scan_table_source_callback)
|
||||||
|
|
||||||
|
def _get_selected_scan():
|
||||||
|
return det_data[scan_table_source.selected.indices[0]]
|
||||||
|
|
||||||
|
def param_select_callback(_attr, _old, new):
|
||||||
|
if new == "user defined":
|
||||||
|
param = [None] * len(det_data)
|
||||||
|
else:
|
||||||
|
param = [scan[new] for scan in det_data]
|
||||||
|
|
||||||
|
scan_table_source.data["param"] = param
|
||||||
|
|
||||||
|
param_select = Select(
|
||||||
|
title="Parameter:",
|
||||||
|
options=["user defined", "temp", "mf", "h", "k", "l"],
|
||||||
|
value="user defined",
|
||||||
|
default_size=145,
|
||||||
|
)
|
||||||
|
param_select.on_change("value", param_select_callback)
|
||||||
|
|
||||||
|
def fit_from_spinner_callback(_attr, _old, new):
|
||||||
|
fit_from_span.location = new
|
||||||
|
|
||||||
|
fit_from_spinner = Spinner(title="Fit from:", default_size=145)
|
||||||
|
fit_from_spinner.on_change("value", fit_from_spinner_callback)
|
||||||
|
|
||||||
|
def fit_to_spinner_callback(_attr, _old, new):
|
||||||
|
fit_to_span.location = new
|
||||||
|
|
||||||
|
fit_to_spinner = Spinner(title="to:", default_size=145)
|
||||||
|
fit_to_spinner.on_change("value", fit_to_spinner_callback)
|
||||||
|
|
||||||
|
def fitparams_add_dropdown_callback(click):
|
||||||
|
# bokeh requires (str, str) for MultiSelect options
|
||||||
|
new_tag = f"{click.item}-{fitparams_select.tags[0]}"
|
||||||
|
fitparams_select.options.append((new_tag, click.item))
|
||||||
|
fit_params[new_tag] = fitparams_factory(click.item)
|
||||||
|
fitparams_select.tags[0] += 1
|
||||||
|
|
||||||
|
fitparams_add_dropdown = Dropdown(
|
||||||
|
label="Add fit function",
|
||||||
|
menu=[
|
||||||
|
("Linear", "linear"),
|
||||||
|
("Gaussian", "gaussian"),
|
||||||
|
("Voigt", "voigt"),
|
||||||
|
("Pseudo Voigt", "pvoigt"),
|
||||||
|
# ("Pseudo Voigt1", "pseudovoigt1"),
|
||||||
|
],
|
||||||
|
default_size=145,
|
||||||
|
)
|
||||||
|
fitparams_add_dropdown.on_click(fitparams_add_dropdown_callback)
|
||||||
|
|
||||||
|
def fitparams_select_callback(_attr, old, new):
|
||||||
|
# Avoid selection of multiple indicies (via Shift+Click or Ctrl+Click)
|
||||||
|
if len(new) > 1:
|
||||||
|
# drop selection to the previous one
|
||||||
|
fitparams_select.value = old
|
||||||
|
return
|
||||||
|
|
||||||
|
if len(old) > 1:
|
||||||
|
# skip unnecessary update caused by selection drop
|
||||||
|
return
|
||||||
|
|
||||||
|
if new:
|
||||||
|
fitparams_table_source.data.update(fit_params[new[0]])
|
||||||
|
else:
|
||||||
|
fitparams_table_source.data.update(dict(param=[], value=[], vary=[], min=[], max=[]))
|
||||||
|
|
||||||
|
fitparams_select = MultiSelect(options=[], height=120, default_size=145)
|
||||||
|
fitparams_select.tags = [0]
|
||||||
|
fitparams_select.on_change("value", fitparams_select_callback)
|
||||||
|
|
||||||
|
def fitparams_remove_button_callback():
|
||||||
|
if fitparams_select.value:
|
||||||
|
sel_tag = fitparams_select.value[0]
|
||||||
|
del fit_params[sel_tag]
|
||||||
|
for elem in fitparams_select.options:
|
||||||
|
if elem[0] == sel_tag:
|
||||||
|
fitparams_select.options.remove(elem)
|
||||||
|
break
|
||||||
|
|
||||||
|
fitparams_select.value = []
|
||||||
|
|
||||||
|
fitparams_remove_button = Button(label="Remove fit function", default_size=145)
|
||||||
|
fitparams_remove_button.on_click(fitparams_remove_button_callback)
|
||||||
|
|
||||||
|
def fitparams_factory(function):
|
||||||
|
if function == "linear":
|
||||||
|
params = ["slope", "intercept"]
|
||||||
|
elif function == "gaussian":
|
||||||
|
params = ["amplitude", "center", "sigma"]
|
||||||
|
elif function == "voigt":
|
||||||
|
params = ["amplitude", "center", "sigma", "gamma"]
|
||||||
|
elif function == "pvoigt":
|
||||||
|
params = ["amplitude", "center", "sigma", "fraction"]
|
||||||
|
elif function == "pseudovoigt1":
|
||||||
|
params = ["amplitude", "center", "g_sigma", "l_sigma", "fraction"]
|
||||||
|
else:
|
||||||
|
raise ValueError("Unknown fit function")
|
||||||
|
|
||||||
|
n = len(params)
|
||||||
|
fitparams = dict(
|
||||||
|
param=params, value=[None] * n, vary=[True] * n, min=[None] * n, max=[None] * n,
|
||||||
|
)
|
||||||
|
|
||||||
|
return fitparams
|
||||||
|
|
||||||
|
fitparams_table_source = ColumnDataSource(dict(param=[], value=[], vary=[], min=[], max=[]))
|
||||||
|
fitparams_table = DataTable(
|
||||||
|
source=fitparams_table_source,
|
||||||
|
columns=[
|
||||||
|
TableColumn(field="param", title="Parameter"),
|
||||||
|
TableColumn(field="value", title="Value", editor=NumberEditor()),
|
||||||
|
TableColumn(field="vary", title="Vary", editor=CheckboxEditor()),
|
||||||
|
TableColumn(field="min", title="Min", editor=NumberEditor()),
|
||||||
|
TableColumn(field="max", title="Max", editor=NumberEditor()),
|
||||||
|
],
|
||||||
|
height=200,
|
||||||
|
width=350,
|
||||||
|
index_position=None,
|
||||||
|
editable=True,
|
||||||
|
auto_edit=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# start with `background` and `gauss` fit functions added
|
||||||
|
fitparams_add_dropdown_callback(types.SimpleNamespace(item="linear"))
|
||||||
|
fitparams_add_dropdown_callback(types.SimpleNamespace(item="gaussian"))
|
||||||
|
fitparams_select.value = ["gaussian-1"] # add selection to gauss
|
||||||
|
|
||||||
|
    fit_output_textinput = TextAreaInput(title="Fit results:", width=750, height=200)

    def fit_all_button_callback():
        for scan, export in zip(det_data, scan_table_source.data["export"]):
            if export:
                pyzebra.fit_scan(
                    scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
                )

        _update_plot()
        _update_table()

    fit_all_button = Button(label="Fit All", button_type="primary", default_size=145)
    fit_all_button.on_click(fit_all_button_callback)

    def fit_button_callback():
        scan = _get_selected_scan()
        pyzebra.fit_scan(
            scan, fit_params, fit_from=fit_from_spinner.value, fit_to=fit_to_spinner.value
        )

        _update_plot()
        _update_table()

    fit_button = Button(label="Fit Current", default_size=145)
    fit_button.on_click(fit_button_callback)

    area_method_radiobutton = RadioButtonGroup(
        labels=["Fit area", "Int area"], active=0, default_size=145, disabled=True
    )

    bin_size_spinner = Spinner(
        title="Bin size:", value=1, low=1, step=1, default_size=145, disabled=True
    )

    lorentz_toggle = Toggle(label="Lorentz Correction", default_size=145)

    export_preview_textinput = TextAreaInput(title="Export preview:", width=450, height=400)

    def preview_button_callback():
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_file = temp_dir + "/temp"
            export_data = []
            for s, export in zip(det_data, scan_table_source.data["export"]):
                if export:
                    export_data.append(s)

            pyzebra.export_1D(
                export_data,
                temp_file,
                area_method=AREA_METHODS[int(area_method_radiobutton.active)],
                lorentz=lorentz_toggle.active,
            )

            exported_content = ""
            file_content = []
            for ext in (".comm", ".incomm"):
                fname = temp_file + ext
                if os.path.isfile(fname):
                    with open(fname) as f:
                        content = f.read()
                    exported_content += f"{ext} file:\n" + content
                else:
                    content = ""
                file_content.append(content)

            js_data.data.update(content=file_content)
            export_preview_textinput.value = exported_content

    preview_button = Button(label="Preview", default_size=220)
    preview_button.on_click(preview_button_callback)

    save_button = Button(label="Download preview", button_type="success", default_size=220)
    save_button.js_on_click(CustomJS(args={"js_data": js_data}, code=javaScript))

    fitpeak_controls = row(
        column(fitparams_add_dropdown, fitparams_select, fitparams_remove_button),
        fitparams_table,
        Spacer(width=20),
        column(
            row(fit_from_spinner, fit_to_spinner),
            row(bin_size_spinner, column(Spacer(height=19), lorentz_toggle)),
            row(area_method_radiobutton),
            row(fit_button, fit_all_button),
        ),
    )

    scan_layout = column(scan_table, row(monitor_spinner, param_select))

    import_layout = column(
        proposal_textinput,
        file_select,
        row(file_open_button, file_append_button),
        upload_div,
        upload_button,
        append_upload_div,
        append_upload_button,
    )

    export_layout = column(export_preview_textinput, row(preview_button, save_button))

    tab_layout = column(
        row(import_layout, scan_layout, plots, Spacer(width=30), export_layout),
        row(fitpeak_controls, fit_output_textinput),
    )

    return Panel(child=tab_layout, title="param study")
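For reference, the fit-and-export flow the two callbacks above drive can also be run from a plain script. This is a minimal sketch, assuming load_1D and normalize_dataset are re-exported at the package level the way fit_scan and export_1D are used above; the file path and the single-model fit_params dict are illustrative only.

import pyzebra

dataset = pyzebra.load_1D("data/exp_604.ccl")  # hypothetical input file
pyzebra.normalize_dataset(dataset, monitor=100_000)

# one gaussian; every hint left as None falls back to lmfit's guess()
fit_params = {
    "gaussian-0": {
        "param": ["amplitude", "center", "sigma"],
        "value": [None, None, None],
        "vary": [True, True, True],
        "min": [None, None, None],
        "max": [None, None, None],
    }
}

for scan in dataset:
    pyzebra.fit_scan(scan, fit_params)

# writes /tmp/preview.comm and/or /tmp/preview.incomm, as in the preview callback
pyzebra.export_1D(dataset, "/tmp/preview", area_method="fit_area", lorentz=False)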
pyzebra/app/panel_spind.py (new file, 253 lines)
@@ -0,0 +1,253 @@
import ast
import math
import os
import subprocess
import tempfile
from collections import defaultdict

import numpy as np
from bokeh.layouts import column, row
from bokeh.models import (
    Button,
    ColumnDataSource,
    DataTable,
    Panel,
    Spinner,
    TableColumn,
    TextAreaInput,
    TextInput,
)
from scipy.optimize import curve_fit

import pyzebra


def create():
    path_prefix_textinput = TextInput(title="Path prefix:", value="")
    selection_list = TextAreaInput(title="ROIs:", rows=7)
    lattice_const_textinput = TextInput(
        title="Lattice constants:", value="8.3211,8.3211,8.3211,90.00,90.00,90.00"
    )
    max_res_spinner = Spinner(title="max-res", value=2, step=0.01)
    seed_pool_size_spinner = Spinner(title="seed-pool-size", value=5, step=0.01)
    seed_len_tol_spinner = Spinner(title="seed-len-tol", value=0.02, step=0.01)
    seed_angle_tol_spinner = Spinner(title="seed-angle-tol", value=1, step=0.01)
    eval_hkl_tol_spinner = Spinner(title="eval-hkl-tol", value=0.15, step=0.01)

    diff_vec = []

    def process_button_callback():
        nonlocal diff_vec
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_peak_list_dir = os.path.join(temp_dir, "peak_list")
            os.mkdir(temp_peak_list_dir)
            temp_event_file = os.path.join(temp_peak_list_dir, "event-0.txt")
            temp_hkl_file = os.path.join(temp_dir, "hkl.h5")
            roi_dict = ast.literal_eval(selection_list.value)

            comp_proc = subprocess.run(
                [
                    "mpiexec",
                    "-n",
                    "2",
                    "python",
                    "spind/gen_hkl_table.py",
                    lattice_const_textinput.value,
                    "--max-res",
                    str(max_res_spinner.value),
                    "-o",
                    temp_hkl_file,
                ],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
            )
            print(" ".join(comp_proc.args))
            print(comp_proc.stdout)

            diff_vec = prepare_event_file(temp_event_file, roi_dict, path_prefix_textinput.value)

            comp_proc = subprocess.run(
                [
                    "mpiexec",
                    "-n",
                    "2",
                    "python",
                    "spind/SPIND.py",
                    temp_peak_list_dir,
                    temp_hkl_file,
                    "-o",
                    temp_dir,
                    "--seed-pool-size",
                    str(seed_pool_size_spinner.value),
                    "--seed-len-tol",
                    str(seed_len_tol_spinner.value),
                    "--seed-angle-tol",
                    str(seed_angle_tol_spinner.value),
                    "--eval-hkl-tol",
                    str(eval_hkl_tol_spinner.value),
                ],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
            )
            print(" ".join(comp_proc.args))
            print(comp_proc.stdout)

            try:
                with open(os.path.join(temp_dir, "spind.txt")) as f_out:
                    spind_res = defaultdict(list)
                    for line in f_out:
                        c1, c2, c3, c4, c5, *c_rest = line.split()
                        spind_res["label"].append(c1)
                        spind_res["crystal_id"].append(c2)
                        spind_res["match_rate"].append(c3)
                        spind_res["matched_peaks"].append(c4)
                        spind_res["column_5"].append(c5)

                        # last digits are spind UB matrix
                        vals = list(map(float, c_rest))
                        ub_matrix_spind = np.array(vals).reshape(3, 3)
                        ub_matrix = np.linalg.inv(np.transpose(ub_matrix_spind)) * 1e10
                        spind_res["ub_matrix"].append(ub_matrix)

                    results_table_source.data.update(spind_res)

            except FileNotFoundError:
                print("No results from spind")

    process_button = Button(label="Process", button_type="primary")
    process_button.on_click(process_button_callback)

    hkl_textareainput = TextAreaInput(title="hkl values:", rows=7)

    def results_table_select_callback(_attr, old, new):
        if new:
            ind = new[0]
            ub_matrix = results_table_source.data["ub_matrix"][ind]
            res = ""
            for vec in diff_vec:
                res += f"{vec @ ub_matrix}\n"
            hkl_textareainput.value = res
        else:
            hkl_textareainput.value = None

    results_table_source = ColumnDataSource(dict())
    results_table = DataTable(
        source=results_table_source,
        columns=[
            TableColumn(field="label", title="Label", width=50),
            TableColumn(field="crystal_id", title="Crystal ID", width=100),
            TableColumn(field="match_rate", title="Match Rate", width=100),
            TableColumn(field="matched_peaks", title="Matched Peaks", width=100),
            TableColumn(field="column_5", title="", width=100),
            TableColumn(field="ub_matrix", title="UB Matrix", width=250),
        ],
        height=300,
        width=700,
        autosize_mode="none",
        index_position=None,
    )

    results_table_source.selected.on_change("indices", results_table_select_callback)

    tab_layout = row(
        column(
            path_prefix_textinput,
            selection_list,
            lattice_const_textinput,
            max_res_spinner,
            seed_pool_size_spinner,
            seed_len_tol_spinner,
            seed_angle_tol_spinner,
            eval_hkl_tol_spinner,
            process_button,
        ),
        column(results_table, row(hkl_textareainput)),
    )

    return Panel(child=tab_layout, title="spind")


def gauss(x, *p):
    """Defines Gaussian function

    Args:
        A - amplitude, mu - position of the center, sigma - width

    Returns:
        Gaussian function
    """
    A, mu, sigma = p
    return A * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))


def prepare_event_file(export_filename, roi_dict, path_prefix=""):
    diff_vec = []
    p0 = [1.0, 0.0, 1.0]
    maxfev = 100000
    with open(export_filename, "w") as f:
        for file, rois in roi_dict.items():
            dat = pyzebra.read_detector_data(path_prefix + file + ".hdf")

            wave = dat["wave"]
            ddist = dat["ddist"]

            gamma = dat["gamma"][0]
            omega = dat["omega"][0]
            nu = dat["nu"][0]
            chi = dat["chi"][0]
            phi = dat["phi"][0]

            scan_motor = dat["scan_motor"]
            var_angle = dat[scan_motor]

            for roi in rois:
                x0, xN, y0, yN, fr0, frN = roi
                data_roi = dat["data"][fr0:frN, y0:yN, x0:xN]

                cnts = np.sum(data_roi, axis=(1, 2))
                coeff, _ = curve_fit(gauss, range(len(cnts)), cnts, p0=p0, maxfev=maxfev)

                m = cnts.mean()
                sd = cnts.std()
                snr_cnts = np.where(sd == 0, 0, m / sd)

                frC = fr0 + coeff[1]
                var_F = var_angle[math.floor(frC)]
                var_C = var_angle[math.ceil(frC)]
                frStep = frC - math.floor(frC)
                var_step = var_C - var_F
                var_p = var_F + var_step * frStep

                if scan_motor == "gamma":
                    gamma = var_p
                elif scan_motor == "omega":
                    omega = var_p
                elif scan_motor == "nu":
                    nu = var_p
                elif scan_motor == "chi":
                    chi = var_p
                elif scan_motor == "phi":
                    phi = var_p

                intensity = coeff[1] * abs(coeff[2] * var_step) * math.sqrt(2) * math.sqrt(np.pi)

                projX = np.sum(data_roi, axis=(0, 1))
                coeff, _ = curve_fit(gauss, range(len(projX)), projX, p0=p0, maxfev=maxfev)
                x_pos = x0 + coeff[1]

                projY = np.sum(data_roi, axis=(0, 2))
                coeff, _ = curve_fit(gauss, range(len(projY)), projY, p0=p0, maxfev=maxfev)
                y_pos = y0 + coeff[1]

                ga, nu = pyzebra.det2pol(ddist, gamma, nu, x_pos, y_pos)
                diff_vector = pyzebra.z1frmd(wave, ga, omega, chi, phi, nu)
                d_spacing = float(pyzebra.dandth(wave, diff_vector)[0])
                dv1, dv2, dv3 = diff_vector.flatten() * 1e10

                diff_vec.append(diff_vector.flatten())

                f.write(f"{x_pos} {y_pos} {intensity} {snr_cnts} {dv1} {dv2} {dv3} {d_spacing}\n")

    return diff_vec
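The ROIs field of this tab is parsed with ast.literal_eval, so it must hold a valid Python dict literal. A hypothetical example (file stem and numbers made up): each key is an .hdf file stem (read as path_prefix + stem + ".hdf"), and each value is a list of ROIs given as [x0, xN, y0, yN, fr0, frN], i.e. a detector window plus a frame range:

{'zebra_00123': [[200, 260, 140, 200, 0, 30], [300, 360, 90, 150, 10, 40]]}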
@@ -1,513 +0,0 @@
import numpy as np
import uncertainties as u

from .fit2 import create_uncertanities


def add_dict(dict1, dict2):
    """adds two dictionaries, meta of the new one is saved as meta+original_filename and
    measurements are shifted to continue the numbering of the first dict
    :arg dict1 : dictionary to add to
    :arg dict2 : dictionary from which to take the measurements
    :return dict1 : combined dictionary
    Note: dict1 must be made from a ccl file, otherwise we would have to change the structure of
    the loaded dat file"""
    max_measurement_dict1 = max([int(str(keys)[1:]) for keys in dict1["scan"]])
    if dict2["meta"]["data_type"] == ".ccl":
        new_filenames = [
            "M" + str(x + max_measurement_dict1)
            for x in [int(str(keys)[1:]) for keys in dict2["scan"]]
        ]
        new_meta_name = "meta" + str(dict2["meta"]["original_filename"])
        if new_meta_name not in dict1:
            for keys, name in zip(dict2["scan"], new_filenames):
                dict2["scan"][keys]["file_of_origin"] = str(dict2["meta"]["original_filename"])
                dict1["scan"][name] = dict2["scan"][keys]

            dict1[new_meta_name] = dict2["meta"]

        else:
            raise KeyError(
                str(
                    "The file %s has already been added to %s"
                    % (dict2["meta"]["original_filename"], dict1["meta"]["original_filename"])
                )
            )
    elif dict2["meta"]["data_type"] == ".dat":
        d = {}
        new_name = "M" + str(max_measurement_dict1 + 1)
        hkl = dict2["meta"]["title"]
        d["h_index"] = float(hkl.split()[-3])
        d["k_index"] = float(hkl.split()[-2])
        d["l_index"] = float(hkl.split()[-1])
        d["number_of_measurements"] = len(dict2["scan"]["NP"])
        d["om"] = dict2["scan"]["om"]
        d["Counts"] = dict2["scan"]["Counts"]
        d["monitor"] = dict2["scan"]["Monitor1"][0]
        d["temperature"] = dict2["meta"]["temp"]
        d["mag_field"] = dict2["meta"]["mf"]
        d["omega_angle"] = dict2["meta"]["omega"]
        dict1["scan"][new_name] = d
        print(hkl.split())
        for keys in d:
            print(keys)

        print("s")

    return dict1


def auto(dict):
    """takes just the unique tuples from all tuples in the dictionary returned by scan_dict;
    intended for automatic merge if you don't want to specify which scans to merge together
    args: dict - dictionary from scan_dict function
    :return dict - dict without repetitions"""
    for keys in dict:
        tuple_list = dict[keys]
        new = list()
        for i in range(len(tuple_list)):
            if tuple_list[0][0] == tuple_list[i][0]:
                new.append(tuple_list[i])
        dict[keys] = new
    return dict


def scan_dict(dict):
    """scans dictionary for duplicate hkl indexes
    :arg dict : dictionary to scan
    :return dictionary with matching scans, if there are none, the dict is empty
    note: can be checked by "not d", true if empty
    """

    d = {}
    for i in dict["scan"]:
        for j in dict["scan"]:
            if dict["scan"][str(i)] != dict["scan"][str(j)]:
                itup = (
                    dict["scan"][str(i)]["h_index"],
                    dict["scan"][str(i)]["k_index"],
                    dict["scan"][str(i)]["l_index"],
                )
                jtup = (
                    dict["scan"][str(j)]["h_index"],
                    dict["scan"][str(j)]["k_index"],
                    dict["scan"][str(j)]["l_index"],
                )
                if itup != jtup:
                    pass
                else:
                    if str(itup) not in d:
                        d[str(itup)] = list()
                        d[str(itup)].append((i, j))
                    else:
                        d[str(itup)].append((i, j))
            else:
                continue
    return d


def compare_hkl(dict1, dict2):
    """Compares two dictionaries based on hkl indexes and returns a dictionary with str(h k l) as
    key and a tuple with keys to the same scan in dict1 and dict2
    :arg dict1 : first dictionary
    :arg dict2 : second dictionary
    :return d : dict with matches
    example of one key: '0.0 0.0 -1.0 : ('M1', 'M9')' meaning that the 001 hkl scan is M1 in
    the first dict and M9 in the second"""
    d = {}
    dupl = 0
    for keys in dict1["scan"]:
        for key in dict2["scan"]:
            if (
                dict1["scan"][str(keys)]["h_index"] == dict2["scan"][str(key)]["h_index"]
                and dict1["scan"][str(keys)]["k_index"] == dict2["scan"][str(key)]["k_index"]
                and dict1["scan"][str(keys)]["l_index"] == dict2["scan"][str(key)]["l_index"]
            ):
                if (
                    str(
                        (
                            str(dict1["scan"][str(keys)]["h_index"])
                            + " "
                            + str(dict1["scan"][str(keys)]["k_index"])
                            + " "
                            + str(dict1["scan"][str(keys)]["l_index"])
                        )
                    )
                    not in d
                ):
                    d[
                        str(
                            str(dict1["scan"][str(keys)]["h_index"])
                            + " "
                            + str(dict1["scan"][str(keys)]["k_index"])
                            + " "
                            + str(dict1["scan"][str(keys)]["l_index"])
                        )
                    ] = (str(keys), str(key))
                else:
                    dupl = dupl + 1
                    d[
                        str(
                            str(dict1["scan"][str(keys)]["h_index"])
                            + " "
                            + str(dict1["scan"][str(keys)]["k_index"])
                            + " "
                            + str(dict1["scan"][str(keys)]["l_index"])
                            + "_dupl"
                            + str(dupl)
                        )
                    ] = (str(keys), str(key))
            else:
                continue

    return d


def create_tuples(x, y, y_err):
    """creates tuples for sorting and merging of the data
    Counts need to be normalized to monitor before"""
    t = list()
    for i in range(len(x)):
        tup = (x[i], y[i], y_err[i])
        t.append(tup)
    return t


def normalize(dict, key, monitor):
    """Normalizes the scan to monitor, checks if sigma exists, otherwise creates it
    :arg dict : dictionary from which to take the scan
    :arg key : which scan to normalize from dict1
    :arg monitor : final monitor
    :return counts - normalized counts
    :return sigma - normalized sigma"""

    counts = np.array(dict["scan"][key]["Counts"])
    sigma = np.sqrt(counts) if "sigma" not in dict["scan"][key] else dict["scan"][key]["sigma"]
    monitor_ratio = monitor / dict["scan"][key]["monitor"]
    scaled_counts = counts * monitor_ratio
    scaled_sigma = np.array(sigma) * monitor_ratio

    return scaled_counts, scaled_sigma


def merge(dict1, dict2, keys, auto=True, monitor=100000):
    """merges the two tuples and sorts them; if an om value is the same, the Counts value is
    averaged and the averaging is propagated into sigma; if dict1 == dict2, key[1] is deleted
    after merging
    :arg dict1 : dictionary to which the scan will be merged
    :arg dict2 : dictionary from which the scan will be merged
    :arg keys : tuple with keys to dict1 and dict2
    :arg auto : if true, when monitors are the same, does not change it; if false, always takes
    the monitor argument
    :arg monitor : final monitor after merging
    note: dict1 and dict2 can be the same dict
    :return dict1 with merged scan"""
    if auto:
        if dict1["scan"][keys[0]]["monitor"] == dict2["scan"][keys[1]]["monitor"]:
            monitor = dict1["scan"][keys[0]]["monitor"]

    # load om and Counts
    x1, x2 = dict1["scan"][keys[0]]["om"], dict2["scan"][keys[1]]["om"]
    cor_y1, y_err1 = normalize(dict1, keys[0], monitor=monitor)
    cor_y2, y_err2 = normalize(dict2, keys[1], monitor=monitor)
    # creates tuples (om, Counts, sigma) for sorting and further processing
    tuple_list = create_tuples(x1, cor_y1, y_err1) + create_tuples(x2, cor_y2, y_err2)
    # Sort the list on om and add 0 0 0 tuple to the last position
    sorted_t = sorted(tuple_list, key=lambda tup: tup[0])
    sorted_t.append((0, 0, 0))
    om, Counts, sigma = [], [], []
    seen = list()
    for i in range(len(sorted_t) - 1):
        if sorted_t[i][0] not in seen:
            if sorted_t[i][0] != sorted_t[i + 1][0]:
                om = np.append(om, sorted_t[i][0])
                Counts = np.append(Counts, sorted_t[i][1])
                sigma = np.append(sigma, sorted_t[i][2])
            else:
                om = np.append(om, sorted_t[i][0])
                counts1, counts2 = sorted_t[i][1], sorted_t[i + 1][1]
                sigma1, sigma2 = sorted_t[i][2], sorted_t[i + 1][2]
                count_err1 = u.ufloat(counts1, sigma1)
                count_err2 = u.ufloat(counts2, sigma2)
                avg = (count_err1 + count_err2) / 2
                Counts = np.append(Counts, avg.n)
                sigma = np.append(sigma, avg.s)
                seen.append(sorted_t[i][0])
        else:
            continue

    if dict1 == dict2:
        del dict1["scan"][keys[1]]

    note = (
        f"This scan was merged with scan {keys[1]} from "
        f'file {dict2["meta"]["original_filename"]} \n'
    )
    if "notes" not in dict1["scan"][str(keys[0])]:
        dict1["scan"][str(keys[0])]["notes"] = note
    else:
        dict1["scan"][str(keys[0])]["notes"] += note

    dict1["scan"][keys[0]]["om"] = om
    dict1["scan"][keys[0]]["Counts"] = Counts
    dict1["scan"][keys[0]]["sigma"] = sigma
    dict1["scan"][keys[0]]["monitor"] = monitor
    print("merging done")
    return dict1


def substract_measurement(dict1, dict2, keys, auto=True, monitor=100000):
    """Subtracts two scans (scan key2 from dict2 from measurement key1 in dict1), expects om to
    be the same
    :arg dict1 : dictionary from whose scan the other scan will be subtracted
    :arg dict2 : dictionary whose scan will be subtracted
    :arg keys : tuple with keys to dict1 and dict2
    :arg auto : if true, when monitors are the same, does not change it; if false, always takes
    the monitor argument
    :arg monitor : final monitor after merging
    :returns d : dict1 with Counts from dict2 subtracted and the sigma that comes from the
    subtraction"""

    if len(dict1["scan"][keys[0]]["om"]) != len(dict2["scan"][keys[1]]["om"]):
        raise ValueError("Omegas have different lengths, cannot be subtracted")

    if auto:
        if dict1["scan"][keys[0]]["monitor"] == dict2["scan"][keys[1]]["monitor"]:
            monitor = dict1["scan"][keys[0]]["monitor"]

    cor_y1, y_err1 = normalize(dict1, keys[0], monitor=monitor)
    cor_y2, y_err2 = normalize(dict2, keys[1], monitor=monitor)

    dict1_count_err = create_uncertanities(cor_y1, y_err1)
    dict2_count_err = create_uncertanities(cor_y2, y_err2)

    res = np.subtract(dict1_count_err, dict2_count_err)

    res_nom = []
    res_err = []
    for k in range(len(res)):
        res_nom = np.append(res_nom, res[k].n)
        res_err = np.append(res_err, res[k].s)

    if len([num for num in res_nom if num < 0]) >= 0.3 * len(res_nom):
        print(
            f"Warning! The fraction of negative numbers in the subtracted scan {keys[0]} is "
            f"{len([num for num in res_nom if num < 0]) / len(res_nom)}"
        )

    dict1["scan"][str(keys[0])]["Counts"] = res_nom
    dict1["scan"][str(keys[0])]["sigma"] = res_err
    dict1["scan"][str(keys[0])]["monitor"] = monitor
    note = (
        f'Scan {keys[1]} from file {dict2["meta"]["original_filename"]} '
        f"was subtracted from this scan \n"
    )
    if "notes" not in dict1["scan"][str(keys[0])]:
        dict1["scan"][str(keys[0])]["notes"] = note
    else:
        dict1["scan"][str(keys[0])]["notes"] += note
    return dict1


def compare_dict(dict1, dict2):
    """takes two ccl dictionaries and compares the differing values for each key
    :arg dict1 : dictionary 1 (ccl)
    :arg dict2 : dictionary 2 (ccl)
    :returns warnings : dictionary with keys from primary files (if they differ) with
    information on how many scans differ and which ones differ
    :returns report_string : string comparing all differing values of the measurements"""

    if dict1["meta"]["data_type"] != dict2["meta"]["data_type"]:
        print("select two dicts")
        return
    S = []
    conflicts = {}
    warnings = {}

    comp = compare_hkl(dict1, dict2)
    d1 = scan_dict(dict1)
    d2 = scan_dict(dict2)
    if not d1:
        S.append("There are no duplicates in %s (dict1) \n" % dict1["meta"]["original_filename"])
    else:
        S.append(
            "There are %d duplicates in %s (dict1) \n"
            % (len(d1), dict1["meta"]["original_filename"])
        )
        warnings["Duplicates in dict1"] = list()
        for keys in d1:
            S.append("Measurements %s with hkl %s \n" % (d1[keys], keys))
            warnings["Duplicates in dict1"].append(d1[keys])
    if not d2:
        S.append("There are no duplicates in %s (dict2) \n" % dict2["meta"]["original_filename"])
    else:
        S.append(
            "There are %d duplicates in %s (dict2) \n"
            % (len(d2), dict2["meta"]["original_filename"])
        )
        warnings["Duplicates in dict2"] = list()
        for keys in d2:
            S.append("Measurements %s with hkl %s \n" % (d2[keys], keys))
            warnings["Duplicates in dict2"].append(d2[keys])

    # compare meta
    S.append("Different values in meta: \n")
    different_meta = {
        k: dict1["meta"][k]
        for k in dict1["meta"]
        if k in dict2["meta"] and dict1["meta"][k] != dict2["meta"][k]
    }
    exlude_meta_set = ["original_filename", "date", "title"]
    for keys in different_meta:
        if keys in exlude_meta_set:
            continue
        else:
            if keys not in conflicts:
                conflicts[keys] = 1
            else:
                conflicts[keys] = conflicts[keys] + 1

            S.append("   Different values in %s \n" % str(keys))
            S.append("       dict1: %s \n" % str(dict1["meta"][str(keys)]))
            S.append("       dict2: %s \n" % str(dict2["meta"][str(keys)]))

    # compare Measurements
    S.append(
        "Number of measurements in %s = %s \n"
        % (dict1["meta"]["original_filename"], len(dict1["scan"]))
    )
    S.append(
        "Number of measurements in %s = %s \n"
        % (dict2["meta"]["original_filename"], len(dict2["scan"]))
    )
    S.append("Different values in Measurements:\n")
    select_set = ["om", "Counts", "sigma"]
    exlude_set = ["time", "Counts", "date", "notes"]
    for keys1 in comp:
        for key2 in dict1["scan"][str(comp[str(keys1)][0])]:
            if key2 in exlude_set:
                continue
            if key2 not in select_set:
                try:
                    if (
                        dict1["scan"][comp[str(keys1)][0]][str(key2)]
                        != dict2["scan"][str(comp[str(keys1)][1])][str(key2)]
                    ):
                        S.append(
                            "Scan value "
                            "%s"
                            ", with hkl %s differs in measurements %s and %s \n"
                            % (key2, keys1, comp[str(keys1)][0], comp[str(keys1)][1])
                        )
                        S.append(
                            "   dict1: %s \n"
                            % str(dict1["scan"][comp[str(keys1)][0]][str(key2)])
                        )
                        S.append(
                            "   dict2: %s \n"
                            % str(dict2["scan"][comp[str(keys1)][1]][str(key2)])
                        )
                        if key2 not in conflicts:
                            conflicts[key2] = {}
                            conflicts[key2]["amount"] = 1
                            conflicts[key2]["scan"] = str(comp[str(keys1)])
                        else:
                            conflicts[key2]["amount"] = conflicts[key2]["amount"] + 1
                            conflicts[key2]["scan"] = (
                                conflicts[key2]["scan"] + " " + (str(comp[str(keys1)]))
                            )
                except KeyError as e:
                    print("Missing keys, some files were probably merged or subtracted")
                    print(e.args)

            else:
                try:
                    comparison = list(dict1["scan"][comp[str(keys1)][0]][str(key2)]) == list(
                        dict2["scan"][comp[str(keys1)][1]][str(key2)]
                    )
                    if len(list(dict1["scan"][comp[str(keys1)][0]][str(key2)])) != len(
                        list(dict2["scan"][comp[str(keys1)][1]][str(key2)])
                    ):
                        if str("different length of %s" % key2) not in warnings:
                            warnings[str("different length of %s" % key2)] = list()
                            warnings[str("different length of %s" % key2)].append(
                                (str(comp[keys1][0]), str(comp[keys1][1]))
                            )
                        else:
                            warnings[str("different length of %s" % key2)].append(
                                (str(comp[keys1][0]), str(comp[keys1][1]))
                            )
                    if not comparison:
                        S.append(
                            "Scan value "
                            "%s"
                            " differs in scan %s and %s \n"
                            % (key2, comp[str(keys1)][0], comp[str(keys1)][1])
                        )
                        S.append(
                            "   dict1: %s \n"
                            % str(list(dict1["scan"][comp[str(keys1)][0]][str(key2)]))
                        )
                        S.append(
                            "   dict2: %s \n"
                            % str(list(dict2["scan"][comp[str(keys1)][1]][str(key2)]))
                        )
                        if key2 not in conflicts:
                            conflicts[key2] = {}
                            conflicts[key2]["amount"] = 1
                            conflicts[key2]["scan"] = str(comp[str(keys1)])
                        else:
                            conflicts[key2]["amount"] = conflicts[key2]["amount"] + 1
                            conflicts[key2]["scan"] = (
                                conflicts[key2]["scan"] + " " + (str(comp[str(keys1)]))
                            )
                except KeyError as e:
                    print("Missing keys, some files were probably merged or subtracted")
                    print(e.args)

    for keys in conflicts:
        try:
            conflicts[str(keys)]["scan"] = conflicts[str(keys)]["scan"].split(" ")
        except:
            continue
    report_string = "".join(S)
    return warnings, conflicts, report_string


def guess_next(dict1, dict2, comp):
    """iterates through the scans and tries to decide if the scans should be
    subtracted or merged"""
    threshold = 0.05
    for keys in comp:
        if (
            abs(
                (
                    dict1["scan"][str(comp[keys][0])]["temperature"]
                    - dict2["scan"][str(comp[keys][1])]["temperature"]
                )
                / dict2["scan"][str(comp[keys][1])]["temperature"]
            )
            < threshold
            and abs(
                (
                    dict1["scan"][str(comp[keys][0])]["mag_field"]
                    - dict2["scan"][str(comp[keys][1])]["mag_field"]
                )
                / dict2["scan"][str(comp[keys][1])]["mag_field"]
            )
            < threshold
        ):
            comp[keys] = comp[keys] + tuple("m")
        else:
            comp[keys] = comp[keys] + tuple("s")

    return comp


def process_dict(dict1, dict2, comp):
    """subtracts or merges scans; the guess_next function must run first"""
    for keys in comp:
        if comp[keys][2] == "s":
            substract_measurement(dict1, dict2, comp[keys])
        elif comp[keys][2] == "m":
            merge(dict1, dict2, comp[keys])

    return dict1
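As a quick sanity check of the error propagation in merge() above: two Counts values at the same om are combined as ufloats, so the averaged sigma comes out as sqrt(s1**2 + s2**2) / 2. The numbers below are made up.

import uncertainties as u

# averaging two measurements of the same om point, as merge() does
avg = (u.ufloat(100, 10) + u.ufloat(120, 12)) / 2
print(avg.n, avg.s)  # 110.0 and ~7.81 (= sqrt(10**2 + 12**2) / 2)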
@@ -1,75 +0,0 @@
import numpy as np
import scipy as sc
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter


def ccl_findpeaks(
    scan, int_threshold=0.8, prominence=50, smooth=False, window_size=7, poly_order=3
):

    """function iterates through the dictionary created by load_cclv2 and locates peaks for each scan
    args: scan - a single scan,

    int_threshold - fraction of threshold_intensity/max_intensity, must be a positive number
    between 0 and 1, i.e. 0.75 will only detect peaks above 75% of max intensity

    prominence - defines the drop in values that must lie between two peaks, must be a positive
    number, i.e. if prominence is 20, it will detect two neighbouring peaks of 300 and 310
    intensities if none of the intermediate values are lower than 290

    smooth - if true, smooths data by a Savitzky-Golay filter, if false - no smoothing

    window_size - window size for the savgol filter, must be an odd positive integer

    poly_order - order of the polynomial used in the savgol filter, must be a positive integer
    smaller than window_size
    returns: dictionary with the following structure:
    D{M34{ 'num_of_peaks': 1,  # num of peaks
    'peak_indexes': [20],  # index of peaks in omega array
    'peak_heights': [90.],  # height of the peaks (if data were smoothed,
    it's the height of the peaks in the smoothed data)
    """
    if not 0 <= int_threshold <= 1:
        int_threshold = 0.8
        print(
            "Invalid value for int_threshold, select value between 0 and 1, new value set to:",
            int_threshold,
        )

    if not isinstance(window_size, int) or (window_size % 2) == 0 or window_size <= 1:
        window_size = 7
        print(
            "Invalid value for window_size, select positive odd integer, new value set to:",
            window_size,
        )

    if not isinstance(poly_order, int) or window_size < poly_order:
        poly_order = 3
        print(
            "Invalid value for poly_order, select positive integer smaller than window_size, new value set to:",
            poly_order,
        )

    # note: "or" (not "and") so that invalid values are actually caught
    if not isinstance(prominence, (int, float)) or prominence < 0:
        prominence = 50
        print("Invalid value for prominence, select positive number, new value set to:", prominence)

    omega = scan["om"]
    counts = np.array(scan["Counts"])
    if smooth:
        itp = interp1d(omega, counts, kind="linear")
        absintensity = [abs(number) for number in counts]
        lowest_intensity = min(absintensity)
        counts[counts < 0] = lowest_intensity
        smooth_peaks = savgol_filter(itp(omega), window_size, poly_order)

    else:
        smooth_peaks = counts

    peaks, properties = sc.signal.find_peaks(
        smooth_peaks, height=int_threshold * max(smooth_peaks), prominence=prominence
    )
    scan["num_of_peaks"] = len(peaks)
    scan["peak_indexes"] = peaks
    scan["peak_heights"] = properties["peak_heights"]
    scan["smooth_peaks"] = smooth_peaks  # smoothed curve
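A minimal sketch of calling ccl_findpeaks on a synthetic scan dict (the function writes its results back into the same dict); the data below are made up:

import numpy as np

om = np.linspace(10, 20, 51)
scan = {"om": om, "Counts": 1000 * np.exp(-((om - 15) ** 2) / 0.5) + 50}
ccl_findpeaks(scan, int_threshold=0.8, prominence=50)
print(scan["num_of_peaks"], scan["peak_indexes"])  # 1 peak around om = 15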
pyzebra/ccl_io.py (new file, 310 lines)
@@ -0,0 +1,310 @@
import os
import re
from collections import defaultdict

import numpy as np

META_VARS_STR = (
    "instrument",
    "title",
    "sample",
    "user",
    "ProposalID",
    "original_filename",
    "date",
    "zebra_mode",
    "proposal",
    "proposal_user",
    "proposal_title",
    "proposal_email",
    "detectorDistance",
)

META_VARS_FLOAT = (
    "omega",
    "mf",
    "2-theta",
    "chi",
    "phi",
    "nu",
    "temp",
    "wavelenght",
    "a",
    "b",
    "c",
    "alpha",
    "beta",
    "gamma",
    "cex1",
    "cex2",
    "mexz",
    "moml",
    "mcvl",
    "momu",
    "mcvu",
    "snv",
    "snh",
    "snvm",
    "snhm",
    "s1vt",
    "s1vb",
    "s1hr",
    "s1hl",
    "s2vt",
    "s2vb",
    "s2hr",
    "s2hl",
)

META_UB_MATRIX = ("ub1j", "ub2j", "ub3j")

CCL_FIRST_LINE = (("idx", int), ("h", float), ("k", float), ("l", float))

CCL_ANGLES = {
    "bi": (("twotheta", float), ("omega", float), ("chi", float), ("phi", float)),
    "nb": (("gamma", float), ("omega", float), ("nu", float), ("skip_angle", float)),
}

CCL_SECOND_LINE = (
    ("n_points", int),
    ("angle_step", float),
    ("monitor", float),
    ("temp", float),
    ("mf", float),
    ("date", str),
    ("time", str),
    ("scan_motor", str),
)

AREA_METHODS = ("fit_area", "int_area")


def load_1D(filepath):
    """
    Loads a *.ccl or *.dat file (distinguishes them based on the last 3 chars of the filepath
    string); to add more variables to read, extend the elif list;
    the file must include '#data' and the number of points in the right place to work properly

    :arg filepath
    :returns det_variables
    - dictionary of all detector/scan variables and a dictionary for every scan.
    Names of these dictionaries are M + scan number. They include HKL indices, angles,
    monitors, stepsize and array of counts
    """
    with open(filepath, "r") as infile:
        _, ext = os.path.splitext(filepath)
        det_variables = parse_1D(infile, data_type=ext)

    return det_variables


def parse_1D(fileobj, data_type):
    metadata = {"data_type": data_type}

    # read metadata
    for line in fileobj:
        if "=" in line:
            variable, value = line.split("=", 1)
            variable = variable.strip()
            value = value.strip()

            if variable in META_VARS_STR:
                metadata[variable] = value

            elif variable in META_VARS_FLOAT:
                if variable == "2-theta":  # fix that angle name not to be an expression
                    variable = "twotheta"
                if variable in ("a", "b", "c", "alpha", "beta", "gamma"):
                    variable += "_cell"
                metadata[variable] = float(value)

            elif variable in META_UB_MATRIX:
                if "ub" not in metadata:
                    metadata["ub"] = np.zeros((3, 3))
                row = int(variable[-2]) - 1
                metadata["ub"][row, :] = list(map(float, value.split()))

        if "#data" in line:
            # this is the end of metadata and the start of data section
            break

    # handle older files that don't contain "zebra_mode" metadata
    if "zebra_mode" not in metadata:
        metadata["zebra_mode"] = "nb"

    # read data
    scan = []
    if data_type == ".ccl":
        ccl_first_line = CCL_FIRST_LINE + CCL_ANGLES[metadata["zebra_mode"]]
        ccl_second_line = CCL_SECOND_LINE

        for line in fileobj:
            # skip empty/whitespace lines before start of any scan
            if not line or line.isspace():
                continue

            s = {}

            # first line
            for param, (param_name, param_type) in zip(line.split(), ccl_first_line):
                s[param_name] = param_type(param)

            # second line
            next_line = next(fileobj)
            for param, (param_name, param_type) in zip(next_line.split(), ccl_second_line):
                s[param_name] = param_type(param)

            if s["scan_motor"] != "om":
                raise Exception("Unsupported variable name in ccl file.")

            # "om" -> "omega"
            s["scan_motor"] = "omega"
            # overwrite metadata, because it only refers to the scan center
            half_dist = (s["n_points"] - 1) / 2 * s["angle_step"]
            s["omega"] = np.linspace(s["omega"] - half_dist, s["omega"] + half_dist, s["n_points"])

            # subsequent lines with counts
            counts = []
            while len(counts) < s["n_points"]:
                counts.extend(map(float, next(fileobj).split()))
            s["Counts"] = np.array(counts)

            if s["h"].is_integer() and s["k"].is_integer() and s["l"].is_integer():
                s["h"], s["k"], s["l"] = map(int, (s["h"], s["k"], s["l"]))

            scan.append({**metadata, **s})

    elif data_type == ".dat":
        # TODO: this might need to be adapted in the future, when "gamma" will be added to dat files
        if metadata["zebra_mode"] == "nb":
            metadata["gamma"] = metadata["twotheta"]

        s = defaultdict(list)

        match = re.search("Scanning Variables: (.*), Steps: (.*)", next(fileobj))
        if match.group(1) == "h, k, l":
            steps = match.group(2).split()
            for step, ind in zip(steps, "hkl"):
                if float(step) != 0:
                    scan_motor = ind
                    break
        else:
            scan_motor = match.group(1)

        s["scan_motor"] = scan_motor

        match = re.search("(.*) Points, Mode: (.*), Preset (.*)", next(fileobj))
        if match.group(2) != "Monitor":
            raise Exception("Unknown mode in dat file.")
        s["monitor"] = float(match.group(3))

        col_names = next(fileobj).split()

        for line in fileobj:
            if "END-OF-DATA" in line:
                # this is the end of data
                break

            for name, val in zip(col_names, line.split()):
                s[name].append(float(val))

        for name in col_names:
            s[name] = np.array(s[name])

        # "om" -> "omega"
        if s["scan_motor"] == "om":
            s["scan_motor"] = "omega"
            s["omega"] = s["om"]
            del s["om"]

        # "tt" -> "temp"
        elif s["scan_motor"] == "tt":
            s["scan_motor"] = "temp"
            s["temp"] = s["tt"]
            del s["tt"]

        # "mf" stays "mf"
        # "phi" stays "phi"

        if "h" not in s:
            s["h"] = s["k"] = s["l"] = float("nan")

        for param in ("mf", "temp"):
            if param not in metadata:
                s[param] = 0

        s["idx"] = 1

        scan.append({**metadata, **s})

    else:
        print("Unknown file extension")

    return scan


def export_1D(data, path, area_method=AREA_METHODS[0], lorentz=False, hkl_precision=2):
    """Exports data in the .comm/.incomm format

    Scans with integer/real hkl values are saved in .comm/.incomm files correspondingly. If no scans
    are present for a particular output format, that file won't be created.
    """
    zebra_mode = data[0]["zebra_mode"]
    file_content = {".comm": [], ".incomm": []}

    for scan in data:
        if "fit" not in scan:
            continue

        idx_str = f"{scan['idx']:6}"

        h, k, l = scan["h"], scan["k"], scan["l"]
        hkl_are_integers = isinstance(h, int)  # if True, other indices are of type 'int' too
        if hkl_are_integers:
            hkl_str = f"{h:6}{k:6}{l:6}"
        else:
            hkl_str = f"{h:8.{hkl_precision}f}{k:8.{hkl_precision}f}{l:8.{hkl_precision}f}"

        for name, param in scan["fit"].params.items():
            if "amplitude" in name:
                area_n = param.value
                area_s = param.stderr
                break
        else:
            area_n = 0
            area_s = 0

        if area_n is None or area_s is None:
            print(f"Couldn't export scan: {scan['idx']}")
            continue

        # apply lorentz correction to area
        if lorentz:
            if zebra_mode == "bi":
                twotheta = np.deg2rad(scan["twotheta"])
                corr_factor = np.sin(twotheta)
            else:  # zebra_mode == "nb":
                gamma = np.deg2rad(scan["gamma"])
                nu = np.deg2rad(scan["nu"])
                corr_factor = np.sin(gamma) * np.cos(nu)

            area_n = np.abs(area_n * corr_factor)
            area_s = np.abs(area_s * corr_factor)

        area_str = f"{area_n:10.2f}{area_s:10.2f}"

        ang_str = ""
        for angle, _ in CCL_ANGLES[zebra_mode]:
            if angle == scan["scan_motor"]:
                angle_center = (np.min(scan[angle]) + np.max(scan[angle])) / 2
            else:
                angle_center = scan[angle]
            ang_str = ang_str + f"{angle_center:8g}"

        ref = file_content[".comm"] if hkl_are_integers else file_content[".incomm"]
        ref.append(idx_str + hkl_str + area_str + ang_str + "\n")

    for ext, content in file_content.items():
        if content:
            with open(path + ext, "w") as out_file:
                out_file.writelines(content)
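A short usage sketch: load_1D returns a list with one dict per scan, each carrying the file metadata plus the parsed per-scan values (the path below is hypothetical):

dataset = load_1D("data/exp_604.ccl")
first = dataset[0]
print(first["idx"], first["h"], first["k"], first["l"])
print(first["omega"][0], first["omega"][-1], first["Counts"].sum())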
pyzebra/ccl_process.py (new file, 139 lines)
@@ -0,0 +1,139 @@
import itertools
import os

import numpy as np
from lmfit.models import GaussianModel, LinearModel, PseudoVoigtModel, VoigtModel

from .ccl_io import CCL_ANGLES

PARAM_PRECISIONS = {
    "twotheta": 0.1,
    "chi": 0.1,
    "nu": 0.1,
    "phi": 0.05,
    "omega": 0.05,
    "gamma": 0.05,
    "temp": 1,
    "mf": 0.001,
    "ub": 0.01,
}

MAX_RANGE_GAP = {
    "omega": 0.5,
}


def normalize_dataset(dataset, monitor=100_000):
    for scan in dataset:
        monitor_ratio = monitor / scan["monitor"]
        scan["Counts"] *= monitor_ratio
        scan["monitor"] = monitor


def merge_duplicates(dataset):
    for scan_i, scan_j in itertools.combinations(dataset, 2):
        if _parameters_match(scan_i, scan_j):
            merge_scans(scan_i, scan_j)


def _parameters_match(scan1, scan2):
    zebra_mode = scan1["zebra_mode"]
    if zebra_mode != scan2["zebra_mode"]:
        return False

    for param in ("ub", "temp", "mf", *(vars[0] for vars in CCL_ANGLES[zebra_mode])):
        if param.startswith("skip"):
            # ignore skip parameters, like the last angle in 'nb' zebra mode
            continue

        if param == scan1["scan_motor"] == scan2["scan_motor"]:
            # check if ranges of variable parameter overlap
            range1 = scan1[param]
            range2 = scan2[param]
            # maximum gap between ranges of the scanning parameter (default 0)
            max_range_gap = MAX_RANGE_GAP.get(param, 0)
            if max(range1[0] - range2[-1], range2[0] - range1[-1]) > max_range_gap:
                return False

        elif np.max(np.abs(scan1[param] - scan2[param])) > PARAM_PRECISIONS[param]:
            return False

    return True


def merge_datasets(dataset1, dataset2):
    for scan_j in dataset2:
        for scan_i in dataset1:
            if _parameters_match(scan_i, scan_j):
                merge_scans(scan_i, scan_j)
                break

        dataset1.append(scan_j)


def merge_scans(scan1, scan2):
    omega = np.concatenate((scan1["omega"], scan2["omega"]))
    counts = np.concatenate((scan1["Counts"], scan2["Counts"]))

    index = np.argsort(omega)

    scan1["omega"] = omega[index]
    scan1["Counts"] = counts[index]

    scan2["active"] = False

    fname1 = os.path.basename(scan1["original_filename"])
    fname2 = os.path.basename(scan2["original_filename"])
    print(f'Merging scans: {scan1["idx"]} ({fname1}) <-- {scan2["idx"]} ({fname2})')


def fit_scan(scan, model_dict, fit_from=None, fit_to=None):
    if fit_from is None:
        fit_from = -np.inf
    if fit_to is None:
        fit_to = np.inf

    y_fit = scan["Counts"]
    x_fit = scan[scan["scan_motor"]]

    # apply fitting range
    fit_ind = (fit_from <= x_fit) & (x_fit <= fit_to)
    y_fit = y_fit[fit_ind]
    x_fit = x_fit[fit_ind]

    model = None
    for model_index, (model_name, model_param) in enumerate(model_dict.items()):
        model_name, _ = model_name.split("-")
        prefix = f"f{model_index}_"

        if model_name == "linear":
            _model = LinearModel(prefix=prefix)
        elif model_name == "gaussian":
            _model = GaussianModel(prefix=prefix)
        elif model_name == "voigt":
            _model = VoigtModel(prefix=prefix)
        elif model_name == "pvoigt":
            _model = PseudoVoigtModel(prefix=prefix)
        else:
            raise ValueError(f"Unknown model name: '{model_name}'")

        _init_guess = _model.guess(y_fit, x=x_fit)

        for param_index, param_name in enumerate(model_param["param"]):
            param_hints = {}
            for hint_name in ("value", "vary", "min", "max"):
                tmp = model_param[hint_name][param_index]
                if tmp is None:
                    param_hints[hint_name] = getattr(_init_guess[prefix + param_name], hint_name)
                else:
                    param_hints[hint_name] = tmp

            _model.set_param_hint(param_name, **param_hints)

        if model is None:
            model = _model
        else:
            model += _model

    weights = [1 / np.sqrt(val) if val != 0 else 1 for val in y_fit]
    scan["fit"] = model.fit(y_fit, x=x_fit, weights=weights)
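An illustrative model_dict for fit_scan above. Keys have the form "<model>-<id>" (only the part before "-" selects the lmfit model; the suffix just keeps keys unique), and the hint lists are indexed in step with "param"; None means "take this hint from lmfit's guess()". The exact dicts the GUI builds may differ:

fit_params = {
    "linear-0": {
        "param": ["slope", "intercept"],
        "value": [None, None],
        "vary": [True, True],
        "min": [None, None],
        "max": [None, None],
    },
    "gaussian-1": {
        "param": ["amplitude", "center", "sigma"],
        "value": [None, None, None],
        "vary": [True, True, True],
        "min": [0, None, None],  # force a non-negative peak area
        "max": [None, None, None],
    },
}
fit_scan(scan, fit_params)  # scan: one dict from ccl_io.load_1D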
@@ -1,80 +0,0 @@
import numpy as np


def correction(value, lorentz=True, zebra_mode="--", ang1=0, ang2=0):
    if lorentz is False:
        return value
    else:
        if zebra_mode == "bi":
            corr_value = np.abs(value * np.sin(ang1))
            return corr_value
        elif zebra_mode == "nb":
            corr_value = np.abs(value * np.sin(ang1) * np.cos(ang2))
            return corr_value


def export_comm(data, path, lorentz=False):
    """exports data in the *.comm format
    :param lorentz: perform Lorentz correction
    :param path: path to file + name
    :arg data - data to export, is dict after peak fitting

    """
    zebra_mode = data["meta"]["zebra_mode"]
    align = ">"
    if data["meta"]["indices"] == "hkl":
        extension = ".comm"
        padding = [6, 4, 10, 8]
    elif data["meta"]["indices"] == "real":
        extension = ".incomm"
        padding = [4, 6, 10, 8]

    with open(str(path + extension), "w") as out_file:
        for key, scan in data["scan"].items():
            if "fit" not in scan:
                print("Scan skipped - no fit value for:", key)
                continue
            scan_number_str = f"{key:{align}{padding[0]}}"
            h_str = f'{int(scan["h_index"]):{padding[1]}}'
            k_str = f'{int(scan["k_index"]):{padding[1]}}'
            l_str = f'{int(scan["l_index"]):{padding[1]}}'
            if data["meta"]["area_method"] == "fit":
                area = float(scan["fit"]["fit_area"].n)
                sigma_str = (
                    f'{"{:8.2f}".format(float(scan["fit"]["fit_area"].s)):{align}{padding[2]}}'
                )
            elif data["meta"]["area_method"] == "integ":
                area = float(scan["fit"]["int_area"].n)
                sigma_str = (
                    f'{"{:8.2f}".format(float(scan["fit"]["int_area"].s)):{align}{padding[2]}}'
                )

            if zebra_mode == "bi":
                area = correction(area, lorentz, zebra_mode, scan["twotheta_angle"])
                int_str = f'{"{:8.2f}".format(area):{align}{padding[2]}}'
                angle_str1 = f'{scan["twotheta_angle"]:{padding[3]}}'
                angle_str2 = f'{scan["omega_angle"]:{padding[3]}}'
                angle_str3 = f'{scan["chi_angle"]:{padding[3]}}'
                angle_str4 = f'{scan["phi_angle"]:{padding[3]}}'
            elif zebra_mode == "nb":
                area = correction(area, lorentz, zebra_mode, scan["gamma_angle"], scan["nu_angle"])
                int_str = f'{"{:8.2f}".format(area):{align}{padding[2]}}'
                angle_str1 = f'{scan["gamma_angle"]:{padding[3]}}'
                angle_str2 = f'{scan["omega_angle"]:{padding[3]}}'
                angle_str3 = f'{scan["nu_angle"]:{padding[3]}}'
                angle_str4 = f'{scan["unkwn_angle"]:{padding[3]}}'

            line = (
                scan_number_str
                + h_str
                + l_str
                + k_str
                + int_str
                + sigma_str
                + angle_str1
                + angle_str2
                + angle_str3
                + angle_str4
                + "\n"
            )
            out_file.write(line)
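A quick numeric check of correction() above in "nb" mode, with the angles converted to radians first (the function applies np.sin/np.cos directly to its arguments); the numbers are made up:

import numpy as np

area = 1000.0
gamma, nu = np.deg2rad(30.0), np.deg2rad(10.0)
# |1000 * sin(30 deg) * cos(10 deg)| = ~492.4
print(correction(area, lorentz=True, zebra_mode="nb", ang1=gamma, ang2=nu))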
pyzebra/fit2.py (227 lines deleted)
@@ -1,227 +0,0 @@
|
|||||||
import numpy as np
|
|
||||||
import uncertainties as u
|
|
||||||
from lmfit import Model, Parameters
|
|
||||||
from scipy.integrate import simps
|
|
||||||
|
|
||||||
|
|
||||||
def bin_data(array, binsize):
|
|
||||||
if isinstance(binsize, int) and 0 < binsize < len(array):
|
|
||||||
return [
|
|
||||||
np.mean(array[binsize * i : binsize * i + binsize])
|
|
||||||
for i in range(int(np.ceil(len(array) / binsize)))
|
|
||||||
]
|
|
||||||
else:
|
|
||||||
print("Binsize need to be positive integer smaller than lenght of array")
|
|
||||||
return array
|
|
||||||
|
|
||||||
|
|
||||||
def find_nearest(array, value):
|
|
||||||
# find nearest value and return index
|
|
||||||
array = np.asarray(array)
|
|
||||||
idx = (np.abs(array - value)).argmin()
|
|
||||||
return idx
|
|
||||||
|
|
||||||
|
|
||||||
def create_uncertanities(y, y_err):
|
|
||||||
# create array with uncertanities for error propagation
|
|
||||||
combined = np.array([])
|
|
||||||
for i in range(len(y)):
|
|
||||||
part = u.ufloat(y[i], y_err[i])
|
|
||||||
combined = np.append(combined, part)
|
|
||||||
return combined
|
|
||||||
|
|
||||||
|
|
||||||
def fitccl(
|
|
||||||
scan,
|
|
||||||
guess,
|
|
||||||
vary,
|
|
||||||
constraints_min,
|
|
||||||
constraints_max,
|
|
||||||
numfit_min=None,
|
|
||||||
numfit_max=None,
|
|
||||||
binning=None,
|
|
||||||
):
|
|
||||||
"""Made for fitting of ccl date where 1 peak is expected. Allows for combination of gaussian and linear model combination
|
|
||||||
:param scan: scan in the data dict (i.e. M123)
|
|
||||||
:param guess: initial guess for the fitting, if none, some values are added automatically in order (see below)
|
|
||||||
:param vary: True if parameter can vary during fitting, False if it to be fixed
|
|
||||||
:param numfit_min: minimal value on x axis for numerical integration - if none is centre of gaussian minus 3 sigma
|
|
||||||
:param numfit_max: maximal value on x axis for numerical integration - if none is centre of gaussian plus 3 sigma
|
|
||||||
:param constraints_min: min constranits value for fit
|
|
||||||
:param constraints_max: max constranits value for fit
|
|
||||||
:param binning : binning of the data
|
|
||||||
:return data dict with additional values
|
|
||||||
order for guess, vary, constraints_min, constraints_max:
|
|
||||||
[Gaussian centre, Gaussian sigma, Gaussian amplitude, background slope, background intercept]
|
|
||||||
examples:
|
|
||||||
guess = [None, None, 100, 0, None]
|
|
||||||
vary = [True, True, True, True, True]
|
|
||||||
constraints_min = [23, None, 50, 0, 0]
|
|
||||||
constraints_min = [80, None, 1000, 0, 100]
|
|
||||||
"""
|
|
||||||
if len(scan["peak_indexes"]) > 1:
|
|
||||||
# return in case of more than 1 peaks
|
|
||||||
print("More than 1 peak, scan skipped")
|
|
||||||
return
|
|
||||||
if binning is None or binning == 0 or binning == 1:
|
|
||||||
x = list(scan["om"])
|
|
||||||
y = list(scan["Counts"])
|
|
||||||
y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
|
|
||||||
print(scan["peak_indexes"])
|
|
||||||
if not scan["peak_indexes"]:
|
|
||||||
centre = np.mean(x)
|
|
||||||
else:
|
|
||||||
centre = x[int(scan["peak_indexes"])]
|
|
||||||
else:
|
|
||||||
x = list(scan["om"])
|
|
||||||
if not scan["peak_indexes"]:
|
|
||||||
centre = np.mean(x)
|
|
||||||
else:
|
|
||||||
centre = x[int(scan["peak_indexes"])]
|
|
||||||
x = bin_data(x, binning)
|
|
||||||
y = list(scan["Counts"])
|
|
||||||
y_err = list(np.sqrt(y)) if scan.get("sigma", None) is None else list(scan["sigma"])
|
|
||||||
combined = bin_data(create_uncertanities(y, y_err), binning)
|
|
||||||
y = [combined[i].n for i in range(len(combined))]
|
|
||||||
y_err = [combined[i].s for i in range(len(combined))]
|
|
||||||
|
|
||||||
    if len(scan["peak_indexes"]) == 0:
        # case of no peak: Gaussian in the centre, sigma as 20% of the range
        print("No peak")
        peak_index = find_nearest(x, np.mean(x))
        guess[0] = centre if guess[0] is None else guess[0]
        guess[1] = (x[-1] - x[0]) / 5 if guess[1] is None else guess[1]
        guess[2] = 50 if guess[2] is None else guess[2]
        guess[3] = 0 if guess[3] is None else guess[3]
        guess[4] = np.mean(y) if guess[4] is None else guess[4]
        constraints_min[2] = 0

    elif len(scan["peak_indexes"]) == 1:
        # case of one peak: takes the user's guesses into account
        print("one peak")
        peak_height = scan["peak_heights"]
        guess[0] = centre if guess[0] is None else guess[0]
        guess[1] = 0.1 if guess[1] is None else guess[1]
        guess[2] = float(peak_height / 10) if guess[2] is None else float(guess[2])
        guess[3] = 0 if guess[3] is None else guess[3]
        guess[4] = np.median(x) if guess[4] is None else guess[4]
        constraints_min[0] = np.min(x) if constraints_min[0] is None else constraints_min[0]
        constraints_max[0] = np.max(x) if constraints_max[0] is None else constraints_max[0]

    def gaussian(x, g_cen, g_width, g_amp):
        """1-d gaussian: gaussian(x, g_cen, g_width, g_amp)"""
        return (g_amp / (np.sqrt(2 * np.pi) * g_width)) * np.exp(
            -((x - g_cen) ** 2) / (2 * g_width ** 2)
        )

    def background(x, slope, intercept):
        """linear background, centred on the peak position"""
        return slope * (x - centre) + intercept

    mod = Model(gaussian) + Model(background)
    params = Parameters()
    # add_many tuples: (name, value, vary, min, max, expr, brute_step)
    params.add_many(
        ("g_cen", guess[0], bool(vary[0]), np.min(x), np.max(x), None, None),
        ("g_width", guess[1], bool(vary[1]), constraints_min[1], constraints_max[1], None, None),
        ("g_amp", guess[2], bool(vary[2]), constraints_min[2], constraints_max[2], None, None),
        ("slope", guess[3], bool(vary[3]), constraints_min[3], constraints_max[3], None, None),
        ("intercept", guess[4], bool(vary[4]), constraints_min[4], constraints_max[4], None, None),
    )
    # the weighted fit
    try:
        result = mod.fit(
            y, params, weights=[np.abs(1 / val) for val in y_err], x=x, calc_covar=True,
        )
    except ValueError:
        return

    # guard against a missing or unreasonably large amplitude error
    if result.params["g_amp"].stderr is None:
        result.params["g_amp"].stderr = result.params["g_amp"].value
    elif result.params["g_amp"].stderr > result.params["g_amp"].value:
        result.params["g_amp"].stderr = result.params["g_amp"].value

    # u.ufloat to work with uncertainties
    fit_area = u.ufloat(result.params["g_amp"].value, result.params["g_amp"].stderr)
    comps = result.eval_components()

    if len(scan["peak_indexes"]) == 0:
        # for the case of no peak there is no reason to integrate, therefore fit and int are equal
        int_area = fit_area

    elif len(scan["peak_indexes"]) == 1:
        gauss_3sigmamin = find_nearest(
            x, result.params["g_cen"].value - 3 * result.params["g_width"].value
        )
        gauss_3sigmamax = find_nearest(
            x, result.params["g_cen"].value + 3 * result.params["g_width"].value
        )
        numfit_min = gauss_3sigmamin if numfit_min is None else find_nearest(x, numfit_min)
        numfit_max = gauss_3sigmamax if numfit_max is None else find_nearest(x, numfit_max)

        it = -1
        while abs(numfit_max - numfit_min) < 3:
            # if the peak is very thin and numerical integration would span a near-zero
            # omega difference, progressively widen the window to the closest valid values
            it = it + 1
            numfit_min = find_nearest(
                x,
                result.params["g_cen"].value - 3 * (1 + it / 10) * result.params["g_width"].value,
            )
            numfit_max = find_nearest(
                x,
                result.params["g_cen"].value + 3 * (1 + it / 10) * result.params["g_width"].value,
            )

        if x[numfit_min] < np.min(x):
            # makes sure that the values supplied by the user lie in the omega range;
            # can be omitted for users who know what they are doing
            numfit_min = gauss_3sigmamin
            print("Minimal integration value outside of x range")
        elif x[numfit_min] >= x[numfit_max]:
            numfit_min = gauss_3sigmamin
            print("Minimal integration value higher than maximal")

        if x[numfit_max] > np.max(x):
            numfit_max = gauss_3sigmamax
            print("Maximal integration value outside of x range")
        elif x[numfit_max] <= x[numfit_min]:
            numfit_max = gauss_3sigmamax
            print("Maximal integration value lower than minimal")

        # create error vector for numerical integration propagation
        count_errors = create_uncertanities(y, y_err)
        num_int_area = simps(count_errors[numfit_min:numfit_max], x[numfit_min:numfit_max])

        # pull the nominal and error values of slope and intercept from the fit
        slope_err = u.ufloat(result.params["slope"].value, result.params["slope"].stderr)
        intercept_err = u.ufloat(
            result.params["intercept"].value, result.params["intercept"].stderr
        )

        # nominal-and-error vector for numerical integration of the background,
        # evaluated at the same x points as the integration window
        background_errors = np.array([])
        for j in range(len(x[numfit_min:numfit_max])):
            bg = slope_err * (x[numfit_min + j] - centre) + intercept_err
            background_errors = np.append(background_errors, bg)

        num_int_background = simps(background_errors, x[numfit_min:numfit_max])
        int_area = num_int_area - num_int_background

    d = {}
    for pars in result.params:
        d[str(pars)] = (result.params[str(pars)].value, result.params[str(pars)].vary)
    print(result.fit_report())

    # relative difference between fitted amplitude and numerically integrated area
    print((result.params["g_amp"].value - int_area.n) / result.params["g_amp"].value)

    d["ratio"] = (result.params["g_amp"].value - int_area.n) / result.params["g_amp"].value
    d["int_area"] = int_area
    d["fit_area"] = u.ufloat(result.params["g_amp"].value, result.params["g_amp"].stderr)
    d["full_report"] = result.fit_report()
    d["result"] = result
    d["comps"] = comps
    d["numfit"] = [numfit_min, numfit_max]
    scan["fit"] = d

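A minimal usage sketch for fitccl, assuming a scan dict produced by load_1D with peak indexes already attached by the package's peak-finding step (file name hypothetical):

    data = load_1D("example.ccl")  # hypothetical file
    scan = data["scan"][1]
    fitccl(
        scan,
        guess=[None, None, 100, 0, None],
        vary=[True, True, True, True, True],
        constraints_min=[None, None, 0, None, None],
        constraints_max=[None, None, None, None, None],
    )
    print(scan["fit"]["int_area"])  # ufloat: nominal value +/- standard deviation
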
@@ -41,7 +41,7 @@ def read_detector_data(filepath):
         filepath (str): File path of an h5 file.

     Returns:
-        ndarray: A 3D array of data, rot_angle, pol_angle, tilt_angle.
+        ndarray: A 3D array of data, omega, gamma, nu.
     """
     with h5py.File(filepath, "r") as h5f:
         data = h5f["/entry1/area_detector2/data"][:]
@@ -52,15 +52,37 @@ def read_detector_data(filepath):

     det_data = {"data": data}

-    det_data["rot_angle"] = h5f["/entry1/area_detector2/rotation_angle"][:]  # om, sometimes ph
-    det_data["pol_angle"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:]  # gammad
-    det_data["tlt_angle"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:]  # nud
+    if "/entry1/zebra_mode" in h5f:
+        det_data["zebra_mode"] = h5f["/entry1/zebra_mode"][0].decode()
+    else:
+        det_data["zebra_mode"] = "nb"
+
+    # om, sometimes ph
+    if det_data["zebra_mode"] == "nb":
+        det_data["omega"] = h5f["/entry1/area_detector2/rotation_angle"][:]
+    else:  # bi
+        det_data["omega"] = h5f["/entry1/sample/rotation_angle"][:]
+
+    det_data["gamma"] = h5f["/entry1/ZEBRA/area_detector2/polar_angle"][:]  # gammad
+    det_data["nu"] = h5f["/entry1/ZEBRA/area_detector2/tilt_angle"][:]  # nud
     det_data["ddist"] = h5f["/entry1/ZEBRA/area_detector2/distance"][:]
     det_data["wave"] = h5f["/entry1/ZEBRA/monochromator/wavelength"][:]
-    det_data["chi_angle"] = h5f["/entry1/sample/chi"][:]  # ch
-    det_data["phi_angle"] = h5f["/entry1/sample/phi"][:]  # ph
-    det_data["UB"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
-    det_data["magnetic_field"] = h5f["/entry1/sample/magnetic_field"][:]
-    det_data["temperature"] = h5f["/entry1/sample/temperature"][:]
+    det_data["chi"] = h5f["/entry1/sample/chi"][:]  # ch
+    det_data["phi"] = h5f["/entry1/sample/phi"][:]  # ph
+    det_data["ub"] = h5f["/entry1/sample/UB"][:].reshape(3, 3)
+
+    for var in ("omega", "gamma", "nu", "chi", "phi"):
+        if abs(det_data[var][0] - det_data[var][-1]) > 0.1:
+            det_data["scan_motor"] = var
+            break
+    else:
+        raise ValueError("No angles that vary")
+
+    # optional parameters
+    if "/entry1/sample/magnetic_field" in h5f:
+        det_data["mf"] = h5f["/entry1/sample/magnetic_field"][:]
+
+    if "/entry1/sample/temperature" in h5f:
+        det_data["temp"] = h5f["/entry1/sample/temperature"][:]

     return det_data
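A short usage sketch for the updated reader (file path hypothetical):

    det_data = read_detector_data("zebra_scan.h5")  # hypothetical path
    det_data["zebra_mode"]   # "nb" or "bi"
    det_data["scan_motor"]   # first angle that varies across the scan
    det_data["data"]         # 3D detector stack, one frame per scan point
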
@@ -1,221 +0,0 @@
import os
import re
from collections import defaultdict
from decimal import Decimal

import numpy as np

META_VARS_STR = (
    "instrument",
    "title",
    "sample",
    "user",
    "ProposalID",
    "original_filename",
    "date",
    "zebra_mode",
    "proposal",
    "proposal_user",
    "proposal_title",
    "proposal_email",
    "detectorDistance",
)
META_VARS_FLOAT = (
    "omega",
    "mf",
    "2-theta",
    "chi",
    "phi",
    "nu",
    "temp",
    "wavelenght",  # (sic) presumably matches the spelling used in the data files
    "a",
    "b",
    "c",
    "alpha",
    "beta",
    "gamma",
    "cex1",
    "cex2",
    "mexz",
    "moml",
    "mcvl",
    "momu",
    "mcvu",
    "snv",
    "snh",
    "snvm",
    "snhm",
    "s1vt",
    "s1vb",
    "s1hr",
    "s1hl",
    "s2vt",
    "s2vb",
    "s2hr",
    "s2hl",
)
META_UB_MATRIX = ("ub1j", "ub2j", "ub3j")

CCL_FIRST_LINE = (
    # the first element is `scan_number`, which we don't save to metadata
    ("h_index", float),
    ("k_index", float),
    ("l_index", float),
)

CCL_FIRST_LINE_BI = (
    *CCL_FIRST_LINE,
    ("twotheta_angle", float),
    ("omega_angle", float),
    ("chi_angle", float),
    ("phi_angle", float),
)

CCL_FIRST_LINE_NB = (
    *CCL_FIRST_LINE,
    ("gamma_angle", float),
    ("omega_angle", float),
    ("nu_angle", float),
    ("unkwn_angle", float),
)

CCL_SECOND_LINE = (
    ("number_of_measurements", int),
    ("angle_step", float),
    ("monitor", float),
    ("temperature", float),
    ("mag_field", float),
    ("date", str),
    ("time", str),
    ("scan_type", str),
)

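To see how these tuples drive the parser: the tokens of a scan's first line are zipped against the (name, type) pairs; with a hypothetical nb-mode first line the mapping works out like this:

    line = "1  1.0  0.0  2.0  40.21  12.35  0.00  0.00"  # hypothetical content
    scan_number, *params = line.split()
    d = {name: typ(val) for val, (name, typ) in zip(params, CCL_FIRST_LINE_NB)}
    # d["h_index"] == 1.0, d["gamma_angle"] == 40.21, d["omega_angle"] == 12.35, ...
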
def load_1D(filepath):
    """
    Loads a *.ccl or *.dat file (distinguishes them based on the file extension).
    To add more variables to read, extend the elif list.
    The file must include '#data' and the number of points in the right place to work properly.

    :arg filepath
    :returns det_variables
    - dictionary of all detector/scan variables and a dictionary for every scan.
      Names of these dictionaries are M + scan number. They include HKL indices, angles,
      monitors, step size and the array of counts
    """
    with open(filepath, "r") as infile:
        _, ext = os.path.splitext(filepath)
        det_variables = parse_1D(infile, data_type=ext)

    return det_variables


def parse_1D(fileobj, data_type):
    # read metadata
    metadata = {}
    for line in fileobj:
        if "=" in line:
            variable, value = line.split("=")
            variable = variable.strip()
            if variable in META_VARS_FLOAT:
                metadata[variable] = float(value)
            elif variable in META_VARS_STR:
                metadata[variable] = str(value)[:-1].strip()
            elif variable in META_UB_MATRIX:
                metadata[variable] = re.findall(r"[-+]?\d*\.\d+|\d+", str(value))

        if "#data" in line:
            # this is the end of metadata and the start of the data section
            break

    # read data
    scan = {}
    if data_type == ".ccl":
        decimal = list()

        if metadata["zebra_mode"] == "bi":
            ccl_first_line = CCL_FIRST_LINE_BI
        elif metadata["zebra_mode"] == "nb":
            ccl_first_line = CCL_FIRST_LINE_NB
        ccl_second_line = CCL_SECOND_LINE

        for line in fileobj:
            d = {}

            # first line
            scan_number, *params = line.split()
            for param, (param_name, param_type) in zip(params, ccl_first_line):
                d[param_name] = param_type(param)

            decimal.append(bool(Decimal(d["h_index"]) % 1 == 0))
            decimal.append(bool(Decimal(d["k_index"]) % 1 == 0))
            decimal.append(bool(Decimal(d["l_index"]) % 1 == 0))

            # second line
            next_line = next(fileobj)
            params = next_line.split()
            for param, (param_name, param_type) in zip(params, ccl_second_line):
                d[param_name] = param_type(param)

            d["om"] = np.linspace(
                d["omega_angle"] - (d["number_of_measurements"] / 2) * d["angle_step"],
                d["omega_angle"] + (d["number_of_measurements"] / 2) * d["angle_step"],
                d["number_of_measurements"],
            )

            # subsequent lines with counts
            counts = []
            while len(counts) < d["number_of_measurements"]:
                counts.extend(map(int, next(fileobj).split()))
            d["Counts"] = counts

            scan[int(scan_number)] = d

        if all(decimal):
            metadata["indices"] = "hkl"
        else:
            metadata["indices"] = "real"

    elif data_type == ".dat":
        # skip the first 2 rows; the third row contains the column names
        next(fileobj)
        next(fileobj)
        col_names = next(fileobj).split()
        data_cols = defaultdict(list)

        for line in fileobj:
            if "END-OF-DATA" in line:
                # this is the end of data
                break

            for name, val in zip(col_names, line.split()):
                data_cols[name].append(float(val))

        try:
            data_cols["h_index"] = float(metadata["title"].split()[-3])
            data_cols["k_index"] = float(metadata["title"].split()[-2])
            data_cols["l_index"] = float(metadata["title"].split()[-1])
        except (ValueError, IndexError):
            print("seems hkl is not in title")

        data_cols["temperature"] = metadata["temp"]
        data_cols["mag_field"] = metadata["mf"]
        data_cols["omega_angle"] = metadata["omega"]
        data_cols["number_of_measurements"] = len(data_cols["om"])
        data_cols["monitor"] = data_cols["Monitor1"][0]
        data_cols["twotheta_angle"] = metadata["2-theta"]
        data_cols["chi_angle"] = metadata["chi"]
        data_cols["phi_angle"] = metadata["phi"]
        data_cols["nu_angle"] = metadata["nu"]

        scan[1] = dict(data_cols)

    else:
        print("Unknown file extension")

    # utility information
    metadata["data_type"] = data_type
    metadata["area_method"] = "fit"

    return {"meta": metadata, "scan": scan}

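Usage sketch (file name hypothetical, assuming the file's first scan is numbered 1):

    res = load_1D("scan_001.ccl")  # hypothetical file
    res["meta"]["zebra_mode"]      # "bi" or "nb"
    res["meta"]["indices"]         # "hkl" for integer HKL, otherwise "real"
    res["scan"][1]["Counts"]       # list of detector counts for the scan
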
@@ -1,202 +0,0 @@
from load_1D import load_1D
from ccl_dict_operation import add_dict
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D  # don't delete, otherwise waterfall won't work
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pickle
import scipy.io as sio


def load_dats(filepath):
    """Reads the txt file, gets headers and data.

    :arg filepath to txt file or list of filepaths to the files
    :return ccl-like dictionary"""
    if isinstance(filepath, str):
        data_type = "txt"
        file_list = list()
        with open(filepath, "r") as infile:
            col_names = next(infile).split(",")
            col_names = [col_names[i].rstrip() for i in range(len(col_names))]
            for line in infile:
                if "END" in line:
                    break
                file_list.append(tuple(line.split(",")))
    elif isinstance(filepath, list):
        data_type = "list"
        file_list = filepath

    dict1 = {}
    for i in range(len(file_list)):
        if not dict1:
            if data_type == "txt":
                dict1 = load_1D(file_list[0][0])
            else:
                dict1 = load_1D(file_list[0])
        else:
            if data_type == "txt":
                dict1 = add_dict(dict1, load_1D(file_list[i][0]))
            else:
                dict1 = add_dict(dict1, load_1D(file_list[i]))
        dict1["scan"][i + 1]["params"] = {}
        if data_type == "txt":
            for x in range(len(col_names) - 1):
                dict1["scan"][i + 1]["params"][col_names[x + 1]] = file_list[i][x + 1]

    return dict1

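Both input forms take the same route through the loader; a sketch with hypothetical paths:

    dict1 = load_dats(["a.dat", "b.dat"])      # plain list of files
    dict1 = load_dats("measurement_list.txt")  # txt index: first column is the path,
                                               # remaining columns become per-scan "params"
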
def create_dataframe(dict1):
    """Creates a pandas dataframe from the dictionary.

    :arg ccl-like dictionary
    :return pandas dataframe"""
    # create a dictionary into which we pull only the wanted items before transforming it to a pd.DataFrame
    pull_dict = {}
    pull_dict["filenames"] = list()
    for key in dict1["scan"][1]["params"]:
        pull_dict[key] = list()
    pull_dict["temperature"] = list()
    pull_dict["mag_field"] = list()
    pull_dict["fit_area"] = list()
    pull_dict["int_area"] = list()
    pull_dict["om"] = list()
    pull_dict["Counts"] = list()

    # populate the dict
    for keys in dict1["scan"]:
        if "file_of_origin" in dict1["scan"][keys]:
            pull_dict["filenames"].append(dict1["scan"][keys]["file_of_origin"].split("/")[-1])
        else:
            pull_dict["filenames"].append(dict1["meta"]["original_filename"].split("/")[-1])
        for key in dict1["scan"][keys]["params"]:
            pull_dict[str(key)].append(float(dict1["scan"][keys]["params"][key]))
        pull_dict["temperature"].append(dict1["scan"][keys]["temperature"])
        pull_dict["mag_field"].append(dict1["scan"][keys]["mag_field"])
        pull_dict["fit_area"].append(dict1["scan"][keys]["fit"]["fit_area"])
        pull_dict["int_area"].append(dict1["scan"][keys]["fit"]["int_area"])
        pull_dict["om"].append(dict1["scan"][keys]["om"])
        pull_dict["Counts"].append(dict1["scan"][keys]["Counts"])

    return pd.DataFrame(data=pull_dict)

def sort_dataframe(dataframe, sorting_parameter):
    """Sorts the dataframe and resets the index."""
    data = dataframe.sort_values(by=sorting_parameter)
    data = data.reset_index(drop=True)
    return data

def make_graph(data, sorting_parameter, style):
    """Makes the graph from the data based on style and sorting parameter.

    :arg data : pandas dataframe with data after sorting
    :arg sorting_parameter to pull the correct variable and name
    :arg style of the graph - waterfall, scatter, heatmap
    :return matplotlib figure"""
    if style == "waterfall":
        mpl.rcParams["legend.fontsize"] = 10
        fig = plt.figure()
        ax = fig.gca(projection="3d")
        for i in range(len(data)):
            x = data["om"][i]
            z = data["Counts"][i]
            yy = [data[sorting_parameter][i]] * len(x)
            ax.plot(x, yy, z, label=str("%s = %f" % (sorting_parameter, yy[i])))

        ax.legend()
        ax.set_xlabel("Omega")
        ax.set_ylabel(sorting_parameter)
        ax.set_zlabel("counts")

    elif style == "scatter":
        fig = plt.figure()
        plt.errorbar(
            data[sorting_parameter],
            [data["fit_area"][i].n for i in range(len(data["fit_area"]))],
            [data["fit_area"][i].s for i in range(len(data["fit_area"]))],
            capsize=5,
            ecolor="green",
        )
        plt.xlabel(str(sorting_parameter))
        plt.ylabel("Intensity")

    elif style == "heat":
        new_om = list()
        for i in range(len(data)):
            new_om = np.append(new_om, np.around(data["om"][i], 2), axis=0)
        unique_om = np.unique(new_om)
        color_matrix = np.zeros(shape=(len(data), len(unique_om)))
        for i in range(len(data)):
            for j in range(len(data["om"][i])):
                if np.around(data["om"][i][j], 2) in np.unique(new_om):
                    color_matrix[i, j] = data["Counts"][i][j]
                else:
                    continue

        fig = plt.figure()
        plt.pcolormesh(unique_om, data[sorting_parameter], color_matrix, shading="gouraud")
        plt.xlabel("omega")
        plt.ylabel(sorting_parameter)
        plt.colorbar()
        plt.clim(color_matrix.mean(), color_matrix.max())

    return fig

def save_dict(obj, name):
    """Saves a dictionary as a pickle file in binary format.

    :arg obj - object to save
    :arg name - name of the file
    NOTE: path should be added later"""
    with open(name + ".pkl", "wb") as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)


def load_dict(name):
    """Loads a dictionary from a pickle file.

    :arg name - name of the file to load
    NOTE: expects the file in the same folder, path should be added later
    :return dictionary"""
    with open(name + ".pkl", "rb") as f:
        return pickle.load(f)

# pickle, mat, h5, txt, csv, json
def save_table(data, filetype, name, path=None):
    print("Saving: ", filetype)
    path = "" if path is None else path
    if filetype == "pickle":
        # to work with uncertainties, see the uncertainties module
        with open(path + name + ".pkl", "wb") as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
    if filetype == "mat":
        # matlab doesn't allow some special characters in var names and names can't
        # start with numbers; if needed, add more to the remove_characters list
        data["fit_area_nom"] = [data["fit_area"][i].n for i in range(len(data["fit_area"]))]
        data["fit_area_err"] = [data["fit_area"][i].s for i in range(len(data["fit_area"]))]
        data["int_area_nom"] = [data["int_area"][i].n for i in range(len(data["int_area"]))]
        data["int_area_err"] = [data["int_area"][i].s for i in range(len(data["int_area"]))]
        data = data.drop(columns=["fit_area", "int_area"])
        remove_characters = [" ", "[", "]", "{", "}", "(", ")"]
        for character in remove_characters:
            data.columns = [
                data.columns[i].replace(character, "") for i in range(len(data.columns))
            ]
        sio.savemat((path + name + ".mat"), {name: col.values for name, col in data.items()})
    if filetype in ("csv", "txt"):
        data["fit_area_nom"] = [data["fit_area"][i].n for i in range(len(data["fit_area"]))]
        data["fit_area_err"] = [data["fit_area"][i].s for i in range(len(data["fit_area"]))]
        data["int_area_nom"] = [data["int_area"][i].n for i in range(len(data["int_area"]))]
        data["int_area_err"] = [data["int_area"][i].s for i in range(len(data["int_area"]))]
        data = data.drop(columns=["fit_area", "int_area", "om", "Counts"])
        if filetype == "csv":
            data.to_csv(path + name + ".csv")
        if filetype == "txt":
            with open((path + name + ".txt"), "w") as outfile:
                data.to_string(outfile)
    if filetype == "h5":
        hdf = pd.HDFStore((path + name + ".h5"))
        hdf.put("data", data)
        hdf.close()
    if filetype == "json":
        data.to_json((path + name + ".json"))

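Putting the module together, the intended pipeline was roughly the following (paths hypothetical; each scan needs a "fit" entry, e.g. from fitccl, before create_dataframe is called):

    dict1 = load_dats("measurement_list.txt")  # hypothetical index file
    df = sort_dataframe(create_dataframe(dict1), "temperature")
    fig = make_graph(df, "temperature", "scatter")
    save_table(df, "csv", "results")           # writes results.csv
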
@@ -407,24 +407,24 @@ def box_int(file, box):

     dat = pyzebra.read_detector_data(file)

-    sttC = dat["pol_angle"][0]
-    om = dat["rot_angle"]
-    nuC = dat["tlt_angle"][0]
+    sttC = dat["gamma"][0]
+    om = dat["omega"]
+    nuC = dat["nu"][0]
     ddist = dat["ddist"]

     # defining indices
     x0, xN, y0, yN, fr0, frN = box

     # omega fit
-    om = dat["rot_angle"][fr0:frN]
+    om = dat["omega"][fr0:frN]
     cnts = np.sum(dat["data"][fr0:frN, y0:yN, x0:xN], axis=(1, 2))

     p0 = [1.0, 0.0, 1.0]
     coeff, var_matrix = curve_fit(gauss, range(len(cnts)), cnts, p0=p0)

     frC = fr0 + coeff[1]
-    omF = dat["rot_angle"][math.floor(frC)]
-    omC = dat["rot_angle"][math.ceil(frC)]
+    omF = dat["omega"][math.floor(frC)]
+    omC = dat["omega"][math.ceil(frC)]
     frStep = frC - math.floor(frC)
     omStep = omC - omF
     omP = omF + omStep * frStep
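The last three lines of the hunk interpolate omega linearly at the fractional frame centre: with frC = 12.4, for instance, frStep is 0.4, so omP lands 40% of the way between the omega values of frames 12 and 13.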
4
scripts/pyzebra-start.sh
Normal file
@@ -0,0 +1,4 @@
source /home/pyzebra/miniconda3/etc/profile.d/conda.sh

conda activate prod
pyzebra --port=80 --allow-websocket-origin=pyzebra.psi.ch:80

4
scripts/pyzebra-test-start.sh
Normal file
@@ -0,0 +1,4 @@
source /home/pyzebra/miniconda3/etc/profile.d/conda.sh

conda activate test
python ~/pyzebra/pyzebra/app/cli.py --allow-websocket-origin=pyzebra.psi.ch:5006

11
scripts/pyzebra-test.service
Normal file
@@ -0,0 +1,11 @@
[Unit]
Description=pyzebra-test web server (runs on port 5006)

[Service]
Type=simple
User=pyzebra
ExecStart=/bin/bash /usr/local/sbin/pyzebra-test-start.sh
Restart=always

[Install]
WantedBy=multi-user.target

10
scripts/pyzebra.service
Normal file
@@ -0,0 +1,10 @@
[Unit]
Description=pyzebra web server

[Service]
Type=simple
ExecStart=/bin/bash /usr/local/sbin/pyzebra-start.sh
Restart=always

[Install]
WantedBy=multi-user.target
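For deployment, these unit files would typically be copied to /etc/systemd/system, with the start scripts placed in /usr/local/sbin as the ExecStart lines assume, and enabled via systemctl enable --now pyzebra.service (and pyzebra-test.service for the test instance).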