public release 3.0.0 - see README and CHANGES for details
This commit is contained in:
@ -32,7 +32,7 @@ DOXYFILE_ENCODING = UTF-8
|
||||
# title of most generated pages and in a few other places.
|
||||
# The default value is: My Project.
|
||||
|
||||
PROJECT_NAME = "PEARL MSCO"
|
||||
PROJECT_NAME = "PMSCO"
|
||||
|
||||
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
|
||||
# could be handy for archiving the generated documentation or if some version
|
||||
@ -765,8 +765,10 @@ src/concepts-tasks.dox \
|
||||
src/concepts-emitter.dox \
|
||||
src/concepts-atomscat.dox \
|
||||
src/installation.dox \
|
||||
src/project.dox \
|
||||
src/execution.dox \
|
||||
src/commandline.dox \
|
||||
src/runfile.dox \
|
||||
src/optimizers.dox \
|
||||
../pmsco \
|
||||
../projects \
|
||||
@ -889,7 +891,7 @@ INPUT_FILTER =
|
||||
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
|
||||
# patterns match the file name, INPUT_FILTER is applied.
|
||||
|
||||
FILTER_PATTERNS = *.py=/usr/bin/doxypy
|
||||
FILTER_PATTERNS = *.py=./py_filter.sh
|
||||
|
||||
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
|
||||
# INPUT_FILTER) will also be used to filter the input files that are used for
|
||||
@ -2083,12 +2085,6 @@ EXTERNAL_GROUPS = YES
|
||||
|
||||
EXTERNAL_PAGES = YES
|
||||
|
||||
# The PERL_PATH should be the absolute path and name of the perl script
|
||||
# interpreter (i.e. the result of 'which perl').
|
||||
# The default file (with absolute path) is: /usr/bin/perl.
|
||||
|
||||
PERL_PATH = /usr/bin/perl
|
||||
|
||||
#---------------------------------------------------------------------------
|
||||
# Configuration options related to the dot tool
|
||||
#---------------------------------------------------------------------------
|
||||
@ -2102,15 +2098,6 @@ PERL_PATH = /usr/bin/perl
|
||||
|
||||
CLASS_DIAGRAMS = YES
|
||||
|
||||
# You can define message sequence charts within doxygen comments using the \msc
|
||||
# command. Doxygen will then run the mscgen tool (see:
|
||||
# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
|
||||
# documentation. The MSCGEN_PATH tag allows you to specify the directory where
|
||||
# the mscgen tool resides. If left empty the tool is assumed to be found in the
|
||||
# default search path.
|
||||
|
||||
MSCGEN_PATH =
|
||||
|
||||
# You can include diagrams made with dia in doxygen documentation. Doxygen will
|
||||
# then run dia to produce the diagram and insert it in the documentation. The
|
||||
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
|
||||
|
2
docs/py_filter.sh
Executable file
2
docs/py_filter.sh
Executable file
@ -0,0 +1,2 @@
|
||||
#!/bin/bash
|
||||
python -m doxypypy.doxypypy -a -c "$1"
|
@ -1,7 +1,17 @@
|
||||
to compile the source code documentation, you need the following packages (naming according to Debian):
|
||||
To compile the source code documentation in HTML format,
|
||||
you need the following packages.
|
||||
They are available from Linux distributions unless noted otherwise.
|
||||
|
||||
GNU make
|
||||
doxygen
|
||||
doxygen-gui (optional)
|
||||
doxypy
|
||||
python
|
||||
doxypypy (pip)
|
||||
graphviz
|
||||
latex (optional)
|
||||
java JRE
|
||||
plantuml (download from plantuml.com)
|
||||
|
||||
export the location of plantuml.jar in the PLANTUML_JAR_PATH environment variable.
|
||||
|
||||
go to the `docs` directory and execute `make html`.
|
||||
|
||||
open `docs/html/index.html` in your browser.
|
||||
|
@ -22,7 +22,7 @@ Do not include the extension <code>.py</code> or a trailing slash.
|
||||
Common args and project args are described below.
|
||||
|
||||
|
||||
\subsection sec_common_args Common Arguments
|
||||
\subsection sec_command_common Common Arguments
|
||||
|
||||
All common arguments are optional and default to more or less reasonable values if omitted.
|
||||
They can be added to the command line in arbitrary order.
|
||||
@ -34,7 +34,7 @@ The following table is ordered by importance.
|
||||
| -h , --help | | Display a command line summary and exit. |
|
||||
| -m , --mode | single (default), grid, swarm, genetic | Operation mode. |
|
||||
| -d, --data-dir | file system path | Directory path for experimental data files (if required by project). Default: current working directory. |
|
||||
| -o, --output-file | file system path | Base path and/or name for intermediate and output files. Default: pmsco_data |
|
||||
| -o, --output-file | file system path | Base path and/or name for intermediate and output files. Default: pmsco0 |
|
||||
| -t, --time-limit | decimal number | Wall time limit in hours. The optimizers try to finish before the limit. Default: 24.0. |
|
||||
| -k, --keep-files | list of file categories | Output file categories to keep after the calculation. Multiple values can be specified and must be separated by spaces. By default, cluster and model (simulated data) of a limited number of best models are kept. See @ref sec_file_categories below. |
|
||||
| --log-level | DEBUG, INFO, WARNING (default), ERROR, CRITICAL | Minimum level of messages that should be added to the log. |
|
||||
@ -45,7 +45,7 @@ The following table is ordered by importance.
|
||||
| --table-file | file system path | Name of the model table file in table scan mode. |
|
||||
|
||||
|
||||
\subsubsection sec_file_categories File Categories
|
||||
\subsubsection sec_command_files File Categories
|
||||
|
||||
The following category names can be used with the `--keep-files` option.
|
||||
Multiple names can be specified and must be separated by spaces.
|
||||
@ -79,7 +79,7 @@ you have to add the file categories that you want to keep, e.g.,
|
||||
Do not specify `rfac` alone as this will effectively not return any file.
|
||||
|
||||
|
||||
\subsection sec_project_args Project Arguments
|
||||
\subsection sec_command_project_args Project Arguments
|
||||
|
||||
The following table lists a few recommended options that are handled by the project code.
|
||||
Project options that are not listed here should use the long form to avoid conflicts in future versions.
|
||||
@ -90,7 +90,7 @@ Project options that are not listed here should use the long form to avoid confl
|
||||
| -s, --scans | project-dependent | Nick names of scans to use in calculation. The nick name selects the experimental data file and the initial state of the photoelectron. Multiple values can be specified and must be separated by spaces. |
|
||||
|
||||
|
||||
\subsection sec_scanfile Experimental Scan Files
|
||||
\subsection sec_command_scanfile Experimental Scan Files
|
||||
|
||||
The recommended way of specifying experimental scan files is using nick names (dictionary keys) and the @c --scans option.
|
||||
A dictionary in the module code defines the corresponding file name, chemical species of the emitter and initial state of the photoelectron.
|
||||
@ -99,7 +99,7 @@ This way, the file names and photoelectron parameters are versioned with the cod
|
||||
whereas command line arguments may easily get forgotten in the records.
|
||||
|
||||
|
||||
\subsection sec_project_example Argument Handling
|
||||
\subsection sec_command_example Argument Handling
|
||||
|
||||
To handle command line arguments in a project module,
|
||||
the module must define a <code>parse_project_args</code> and a <code>set_project_args</code> function.
|
||||
|
@ -8,28 +8,30 @@ The code for a PMSCO job consists of the following components.
|
||||
|
||||
skinparam componentStyle uml2
|
||||
|
||||
component "project" as project
|
||||
component "PMSCO" as pmsco
|
||||
component "project" as project
|
||||
component "scattering code\n(calculator)" as calculator
|
||||
|
||||
interface "command line" as cli
|
||||
interface "input files" as input
|
||||
interface "output files" as output
|
||||
interface "experimental data" as data
|
||||
interface "results" as results
|
||||
interface "output files" as output
|
||||
|
||||
cli --> pmsco
|
||||
data -> project
|
||||
project ..> pmsco
|
||||
pmsco ..> project
|
||||
pmsco ..> calculator
|
||||
cli --> project
|
||||
input -> calculator
|
||||
calculator -> output
|
||||
pmsco -> results
|
||||
|
||||
@enduml
|
||||
|
||||
The main entry point is the _PMSCO_ module.
|
||||
It implements a task loop to carry out the structural optimization
|
||||
and provides an interface between calculation programs and project-specific code.
|
||||
It also provides common utility classes and functions for handling project data.
|
||||
|
||||
The _project_ consists of program code, system and experimental parameters
|
||||
The _project_ consists of program code and parameters
|
||||
that are specific to a particular experiment and calculation job.
|
||||
The project code reads experimental data, defines the parameter dictionary of the model,
|
||||
and contains code to generate the cluster, parameter and phase files for the scattering code.
|
||||
@ -40,10 +42,6 @@ which accepts detailed input files
|
||||
(parameters, atomic coordinates, emitter specification, scattering phases)
|
||||
and outputs an intensity distribution of photoelectrons versus energy and/or angle.
|
||||
|
||||
The _PMSCO core_ interfaces between the project and the calculator.
|
||||
It carries out the structural optimization and manages the calculation tasks.
|
||||
It generates and sends input files to the calculator and reads back the output.
|
||||
|
||||
|
||||
\section sec_control_flow Control flow
|
||||
|
||||
|
@ -2,10 +2,15 @@
|
||||
\section sec_run Running PMSCO
|
||||
|
||||
To run PMSCO you need the PMSCO code and its dependencies (cf. @ref pag_install),
|
||||
a code module that contains the project-specific code,
|
||||
a customized code module that contains the project-specific code,
|
||||
and one or several files containing the scan parameters and experimental data.
|
||||
Please check the <code>projects</code> folder for examples of project modules.
|
||||
For a detailed description of the command line, see @ref pag_command.
|
||||
|
||||
The run-time arguments can either be passed on the command line
|
||||
(@ref pag_command - the older and less flexible way)
|
||||
or in a JSON-formatted run-file
|
||||
(@ref pag_runfile - the recommended new and flexible way).
|
||||
For beginners, it's also possible to hard-code all project parameters in the custom project module.
|
||||
|
||||
|
||||
\subsection sec_run_single Single Process
|
||||
@ -14,40 +19,28 @@ Run PMSCO from the command prompt:
|
||||
|
||||
@code{.sh}
|
||||
cd work-dir
|
||||
python pmsco-dir project-dir/project.py [pmsco-arguments] [project-arguments]
|
||||
python pmsco-dir -r run-file
|
||||
@endcode
|
||||
|
||||
where <code>work-dir</code> is the destination directory for output files,
|
||||
<code>pmsco-dir</code> is the directory containing the <code>__main__.py</code> file,
|
||||
<code>project.py</code> is the specific project module,
|
||||
and <code>project-dir</code> is the directory where the project file is located.
|
||||
PMSCO is run in one process which handles all calculations sequentially.
|
||||
<code>run-file</code> is a json-formatted configuration file that defines run-time parameters.
|
||||
The format and content of the run-file is described in a separate section.
|
||||
|
||||
The command line arguments are divided into common arguments interpreted by the main pmsco code (pmsco.py),
|
||||
and project-specific arguments interpreted by the project module.
|
||||
In this form, PMSCO is run in one process which handles all calculations sequentially.
|
||||
|
||||
Example command line for a single EDAC calculation of the two-atom project:
|
||||
@code{.sh}
|
||||
cd work/twoatom
|
||||
python ../../pmsco ../../projects/twoatom/twoatom.py -s ea -o twoatom-demo -m single
|
||||
python ../../pmsco -r twoatom-hemi.json
|
||||
@endcode
|
||||
|
||||
This command line executes the main pmsco module <code>pmsco.py</code>.
|
||||
The main module loads the project file <code>twoatom.py</code> as a plug-in
|
||||
and starts processing the common arguments.
|
||||
The <code>twoatom.py</code> module contains only project-specific code
|
||||
with several defined entry-points called from the main module.
|
||||
The information which project to load is contained in the <code>twoatom-hemi.json</code> file,
|
||||
along with all common and specific project arguments.
|
||||
|
||||
In the command line above, the <code>-o twoatom-demo</code> and <code>-m single</code> arguments
|
||||
are interpreted by the pmsco module.
|
||||
<code>-o</code> sets the base name of output files,
|
||||
and <code>-m</code> selects the operation mode to a single calculation.
|
||||
|
||||
The scan argument is interpreted by the project module.
|
||||
It refers to a dictionary entry that declares the scan file, the emitting atomic species, and the initial state.
|
||||
In this example, the project looks for the <code>twoatom_energy_alpha.etpai</code> scan file in the project directory,
|
||||
and calculates the modulation function for a N 1s initial state.
|
||||
The kinetic energy and emission angles are contained in the scan file.
|
||||
This example can be run for testing.
|
||||
All necessary parameters and data files are included in the code repository.
|
||||
|
||||
|
||||
\subsection sec_run_parallel Parallel Processes
|
||||
@ -61,29 +54,45 @@ The slave processes will run the scattering calculations, while the master coord
|
||||
and optimizes the model parameters (depending on the operation mode).
|
||||
|
||||
For optimum performance, the number of processes should not exceed the number of available processors.
|
||||
To start a two-hour optimization job with multiple processes on an quad-core workstation with hyperthreading:
|
||||
To start an optimization job with multiple processes on a quad-core workstation with hyperthreading:
|
||||
@code{.sh}
|
||||
cd work/my_project
|
||||
mpiexec -np 8 pmsco-dir/pmsco project-dir/project.py -o my_job_0001 -t 2 -m swarm
|
||||
mpiexec -np 8 --use-hwthread-cpus python pmsco-dir -r run-file
|
||||
@endcode
|
||||
|
||||
The `--use-hwthread-cpus` option may be necessary on certain hyperthreading architectures.
|
||||
|
||||
|
||||
\subsection sec_run_hpc High-Performance Cluster
|
||||
|
||||
The script @c bin/qpmsco.ra.sh takes care of submitting a PMSCO job to the slurm queue of the Ra cluster at PSI.
|
||||
The script can be adapted to other machines running the slurm resource manager.
|
||||
The script generates a job script based on @c pmsco.ra.template,
|
||||
substituting the necessary environment and parameters,
|
||||
and submits it to the queue.
|
||||
PMSCO is ready to run with resource managers on cluster machines.
|
||||
Code for submitting jobs to the slurm queue of the Ra cluster at PSI is included in the pmsco.schedule module
|
||||
(see also the PEARL wiki pages in the PSI intranet).
|
||||
The job parameters are entered in a separate section of the run file, cf. @ref pag_runfile for details.
|
||||
Other machines can be supported by sub-classing pmsco.schedule.JobSchedule or pmsco.schedule.SlurmSchedule.
|
||||
|
||||
Execute @c bin/qpmsco.ra.sh without arguments to see a summary of the arguments.
|
||||
If a schedule section is present and enabled in the run file,
|
||||
the following command will submit a job to the cluster machine
|
||||
rather than starting a calculation directly:
|
||||
|
||||
To submit a job to the PSI clusters (see also the PEARL-Wiki page MscCalcRa),
|
||||
the analog command to the previous section would be:
|
||||
@code{.sh}
|
||||
bin/qpmsco.ra.sh my_job_0001 1 8 2 projects/my_project/project.py swarm
|
||||
cd ~/pmsco
|
||||
python pmsco -r run-file.json
|
||||
@endcode
|
||||
|
||||
The command will copy the pmsco and project source trees as well as the run file and job script to a job directory
|
||||
under the output directory specified in the project section of the run file.
|
||||
The full path of the job directory is `output-dir/job-name`.
|
||||
The directory must be empty or not existing when you run the above command.
|
||||
|
||||
Be careful to specify correct project file paths.
|
||||
The output and data directories should be specified as absolute paths.
|
||||
|
||||
The scheduling command will also load the project and scan files.
|
||||
Many parameter errors can, thus, be caught and fixed before the job is submitted to the queue.
|
||||
The run file also offers an option to stop just before submitting the job
|
||||
so that you can inspect the job files and submit the job manually.
|
||||
|
||||
Be sure to consider the resource allocation policy of the cluster
|
||||
before you decide on the number of processes.
|
||||
Requesting less resources will prolong the run time but might increase the scheduling priority.
|
||||
|
@ -51,6 +51,14 @@ and it's difficult to switch between different Python versions.
|
||||
On the PSI cluster machines, the environment must be set using the module system and conda (on Ra).
|
||||
Details are explained in the PEARL Wiki.
|
||||
|
||||
The following tools are required to compile the documentation:
|
||||
|
||||
- doxygen
|
||||
- doxypypy
|
||||
- graphviz
|
||||
- Java
|
||||
- [plantUML](https://plantuml.com)
|
||||
- LaTeX (optional, generally not recommended)
|
||||
|
||||
\subsection sec_install_instructions Instructions
|
||||
|
||||
@ -66,7 +74,6 @@ sudo apt install \
|
||||
binutils \
|
||||
build-essential \
|
||||
doxygen \
|
||||
doxypy \
|
||||
f2c \
|
||||
g++ \
|
||||
gcc \
|
||||
@ -92,12 +99,15 @@ cd /usr/lib
|
||||
sudo ln -s /usr/lib/libblas/libblas.so.3 libblas.so
|
||||
@endcode
|
||||
|
||||
Install Miniconda according to their [instructions](https://conda.io/docs/user-guide/install/index.html),
|
||||
Download and install [Miniconda](https://conda.io/),
|
||||
then configure the Python environment:
|
||||
|
||||
@code{.sh}
|
||||
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
|
||||
bash ~/miniconda.sh
|
||||
|
||||
conda create -q --yes -n pmsco python=3.6
|
||||
source activate pmsco
|
||||
conda activate pmsco
|
||||
conda install -q --yes -n pmsco \
|
||||
pip \
|
||||
"numpy>=1.13" \
|
||||
@ -110,7 +120,7 @@ conda install -q --yes -n pmsco \
|
||||
statsmodels \
|
||||
swig \
|
||||
gitpython
|
||||
pip install periodictable attrdict fasteners mpi4py
|
||||
pip install periodictable attrdict commentjson fasteners mpi4py doxypypy
|
||||
@endcode
|
||||
|
||||
@note `mpi4py` should be installed via pip, _not_ conda.
|
||||
@ -119,16 +129,15 @@ pip install periodictable attrdict fasteners mpi4py
|
||||
|
||||
\subsubsection sec_install_singularity Installation in Singularity container
|
||||
|
||||
A [Singularity](https://www.sylabs.io/guides/2.5/user-guide/index.html) container
|
||||
A [Singularity](https://sylabs.io/singularity/) container
|
||||
contains all OS and Python dependencies for running PMSCO.
|
||||
Besides the Singularity executable, nothing else needs to be installed in the host system.
|
||||
This may be the fastest way to get PMSCO running.
|
||||
|
||||
For installation of Singularity,
|
||||
see their [user guide](https://www.sylabs.io/guides/2.5/user-guide/installation.html).
|
||||
On newer Linux systems (e.g. Ubuntu 18.04), Singularity is available from the package manager.
|
||||
Installation in a virtual machine on Windows or Mac are straightforward
|
||||
thanks to the [Vagrant system](https://www.vagrantup.com/).
|
||||
To get started with Singularity,
|
||||
download it from [sylabs.io](https://www.sylabs.io/singularity/) and install it according to their instructions.
|
||||
On Windows, Singularity can be installed in a virtual machine using the [Vagrant](https://www.vagrantup.com/)
|
||||
script included under `extras/vagrant`.
|
||||
|
||||
After installing Singularity,
|
||||
check out PMSCO as explained in the @ref sec_compile section:
|
||||
@ -136,6 +145,7 @@ check out PMSCO as explained in the @ref sec_compile section:
|
||||
@code{.sh}
|
||||
cd ~
|
||||
mkdir containers
|
||||
cd containers
|
||||
git clone git@git.psi.ch:pearl/pmsco.git pmsco
|
||||
cd pmsco
|
||||
git checkout master
|
||||
@ -143,11 +153,14 @@ git checkout -b my_branch
|
||||
@endcode
|
||||
|
||||
Then, either copy a pre-built container into `~/containers`,
|
||||
or build one from a script provided by the PMSCO repository:
|
||||
or build one from the definition file included under extras/singularity.
|
||||
You may need to customize the definition file to match the host OS
|
||||
or to install compatible OpenMPI libraries,
|
||||
cf. [Singularity user guide](https://sylabs.io/guides/3.7/user-guide/mpi.html).
|
||||
|
||||
@code{.sh}
|
||||
cd ~/containers
|
||||
sudo singularity build pmsco.simg ~/containers/pmsco/extras/singularity/singularity_python3
|
||||
sudo singularity build pmsco.sif ~/containers/pmsco/extras/singularity/singularity_python3
|
||||
@endcode
|
||||
|
||||
To work with PMSCO, start an interactive shell in the container and switch to the pmsco environment.
|
||||
@ -155,8 +168,9 @@ Note that the PMSCO code is outside the container and can be edited with the usu
|
||||
|
||||
@code{.sh}
|
||||
cd ~/containers
|
||||
singularity shell pmsco.simg
|
||||
source activate pmsco
|
||||
singularity shell pmsco.sif
|
||||
. /opt/miniconda/etc/profile.d/conda.sh
|
||||
conda activate pmsco
|
||||
cd ~/containers/pmsco
|
||||
make all
|
||||
nosetests -w tests/
|
||||
@ -168,16 +182,17 @@ Or call PMSCO from outside:
|
||||
cd ~/containers
|
||||
mkdir output
|
||||
cd output
|
||||
singularity run ../pmsco.simg python ~/containers/pmsco/pmsco path/to/your-project.py arg1 arg2 ...
|
||||
singularity run -e ../pmsco.sif ~/containers/pmsco/pmsco -r path/to/your-runfile
|
||||
@endcode
|
||||
|
||||
For parallel processing, prepend `mpirun -np X` to the singularity command as needed.
|
||||
Note that this requires "compatible" OpenMPI versions on the host and container to avoid runtime errors.
|
||||
|
||||
|
||||
\subsubsection sec_install_extra Additional Applications
|
||||
|
||||
For working with the code and data, some other applications are recommended.
|
||||
The PyCharm IDE can be installed from the Ubuntu software center.
|
||||
The PyCharm IDE (community edition) can be installed from the Ubuntu software center.
|
||||
The following commands install other useful helper applications:
|
||||
|
||||
@code{.sh}
|
||||
@ -187,10 +202,24 @@ gitg \
|
||||
meld
|
||||
@endcode
|
||||
|
||||
To produce documentation in PDF format (not recommended on virtual machine), install LaTeX:
|
||||
To compile the documentation install the following tools.
|
||||
The basic documentation is in HTML format and can be opened in any internet browser.
|
||||
If you have a working LaTeX installation, a PDF document can be produced as well.
|
||||
It is not recommended to install LaTeX just for this documentation, however.
|
||||
|
||||
@code{.sh}
|
||||
sudo apt-get install texlive-latex-recommended
|
||||
sudo apt install \
|
||||
doxygen \
|
||||
graphviz \
|
||||
default-jre
|
||||
|
||||
conda activate pmsco
|
||||
conda install -q --yes -n pmsco doxypypy
|
||||
|
||||
wget -O plantuml.jar https://sourceforge.net/projects/plantuml/files/plantuml.jar/download
|
||||
sudo mkdir /opt/plantuml/
|
||||
sudo mv plantuml.jar /opt/plantuml/
|
||||
echo "export PLANTUML_JAR_PATH=/opt/plantuml/plantuml.jar" | sudo tee /etc/profile.d/pmsco-env.sh
|
||||
@endcode
|
||||
|
||||
|
||||
@ -250,7 +279,7 @@ mkdir work
|
||||
cd work
|
||||
mkdir twoatom
|
||||
cd twoatom/
|
||||
nice python ~/pmsco/pmsco ~/pmsco/projects/twoatom/twoatom.py -s ea -o twoatom_energy_alpha -m single
|
||||
nice python ~/pmsco/pmsco -r ~/pmsco/projects/twoatom/twoatom-energy.json
|
||||
@endcode
|
||||
|
||||
Runtime warnings may appear because the twoatom project does not contain experimental data.
|
||||
|
@ -26,13 +26,13 @@ Other programs may be integrated as well.
|
||||
- various scanning modes including energy, polar angle, azimuthal angle, analyser angle.
|
||||
- averaging over multiple domains and emitters.
|
||||
- global optimization of multiple scans.
|
||||
- structural optimization algorithms: particle swarm optimization, grid search, gradient search.
|
||||
- structural optimization algorithms: genetic, particle swarm, grid search.
|
||||
- calculation of the modulation function.
|
||||
- calculation of the weighted R-factor.
|
||||
- automatic parallel processing using OpenMPI.
|
||||
|
||||
|
||||
\section sec_project Optimization Projects
|
||||
\section sec_intro_project Optimization Projects
|
||||
|
||||
To set up a new optimization project, you need to:
|
||||
|
||||
@ -44,8 +44,7 @@ To set up a new optimization project, you need to:
|
||||
- add a global function create_project to my_project.py.
|
||||
- provide experimental data files (intensity or modulation function).
|
||||
|
||||
For details, see the documentation of the Project class,
|
||||
and the example projects.
|
||||
For details, see @ref pag_project, the documentation of the pmsco.project.Project class and the example projects.
|
||||
|
||||
|
||||
\section sec_intro_start Getting Started
|
||||
@ -54,8 +53,9 @@ and the example projects.
|
||||
- @ref pag_concepts_tasks
|
||||
- @ref pag_concepts_emitter
|
||||
- @ref pag_install
|
||||
- @ref pag_project
|
||||
- @ref pag_run
|
||||
- @ref pag_command
|
||||
- @ref pag_opt
|
||||
|
||||
\section sec_license License Information
|
||||
|
||||
@ -70,6 +70,6 @@ These programs may not be used without an explicit agreement by the respective o
|
||||
|
||||
\author Matthias Muntwiler, <mailto:matthias.muntwiler@psi.ch>
|
||||
\version This documentation is compiled from version $(REVISION).
|
||||
\copyright 2015-2019 by [Paul Scherrer Institut](http://www.psi.ch)
|
||||
\copyright 2015-2021 by [Paul Scherrer Institut](http://www.psi.ch)
|
||||
\copyright Licensed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
|
||||
*/
|
||||
|
@ -3,28 +3,34 @@
|
||||
|
||||
|
||||
|
||||
\subsection sec_opt_swarm Particle swarm
|
||||
\subsection sec_opt_swarm Particle swarm optimization (PSO)
|
||||
|
||||
The particle swarm algorithm is adapted from
|
||||
The particle swarm optimization (PSO) algorithm seeks to find a global optimum in a multi-dimensional model space
|
||||
by employing the _swarm intelligence_ of a number of particles traversing space,
|
||||
each at its own velocity and direction,
|
||||
but adjusting its trajectory based on its own experience and the results of its peers.
|
||||
|
||||
The PSO algorithm is adapted from
|
||||
D. A. Duncan et al., Surface Science 606, 278 (2012).
|
||||
It is implemented in the @ref pmsco.optimizers.swarm module.
|
||||
|
||||
The general parameters of the genetic algorithm are specified in the @ref Project.optimizer_params dictionary.
|
||||
The general parameters of the algorithm are specified in the @ref Project.optimizer_params dictionary.
|
||||
Some of them can be changed on the command line.
|
||||
|
||||
| Parameter | Command line | Range | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| pop_size | --pop-size | ≥ 1 | |
|
||||
| pop_size | --pop-size | ≥ 1 | Recommended 20..50 |
|
||||
| position_constrain_mode | | default bounce | Resolution of domain limit violations. |
|
||||
| seed_file | --seed-file | a file path, default none | |
|
||||
| seed_limit | --seed-limit | 0..pop_size | |
|
||||
| rfac_limit | | 0..1, default 0.8 | Accept only seed values that have a lower R-factor. |
|
||||
| recalc_seed | | True or False, default True | |
|
||||
|
||||
The domain parameters have the following meanings:
|
||||
The model space attributes have the following meaning:
|
||||
|
||||
| Parameter | Description |
|
||||
| --- | --- |
|
||||
| start | Seed model. The start values are copied into particle 0 of the initial population. |
|
||||
| start | Start value of particle 0 in first iteration. |
|
||||
| min | Lower limit of the parameter range. |
|
||||
| max | Upper limit of the parameter range. |
|
||||
| step | Not used. |
|
||||
@ -32,23 +38,23 @@ The domain parameters have the following meanings:
|
||||
|
||||
\subsubsection sec_opt_seed Seeding a population
|
||||
|
||||
By default, one particle is initialized with the start value declared in the parameter domain,
|
||||
and the other are set to random values within the domain.
|
||||
By default, one particle is initialized with the start value declared with the model space,
|
||||
and the other ones are initialized at random positions in the model space.
|
||||
You may initialize more particles of the population with specific values by providing a seed file.
|
||||
|
||||
The seed file must have a similar format as the result `.dat` files
|
||||
with a header line specifying the column names and data rows containing the values for each particle.
|
||||
A good practice is to use a previous `.dat` file and remove unwanted rows.
|
||||
To continue an interrupted optimization,
|
||||
the `.dat` file from the previous optimization can be used as is.
|
||||
The `.dat` file from a previous optimization job can be used as is to continue the optimization,
|
||||
also in a different optimization mode.
|
||||
|
||||
The seeding procedure can be tweaked by several optimizer parameters (see above).
|
||||
PMSCO normally loads the first rows up to population size - 1 or up to the `seed_limit` parameter,
|
||||
whichever is lower.
|
||||
If an `_rfac` column is present, the file is first sorted by R-factor and only the best models are loaded.
|
||||
Models that resulted in an R-factor above the `rfac_limit` parameter are always ignored.
|
||||
Models that resulted in an R-factor above the `rfac_limit` parameter are ignored in any case.
|
||||
|
||||
During the optimization process, all models loaded from the seed file are normally re-calculated.
|
||||
In the first iteration of the optimization run, the models loaded from the seed file are re-calculated by default.
|
||||
This may waste CPU time if the calculation is run under the same conditions
|
||||
and would result in exactly the same R-factor,
|
||||
as is the case if the seed is used to continue a previous optimization, for example.
|
||||
@ -58,25 +64,26 @@ and PMSCO will use the R-factor value from the seed file rather than calculating
|
||||
|
||||
\subsubsection sec_opt_patch Patching a running optimization
|
||||
|
||||
While an optimization process is running, the user can manually patch the population with arbitrary values,
|
||||
While an optimization job is running, the user can manually patch the population with arbitrary values,
|
||||
for instance, to kick the population out of a local optimum or to drive it to a less sampled parameter region.
|
||||
To patch a running population, prepare a population file named `pmsco_patch.pop` and copy it to the work directory.
|
||||
|
||||
The file must have a similar format as the result `.dat` files
|
||||
The patch file must have the same format as the result `.dat` files
|
||||
with a header line specifying the column names and data rows containing the values.
|
||||
It should contain as many rows as particles to be patched but not more than the size of the population.
|
||||
The columns must include a `_particle` column which specifies the particle to patch
|
||||
as well as the model parameters to be changed.
|
||||
The columns must include a `_particle` column and the model parameters to be changed.
|
||||
The `_particle` column specifies the index of the particle that is patched (ranging from 0 to population size - 1).
|
||||
Parameters that should remain unaffected can be left out,
|
||||
extra columns including `_gen`, `_rfac` etc. are ignored.
|
||||
|
||||
PMSCO checks the file for syntax errors and ignores it if errors are present.
|
||||
Parameter values that lie outside the domain boundary are ignored.
|
||||
Individual parameter values that lie outside the domain boundary are silently ignored.
|
||||
Successful or failed patching is logged at warning level.
|
||||
The patch file is re-applied whenever its time stamp has changed.
|
||||
PMSCO keeps track of the time stamp of the file and re-applies the patch whenever the time stamp has changed.
|
||||
|
||||
\attention Do not edit the patch file in the working directory
|
||||
to prevent it from being read in an unfinished state or multiple times.
|
||||
\attention Since each change of time stamp may trigger patching,
|
||||
do not edit the patch file in the working directory
|
||||
to prevent it from being read in an unfinished state or multiple times!
|
||||
|
||||
|
||||
\subsection sec_opt_genetic Genetic optimization
|
||||
@ -103,7 +110,7 @@ Some of them can be changed on the command line.
|
||||
|
||||
| Parameter | Command line | Range | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| pop_size | --pop-size | ≥ 1 | |
|
||||
| pop_size | --pop-size | ≥ 1 | Recommended 10..40 |
|
||||
| mating_factor | | 1..pop_size, default 4 | |
|
||||
| strong_mutation_probability | | 0..1, default 0.01 | Probability that a parameter undergoes a strong mutation. |
|
||||
| weak_mutation_probability | | 0..1, default 1 | Probability that a parameter undergoes a weak mutation. This parameters should be left at 1. Lower values tend to produce discrete parameter values. Weak mutations can be tuned by the step domain parameters. |
|
||||
@ -113,7 +120,7 @@ Some of them can be changed on the command line.
|
||||
| rfac_limit | | 0..1, default 0.8 | Accept only seed values that have a lower R-factor. |
|
||||
| recalc_seed | | True or False, default True | |
|
||||
|
||||
The domain parameters have the following meanings:
|
||||
The model space attributes have the following meaning:
|
||||
|
||||
| Parameter | Description |
|
||||
| --- | --- |
|
||||
@ -129,7 +136,11 @@ cf. sections @ref sec_opt_seed and @ref sec_opt_swarm.
|
||||
\subsection sec_opt_grid Grid search
|
||||
|
||||
The grid search algorithm samples the parameter space at equidistant steps.
|
||||
The order of calculations is randomized so that distant parts of the parameter space are sampled at an early stage.
|
||||
It is implemented in the @ref pmsco.optimizers.grid module.
|
||||
|
||||
|
||||
The model space attributes have the following meaning.
|
||||
The order of calculations is random so that results from different parts of the model space become available early.
|
||||
|
||||
| Parameter | Description |
|
||||
| --- | --- |
|
||||
@ -149,15 +160,19 @@ The table scan calculates models from an explicit table of model parameters.
|
||||
It can be used to recalculate models from a previous optimization run on other experimental data,
|
||||
as an interface to external optimizers,
|
||||
or as a simple input of manually edited model parameters.
|
||||
It is implemented in the @ref pmsco.optimizers.table module.
|
||||
|
||||
The table can be stored in an external file that is specified on the command line,
|
||||
or supplied in one of several forms by the custom project class.
|
||||
The table can be left unchanged during the calculations,
|
||||
or new models can be added on the go.
|
||||
Duplicate models are ignored.
|
||||
|
||||
@attention Because it is not easily possible to know when and which models have been read from the table file, if you do modify the table file during processing, pay attention to the following hints:
|
||||
1. The file on disk must not be locked for more than a second. Do not keep the file open unnecessarily.
|
||||
2. _Append_ new models to the end of the table rather than overwriting previous ones. Otherwise, some models may be lost before they have been calculated.
|
||||
@attention Because it is not easily possible to know when the table file is read,
|
||||
if you do modify the table file while calculations are running,
|
||||
1. Do not keep the file locked for longer than a second.
|
||||
2. Append new models to the end of the table rather than overwriting previous ones.
|
||||
3. Delete lines only if you're sure that they are not needed any more.
|
||||
|
||||
The general parameters of the table scan are specified in the @ref Project.optimizer_params dictionary.
|
||||
Some of them can be changed on the command line or in the project class (depending on how the project class is implemented).
|
||||
@ -167,7 +182,7 @@ Some of them can be changed on the command line or in the project class (dependi
|
||||
| pop_size | --pop-size | ≥ 1 | Number of models in a generation (calculated in parallel). In table mode, this parameter is not so important and can be left at the default. It has nothing to do with table size. |
|
||||
| table_file | --table-file | a file path, default none | |
|
||||
|
||||
The domain parameters have the following meanings.
|
||||
The model space attributes have the following meaning.
|
||||
Models that violate the parameter range are not calculated.
|
||||
|
||||
| Parameter | Description |
|
||||
|
454
docs/src/project.dox
Normal file
454
docs/src/project.dox
Normal file
@ -0,0 +1,454 @@
|
||||
/*! @page pag_project Setting up a new project
|
||||
\section sec_project Setting Up a New Project
|
||||
|
||||
This topic guides you through the setup of a new project.
|
||||
Be sure to check out the examples in the projects folder
|
||||
and the code documentation as well.
|
||||
|
||||
The basic steps are:
|
||||
|
||||
1. Create a new folder under `projects`.
|
||||
2. In the new folder, create a Python module for the project (subsequently called _the project module_).
|
||||
3. In the project module, define a cluster generator class which derives from pmsco.cluster.ClusterGenerator.
|
||||
4. In the project module, define a project class which derives from pmsco.project.Project.
|
||||
5. In the same folder as the project module, create a JSON run-file.
|
||||
|
||||
\subsection sec_project_module Project Module
|
||||
|
||||
A skeleton of the project module file (with some common imports) may look like this:
|
||||
|
||||
~~~~~~{.py}
|
||||
import logging
|
||||
import math
|
||||
import numpy as np
|
||||
import periodictable as pt
|
||||
from pathlib import Path
|
||||
|
||||
import pmsco.cluster
|
||||
import pmsco.data
|
||||
import pmsco.dispatch
|
||||
import pmsco.elements.bindingenergy
|
||||
import pmsco.project
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MyClusterGenerator(pmsco.cluster.ClusterGenerator):
|
||||
def create_cluster(self, model, index):
|
||||
clu = pmsco.cluster.Cluster()
|
||||
# ...
|
||||
return clu
|
||||
|
||||
def count_emitters(self, model, index):
|
||||
# ...
|
||||
return 1
|
||||
|
||||
|
||||
class MyProject(pmsco.project.Project):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
# ...
|
||||
self.cluster_generator = MyClusterGenerator(self)
|
||||
|
||||
    def create_model_space(self):
|
||||
spa = pmsco.project.ModelSpace()
|
||||
# ...
|
||||
return spa
|
||||
|
||||
def create_params(self, model, index):
|
||||
par = pmsco.project.CalculatorParams()
|
||||
# ...
|
||||
return par
|
||||
~~~~~~
|
||||
|
||||
The main purpose of the `MyProject` class is to bundle the project-specific calculation parameters and code.
|
||||
The purpose of the `MyClusterGenerator` class is to produce atomic clusters as a function of a number of model parameters.
|
||||
For the project to be useful, some of the methods in the skeleton above need to be implemented.
|
||||
The individual methods are discussed in the following.
|
||||
Further descriptions can be found in the documentation of the code.
|
||||
|
||||
\subsection sec_project_cluster Cluster Generator
|
||||
|
||||
The cluster generator is a project-specific Python object that produces a cluster, i.e., a list of atomic coordinates,
|
||||
based on a small number of model parameters whenever PMSCO requires it.
|
||||
The most important member of a cluster generator is its `create_cluster` method.
|
||||
At least this method must be implemented for a functional cluster generator.
|
||||
|
||||
A generic `count_emitters` method is implemented in the base class.
|
||||
It needs to be overridden if you want to use parallel calculation of multiple emitters.
|
||||
|
||||
\subsubsection sec_project_cluster_create Cluster Definition
|
||||
|
||||
The `create_cluster` method takes the model parameters (a dictionary)
|
||||
and the task index (a pmsco.dispatch.CalcID, cf. @ref pag_concepts_tasks) as arguments.
|
||||
Given these arguments, it must create and fill a pmsco.cluster.Cluster object.
|
||||
See pmsco.cluster.ClusterGenerator.create_cluster for details on the method contract.
|
||||
|
||||
As an example, have a look at the following simplified excerpt from the twoatom demo project.
|
||||
|
||||
~~~~~~{.py}
|
||||
def create_cluster(self, model, index):
|
||||
# access model parameters
|
||||
# dAB - distance between atoms in Angstroms
|
||||
# th - polar angle in degrees
|
||||
# ph - azimuthal angle in degrees
|
||||
r = model['dAB']
|
||||
th = math.radians(model['th'])
|
||||
ph = math.radians(model['ph'])
|
||||
|
||||
# prepare a cluster object
|
||||
clu = pmsco.cluster.Cluster()
|
||||
# the comment line is optional but can be useful
|
||||
clu.comment = "{0} {1}".format(self.__class__, index)
|
||||
# set the maximum radius of the cluster (outliers will be ignored)
|
||||
clu.set_rmax(r * 2.0)
|
||||
|
||||
# calculate atomic vectors
|
||||
dx = r * math.sin(th) * math.cos(ph)
|
||||
dy = r * math.sin(th) * math.sin(ph)
|
||||
dz = r * math.cos(th)
|
||||
a_top = np.array((0.0, 0.0, 0.0))
|
||||
a_bot = np.array((-dx, -dy, -dz))
|
||||
|
||||
# add an oxygen atom at a_top position and mark it as emitter
|
||||
clu.add_atom('O', a_top, 1)
|
||||
# add a copper atom at a_bot position
|
||||
clu.add_atom('Cu', a_bot, 0)
|
||||
|
||||
# pass the created cluster to the calculator
|
||||
return clu
|
||||
~~~~~~
|
||||
|
||||
In this example, two atoms are added to the cluster.
|
||||
The pmsco.cluster.Cluster class provides several methods to simplify the task,
|
||||
such as adding layers or bulk regions, rotation, translation, trim, emitter selection, etc.
|
||||
Please refer to the documentation of its code for details.
|
||||
It may also be instructive to have a look at the demo projects.
|
||||
|
||||
The main purposes of the cluster object are to store an array of atoms and to read/write cluster files in a variety of formats.
|
||||
For each atom, the following properties are stored:
|
||||
|
||||
- sequential atom index (1-based, maintained by cluster code)
|
||||
- atom type (chemical element number)
|
||||
- chemical element symbol from periodic table
|
||||
- x coordinate of the atom position
|
||||
- y coordinate of the atom position
|
||||
- z coordinate of the atom position
|
||||
- emitter flag (0 = scatterer, 1 = emitter, default 0)
|
||||
- charge/ionicity (units of elementary charge, default 0)
|
||||
- scatterer class (default 0)
|
||||
|
||||
All of these properties except the scatterer class can be set by the add methods of the cluster.
|
||||
The scatterer class is used internally by the atomic scattering factor calculators.
|
||||
Whether the charge/ionicity is used depends on the particular calculator; EDAC, for instance, does not use it.
|
||||
|
||||
Note: You do not need to take care how many emitters a calculator allows,
|
||||
or whether the emitter needs to be at the origin or the first place of the array.
|
||||
These technical aspects are handled by PMSCO code transparently.
|
||||
|
||||
\subsubsection sec_project_cluster_domains Domains
|
||||
|
||||
Domains refer to regions of inequivalent structure in the probing region.
|
||||
This may include regions of different orientation, different lattice constant, or even different structure.
|
||||
The cluster methods can read the selected domain from the `index.domain` argument.
|
||||
This is an index into the pmsco.project.Project.domains list where each item is a dictionary
|
||||
that holds additional, invariable structural parameters.
|
||||
|
||||
A common case are rotational domains.
|
||||
In this case, the list of domains may look like `[{"zrot": 0.0}, {"zrot": 60.0}]`, for example,
|
||||
and the `create_cluster` method would include additional code to rotate the cluster:
|
||||
|
||||
~~~~~~{.py}
|
||||
def create_cluster(self, model, index):
|
||||
# filling atoms here
|
||||
# ...
|
||||
|
||||
dom = self.domains[index.domain]
|
||||
try:
|
||||
z_rot = dom['zrot']
|
||||
except KeyError:
|
||||
z_rot = 0.0
|
||||
if z_rot:
|
||||
clu.rotate_z(z_rot)
|
||||
|
||||
# selecting emitters
|
||||
# ...
|
||||
|
||||
return clu
|
||||
~~~~~~
|
||||
|
||||
Depending on the complexity of the system, it may, however, be necessary to write a specific sub-routine for each domain.
|
||||
|
||||
The pmsco.project.Project class includes generic code to add intensities of domains incoherently (cf. pmsco.project.Project.combine_domains).
|
||||
If the model space contains parameters 'wdom0', 'wdom1', etc.,
|
||||
these parameters are interpreted as weights of domain 0, 1, etc.
|
||||
One domain must have a fixed weight to avoid correlated parameters.
|
||||
Typically, 'wdom0' is left undefined and defaults to 1.
|
||||
|
||||
\subsubsection sec_project_cluster_emitters Emitter Configurations
|
||||
|
||||
If your project has a large cluster and/or many emitters, have a look at @ref pag_concepts_emitter.
|
||||
In this case, you should override the `count_emitters` method and return the number of emitter configurations.
|
||||
In the simplest case, this is the number of inequivalent emitters, and the implementation would be:
|
||||
|
||||
~~~~~~{.py}
|
||||
def count_emitters(self, model, index):
|
||||
index = index._replace(emit=-1)
|
||||
clu = self.create_cluster(model, index)
|
||||
return clu.get_emitter_count()
|
||||
~~~~~~
|
||||
|
||||
Next, modify the `create_cluster` method to check the emitter index (`index.emit`).
|
||||
If it is -1, the method must return the full cluster with all inequivalent emitters marked.
|
||||
If it is positive, only the corresponding emitter must be marked.
|
||||
The code could be similar to this example:
|
||||
|
||||
~~~~~~{.py}
|
||||
def create_cluster(self, model, index):
|
||||
# filling atoms here
|
||||
# ...
|
||||
|
||||
# select all possible emitters (atoms of a specific element) in a cylindrical volume
|
||||
# idx_emit is an array of atom numbers (0-based atom index)
|
||||
idx_emit = clu.find_index_cylinder(origin, r_xy, r_z, self.project.scans[index.scan].emitter)
|
||||
# if a specific emitter should be marked, restrict the array index.
|
||||
if index.emit >= 0:
|
||||
idx_emit = idx_emit[index.emit]
|
||||
# mark the selected emitters
|
||||
# if index.emit was < 0, all emitters are marked
|
||||
clu.data['e'][idx_emit] = 1
|
||||
|
||||
return clu
|
||||
~~~~~~
|
||||
|
||||
Now, the individual emitter configurations will be calculated in separate tasks
|
||||
which can be run in parallel in a multi-process environment.
|
||||
Note that the processing time of EDAC scales linearly with the number of emitters.
|
||||
Thus, parallel execution is beneficial.
|
||||
|
||||
Advanced programmers may exploit more of the flexibility of emitter configurations, cf. @ref pag_concepts_emitter.
|
||||
|
||||
\subsection sec_project_project Project Class
|
||||
|
||||
Most commonly, a project class overrides the `__init__`, `create_model_space` and `create_params` methods.
|
||||
Most other inherited methods can be overridden optionally,
|
||||
for instance `validate`, `setup`, `calc_modulation`, `rfactor`,
|
||||
as well as the combine methods `combine_rfactors`, `combine_domains`, `combine_emitters`, etc.
|
||||
In this introduction, we focus on the three most basic methods.
|
||||
|
||||
\subsubsection sec_project_project_init Initialization and Defaults
|
||||
|
||||
In the `__init__` method, you define and initialize (with default values) additional project properties.
|
||||
You may also redefine properties of the base class.
|
||||
The following code is just an example to give you some ideas.
|
||||
|
||||
~~~~~~{.py}
|
||||
class MyProject(pmsco.project.Project):
|
||||
def __init__(self):
|
||||
# call the inherited method first
|
||||
super().__init__()
|
||||
# re-define an inherited property
|
||||
self.directories["data"] = Path("/home/pmsco/data")
|
||||
# define a scan dictionary
|
||||
self.scan_dict = {}
|
||||
# fill the scan dictionary
|
||||
self.build_scan_dict()
|
||||
# create the cluster generator
|
||||
self.cluster_generator = MyClusterGenerator(self)
|
||||
# declare the list of domains (at least one is required)
|
||||
self.domains = [{"zrot": 0.}]
|
||||
|
||||
def build_scan_dict(self):
|
||||
self.scan_dict["empty"] = {"filename": "{pmsco}/projects/common/empty-hemiscan.etpi",
|
||||
"emitter": "Si", "initial_state": "2p3/2"}
|
||||
self.scan_dict["Si2p"] = {"filename": "{data}/xpd-Si2p.etpis",
|
||||
"emitter": "Si", "initial_state": "2p3/2"}
|
||||
~~~~~~
|
||||
|
||||
The scan dictionary can come in handy if you want to select scans by a shortcut on the command line or in a run file.
|
||||
|
||||
Note that most of the properties can be assigned from a run file.
|
||||
This happens after the `__init__` method.
|
||||
The values set by `__init__` serve as default values.
|
||||
|
||||
\subsubsection sec_project_project_space Model Space
|
||||
|
||||
The model space defines the keys and value ranges of the model parameters.
|
||||
There are three ways to declare the model space in order of priority:
|
||||
|
||||
1. Declare the model space in the run-file.
|
||||
2. Assign a ModelSpace to the self.model_space property directly in the `__init__` method.
|
||||
3. Implement the `create_model_space` method.
|
||||
|
||||
We begin with the third way:
|
||||
|
||||
~~~~~~{.py}
|
||||
# under class MyProject(pmsco.project.Project):
|
||||
def create_model_space(self):
|
||||
# create an empty model space
|
||||
spa = pmsco.project.ModelSpace()
|
||||
|
||||
# add parameters
|
||||
spa.add_param('dAB', 2.10, 2.00, 2.25, 0.05)
|
||||
spa.add_param('th', 15.00, 0.00, 30.00, 1.00)
|
||||
spa.add_param('ph', 90.00)
|
||||
spa.add_param('V0', 21.96, 15.00, 25.00, 1.00)
|
||||
spa.add_param('Zsurf', 1.50)
|
||||
spa.add_param('wdom1', 0.5, 0.10, 10.00, 0.10)
|
||||
|
||||
# return the model space
|
||||
return spa
|
||||
~~~~~~
|
||||
|
||||
This code declares six model parameters: `dAB`, `th`, `ph`, `V0`, `Zsurf` and `wdom1`.
|
||||
Three of them are structural parameters (used by the cluster generator above),
|
||||
two are used by the `create_params` method (see below),
|
||||
and `wdom1` is used in pmsco.project.Project.combine_domains while summing up contributions from different domains.
|
||||
|
||||
The values in the arguments list correspond to the start value (initial guess),
|
||||
the lower and upper boundaries of the value range,
|
||||
and the step size for optimizers that require it.
|
||||
If just one value is given, like for `ph` and `Zsurf`, the parameter is held constant during the optimization.
|
||||
|
||||
The equivalent declaration in the run-file would look like (parameters after `th` omitted):
|
||||
|
||||
~~~~~~{.py}
|
||||
{
|
||||
"project": {
|
||||
// ...
|
||||
"model_space": {
|
||||
"dAB": {
|
||||
"start": 2.109,
|
||||
"min": 2.0,
|
||||
"max": 2.25,
|
||||
"step": 0.05
|
||||
},
|
||||
"th": {
|
||||
"start": 15.0,
|
||||
"min": 0.0,
|
||||
"max": 30.0,
|
||||
"step": 1.0
|
||||
},
|
||||
// ...
|
||||
}
|
||||
}
|
||||
}
|
||||
~~~~~~
|
||||
|
||||
\subsubsection sec_project_project_params Calculation Parameters
|
||||
|
||||
Non-structural parameters that are needed for the input files of the calculators are passed
|
||||
in a pmsco.project.CalculatorParams object.
|
||||
This object should be created and filled in the `create_params` method of the project class.
|
||||
|
||||
The following example is from the twoatoms demo project:
|
||||
|
||||
~~~~~~{.py}
|
||||
# under class MyProject(pmsco.project.Project):
|
||||
def create_params(self, model, index):
|
||||
params = pmsco.project.CalculatorParams()
|
||||
|
||||
# meta data
|
||||
params.title = "two-atom demo"
|
||||
params.comment = "{0} {1}".format(self.__class__, index)
|
||||
|
||||
# initial state and binding energy
|
||||
initial_state = self.scans[index.scan].initial_state
|
||||
params.initial_state = initial_state
|
||||
emitter = self.scans[index.scan].emitter
|
||||
params.binding_energy = pt.elements.symbol(emitter).binding_energy[initial_state]
|
||||
|
||||
# experimental setup
|
||||
params.polarization = "H"
|
||||
params.polar_incidence_angle = 60.0
|
||||
params.azimuthal_incidence_angle = 0.0
|
||||
params.experiment_temperature = 300.0
|
||||
|
||||
# material parameters
|
||||
params.z_surface = model['Zsurf']
|
||||
params.work_function = 4.5
|
||||
params.inner_potential = model['V0']
|
||||
params.debye_temperature = 356.0
|
||||
|
||||
# multiple-scattering parameters (EDAC)
|
||||
params.emitters = []
|
||||
params.lmax = 15
|
||||
params.dmax = 5.0
|
||||
params.orders = [25]
|
||||
|
||||
return params
|
||||
~~~~~~
|
||||
|
||||
Most of the code is generic and can be copied to other projects.
|
||||
Only the experimental and material parameters need to be adjusted.
|
||||
Other properties can be changed as needed, see the documentation of pmsco.project.CalculatorParams for details.
|
||||
|
||||
\subsection sec_project_args Passing Runtime Parameters
|
||||
|
||||
Runtime parameters can be passed in one of three ways:
|
||||
|
||||
1. hard-coded in the project module,
|
||||
2. on the command line, or
|
||||
3. in a JSON run-file.
|
||||
|
||||
In the first way, all parameters are hard-coded in the `create_project` function of the project module.
|
||||
This is the simplest way for a quick start to a small project.
|
||||
However, as the project code grows, it's easy to lose track of revisions.
|
||||
In programming it is usually best practice to separate code and data.
|
||||
|
||||
The command line is another option for passing parameters to a process.
|
||||
It requires extra code for parsing the command line and is not very flexible.
|
||||
It is difficult to pass complex data types.
|
||||
Using the command line is no longer recommended and may become deprecated in a future version.
|
||||
|
||||
The recommended way of passing parameters is via run-files.
|
||||
Run-files allow for complete separation of code and data in a generic and flexible way.
|
||||
For example, run-files can be stored along with the results.
|
||||
However, the semantics of the run-file may look intimidating at first.
|
||||
|
||||
\subsubsection sec_project_args_runfile Setting Up a Run-File
|
||||
|
||||
The usage and format of run-files is described in detail under @ref pag_runfile.
|
||||
|
||||
\subsubsection sec_project_args_code Hard-Coded Arguments
|
||||
|
||||
Hard-coded parameters are usually set in a `create_project` function of the project module.
|
||||
At the end of the module, this function can easily be found.
|
||||
The function has two purposes: to create the project object and to set parameters.
|
||||
The parameters can be any attributes of the project class and its ancestors.
|
||||
See the parent pmsco.project.Project class for a list of common attributes.
|
||||
|
||||
The `create_project` function may look like in the following example.
|
||||
It must return a project object, i.e. an object instance of a class that inherits from pmsco.project.Project.
|
||||
|
||||
~~~~~~{.py}
|
||||
def create_project():
|
||||
project = MyProject()
|
||||
|
||||
project.optimizer_params["pop_size"] = 20
|
||||
|
||||
project_dir = Path(__file__).parent
|
||||
scan_file = Path(project_dir, "hbnni_e156_int.etpi")
|
||||
project.add_scan(filename=scan_file, emitter="N", initial_state="1s")
|
||||
|
||||
project.add_domain({"zrot": 0.0})
|
||||
project.add_domain({"zrot": 60.0})
|
||||
|
||||
return project
|
||||
~~~~~~
|
||||
|
||||
To have PMSCO call this function,
|
||||
pass the file path of the containing module as the first command line argument of PMSCO, cf. @ref pag_command.
|
||||
PMSCO calls this function in absence of a run-file.
|
||||
|
||||
|
||||
\subsubsection sec_project_args_cmd Command Line
|
||||
|
||||
Since it is not recommended to pass calculation parameters on the command line,
|
||||
this mechanism is not described in detail here.
|
||||
It is, however, still available.
|
||||
If you really need to use it,
|
||||
have a look at the code of the pmsco.pmsco.main function
|
||||
and how it calls the `create_project`, `parse_project_args` and `set_project_args` of the project module.
|
||||
|
||||
*/
|
333
docs/src/runfile.dox
Normal file
333
docs/src/runfile.dox
Normal file
@ -0,0 +1,333 @@
|
||||
/*! @page pag_runfile Run File
|
||||
\section sec_runfile Run File
|
||||
|
||||
This section describes the format of a run-file.
|
||||
Run-files are a new way of passing arguments to a PMSCO process which avoids cluttering up the command line.
|
||||
It is more flexible than the command line
|
||||
because run-files can assign a value to any property of the project object in an abstract way.
|
||||
Moreover, there is no necessity for the project code to parse the command line.
|
||||
|
||||
|
||||
\subsection sec_runfile_how How It Works
|
||||
|
||||
Run-files are text files in [JSON](https://en.wikipedia.org/wiki/JSON) format
|
||||
which shares most syntax elements with Python.
|
||||
JSON files contain nested dictionaries, lists, strings and numbers.
|
||||
|
||||
In PMSCO, run-files contain a dictionary of parameters for the project object
|
||||
which is the main container for calculation parameters, model objects and links to data files.
|
||||
An abstract run-file parser reads the run-file,
|
||||
constructs the specified project object based on the custom project class
|
||||
and assigns the attributes of the project object.
|
||||
It's important to note that the parser does not recognize specific data types or classes.
|
||||
All specific data handling is done by the instantiated objects, mainly the project class.
|
||||
|
||||
The parser can handle the following situations:
|
||||
|
||||
- Strings, numbers as well as dictionaries and lists of simple objects can be assigned directly to project attributes.
|
||||
- If the project class defines an attribute as a _property_,
|
||||
the class can execute custom code to import or validate data.
|
||||
- The parser can instantiate an object from a class in the namespace of the project module
|
||||
and assign its properties.
|
||||
|
||||
|
||||
\subsection sec_runfile_general General File Format
|
||||
|
||||
Run-files must adhere to the [JSON](https://en.wikipedia.org/wiki/JSON) format,
|
||||
which shares most syntax elements with Python.
|
||||
Specifically, a JSON file can declare dictionaries, lists and simple objects
|
||||
such as strings, numbers and `null`.
|
||||
As one extension to plain JSON, PMSCO ignores line comments starting with a hash `#` or double-slash `//`.
|
||||
This can be used to temporarily hide a parameter from the parser.
|
||||
|
||||
For example run-files, have a look at the twoatom demo project.
|
||||
|
||||
|
||||
\subsection sec_runfile_project Project Specification
|
||||
|
||||
|
||||
The following minimum run-file demonstrates how to specify the project at the top level:
|
||||
|
||||
~~~~~~{.py}
|
||||
{
|
||||
"project": {
|
||||
"__module__": "projects.twoatom.twoatom",
|
||||
"__class__": "TwoatomProject",
|
||||
"mode": "single",
|
||||
"output_file": "twoatom0001"
|
||||
}
|
||||
}
|
||||
~~~~~~
|
||||
|
||||
Here, the `project` keyword denotes the dictionary that is used to construct the project object.
|
||||
|
||||
Within the project dictionary, the `__module__` key selects the Python module file that contains the project code,
|
||||
and `__class__` refers to the name of the actual project class.
|
||||
Further dictionary items correspond to attributes of the project class.
|
||||
|
||||
The module name is the same as would be used in a Python import statement.
|
||||
It must be findable on the Python path.
|
||||
PMSCO ensures that the directory containing the `pmsco` and `projects` sub-directories is on the Python path.
|
||||
The class name must be in the namespace of the loaded module.
|
||||
|
||||
As PMSCO starts, it imports the specified module,
|
||||
constructs an object of the specified project class,
|
||||
and assigns any further items to project attributes.
|
||||
In the example above, `twoatom0001` is assigned to the `output_file` property.
|
||||
Any attributes not specified in the run-file will remain at their default values
|
||||
that were set by the `__init__` method of the project class.
|
||||
|
||||
Note that parameter names must start with an alphabetic character, else they are ignored.
|
||||
This provides another way to temporarily ignore an item from the file besides line comments.
|
||||
|
||||
Also note that PMSCO does not spell-check parameter names.
|
||||
The parameter values are just written to the corresponding object attribute.
|
||||
If a name is misspelled, the value will be written under the wrong name and missed by the code eventually.
|
||||
|
||||
PMSCO carries out only some most important checks on the given parameter values.
|
||||
Incorrect values may lead to improper operation or exceptions later in the calculations.
|
||||
|
||||
|
||||
\subsection sec_runfile_common Common Arguments
|
||||
|
||||
The following table lists some important parameters controlling the calculations.
|
||||
They are declared in the pmsco.project.Project class.
|
||||
|
||||
| Key | Values | Description |
|
||||
| --- | --- | --- |
|
||||
| mode | `single` (default), `grid`, `swarm`, `genetic`, `table`, `test`, `validate` | Operation mode. `validate` can be used to check the syntax of the run-file, the process exits before starting calculations. |
|
||||
| directories | dictionary | This dictionary lists common file paths used in the project. It contains keys such as `home`, `project`, `output` (see documentation of Project class in pmsco.project). Enclosed in curly braces, the keys can be used as placeholders in filenames. |
|
||||
| output_dir | path | Shortcut for directories["output"] |
|
||||
| data_dir | path | Shortcut for directories["data"] |
|
||||
| job_name | string, must be a valid file name | Base name for all produced output files. It is recommended to set a unique name for each calculation run. Do not include a path. The path can be set in _output_dir_. |
|
||||
| cluster_generator | dictionary | Class name and attributes of the cluster generator. See below. |
|
||||
| atomic_scattering_factory | string<br>Default: InternalAtomicCalculator from pmsco.calculators.calculator | Class name of the atomic scattering calculator. This name must be in the namespace of the project module. |
|
||||
| multiple_scattering_factory | string<br>Default: EdacCalculator from pmsco.calculators.edac | Class name of the multiple scattering calculator. This name must be in the namespace of the project module. |
|
||||
| model_space | dictionary | See @ref sec_runfile_space below. |
|
||||
| domains | list of dictionaries | See @ref sec_runfile_domains below. |
|
||||
| scans | list of dictionaries | See @ref sec_runfile_scans below. |
|
||||
| optimizer_params | dictionary | See @ref sec_runfile_optimizer below. |
|
||||
|
||||
The following table lists some common control parameters and metadata
|
||||
that affect the behaviour of the program but do not affect the calculation results.
|
||||
The job metadata is used to identify and describe a job in the results database if requested.
|
||||
|
||||
| Key | Values | Description |
|
||||
| --- | --- | --- |
|
||||
| job_tags | list of strings | User-specified job tags (metadata). |
|
||||
| description | string | Description of the calculation job (metadata) |
|
||||
| time_limit | decimal number<br>Default: 24. | Wall time limit in hours. The optimizers try to finish before the limit. This cannot be guaranteed, however. |
|
||||
| keep_files | list of file categories | Output file categories to keep after the calculation. Multiple values can be specified and must be separated by spaces. By default, cluster and model (simulated data) of a limited number of best models are kept. See @ref sec_runfile_files below. |
|
||||
| keep_best | integer number<br>Default: 10 | number of best models for which result files should be kept. |
|
||||
| keep_level | integer number<br>Default: 1 | numeric task level down to which files are kept. 1 = scan level, 2 = domain level, etc. |
|
||||
| log_level | DEBUG, INFO, WARNING, ERROR, CRITICAL | Minimum level of messages that should be added to the log. Empty string turns off logging. |
|
||||
| log_file | file system path<br>Default: job_name + ".log". | Name of the main log file. Under MPI, the rank of the process is inserted before the extension. The log name is created in the working directory. |
|
||||
|
||||
|
||||
\subsection sec_runfile_space Model Space
|
||||
|
||||
The `model_space` parameter is a dictionary of model parameters.
|
||||
The key is the name of the parameter as used by the cluster and input-formatting code,
|
||||
the value is a dictionary holding the `start`, `min`, `max`, `step` values to be used by the optimizer.
|
||||
|
||||
~~~~~~{.py}
|
||||
{
|
||||
"project": {
|
||||
// ...
|
||||
"model_space": {
|
||||
"dAB": {
|
||||
"start": 2.109,
|
||||
"min": 2.0,
|
||||
"max": 2.25,
|
||||
"step": 0.05
|
||||
},
|
||||
"pAB": {
|
||||
"start": 15.0,
|
||||
"min": 0.0,
|
||||
"max": 30.0,
|
||||
"step": 1.0
|
||||
},
|
||||
// ...
|
||||
}
|
||||
}
|
||||
}
|
||||
~~~~~~
|
||||
|
||||
|
||||
\subsection sec_runfile_domains Domains
|
||||
|
||||
The `domains` parameter is a list of dictionaries.
|
||||
Each dictionary holds keys describing the domain to the cluster and input-formatting code.
|
||||
The meaning of these keys is up to the project.
|
||||
|
||||
~~~~~~{.py}
|
||||
{
|
||||
"project": {
|
||||
// ...
|
||||
"domains": [
|
||||
{"surface": "Te", "doping": null, "zrot": 0.0},
|
||||
{"surface": "Te", "doping": null, "zrot": 60.0}
|
||||
],
|
||||
}
|
||||
}
|
||||
~~~~~~
|
||||
|
||||
|
||||
\subsection sec_runfile_scans Experimental Scan Files
|
||||
|
||||
The pmsco.project.Scan objects used in the calculation cannot be instantiated from the run-file directly.
|
||||
Instead, the scans object is a list of scan creators/loaders which specify what to do to create a Scan object.
|
||||
The pmsco.project module defines three scan creators: ScanLoader, ScanCreator and ScanKey.
|
||||
The following code block shows an example of each of the three:
|
||||
|
||||
~~~~~~{.py}
|
||||
{
|
||||
"project": {
|
||||
// ...
|
||||
"scans": [
|
||||
{
|
||||
"__class__": "pmsco.project.ScanCreator",
|
||||
"filename": "twoatom_energy_alpha.etpai",
|
||||
"emitter": "N",
|
||||
"initial_state": "1s",
|
||||
"positions": {
|
||||
"e": "np.arange(10, 400, 5)",
|
||||
"t": "0",
|
||||
"p": "0",
|
||||
"a": "np.linspace(-30, 30, 31)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"__class__": "pmsco.project.ScanLoader",
|
||||
"filename": "{project}/twoatom_hemi_250e.etpi",
|
||||
"emitter": "N",
|
||||
"initial_state": "1s",
|
||||
"is_modf": false
|
||||
},
|
||||
{
|
||||
"__class__": "pmsco.project.ScanKey",
|
||||
"key": "Ge3s113tp"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
~~~~~~
|
||||
|
||||
The class name must be specified as it would be called in the custom project module.
|
||||
The `pmsco.project` module must therefore be imported in the custom project module.
|
||||
|
||||
The *ScanCreator* object creates a scan using Numpy array constructors in `positions`.
|
||||
In the example above, a two-dimensional rectangular energy-alpha scan grid is created.
|
||||
The values of the positions axes are passed to Python's `eval` function
|
||||
and must return a one-dimensional Numpy `ndarray`.
|
||||
|
||||
The `emitter` and `initial_state` keys define the probed core level.
|
||||
|
||||
The *ScanLoader* object loads a data file, specified under `filename`.
|
||||
The filename can include a placeholder which is replaced by the corresponding item from Project.directories.
|
||||
Note that some of the directories (including `project`) are pre-set by PMSCO.
|
||||
It is recommended to add a `data` key under `directories` in the run-file
|
||||
if the data files are outside of the PMSCO directory tree.
|
||||
The `is_modf` key indicates whether the file contains a modulation function (`true`) or intensity (`false`).
|
||||
In the latter case, the modulation function is calculated after loading.
|
||||
|
||||
The *ScanKey* is the shortest scan specification in the run-file.
|
||||
It is a shortcut to a complete scan description in `scan_dict` dictionary in the project object.
|
||||
The `scan_dict` must be set up in the `__init__` method of the project class.
|
||||
The `key` item specifies which key of `scan_dict` should be used to create the Scan object.
|
||||
|
||||
Each item of `scan_dict` holds a dictionary
|
||||
that in turn holds the attributes for either a `ScanCreator` or a `ScanLoader`.
|
||||
If it contains a `positions` key, it represents a `ScanCreator`, else a `ScanLoader`.
|
||||
|
||||
|
||||
\subsection sec_runfile_optimizer Optimizer Parameters
|
||||
|
||||
The `optimizer_params` is a dictionary holding one or more of the following items.
|
||||
|
||||
| Key | Values | Description |
|
||||
| --- | --- | --- |
|
||||
| pop-size | integer<br>The default value is the greater of 4 or the number of parallel calculation processes. | Population size (number of particles) in swarm and genetic optimization mode. |
|
||||
| seed-file | file system path | Name of the population seed file. Population data of previous optimizations can be used to seed a new optimization. The file must have the same structure as the .pop or .dat files. See @ref pmsco.project.Project.seed_file. |
|
||||
| table-file | file system path | Name of the model table file in table scan mode. |
|
||||
|
||||
|
||||
\subsubsection sec_runfile_files File Categories
|
||||
|
||||
The following category names can be used with the `keep_files` option.
|
||||
Multiple names can be specified as a list.
|
||||
|
||||
| Category | Description | Default Action |
|
||||
| --- | --- | --- |
|
||||
| all | shortcut to include all categories | |
|
||||
| input | raw input files for calculator, including cluster and phase files in custom format | delete |
|
||||
| output | raw output files from calculator | delete |
|
||||
| atomic | atomic scattering and emission files in portable format | delete |
|
||||
| cluster | cluster files in portable XYZ format for report | keep |
|
||||
| debug | debug files | delete |
|
||||
| model | output files in ETPAI format: complete simulation (a_-1_-1_-1_-1) | keep |
|
||||
| scan | output files in ETPAI format: scan (a_b_-1_-1_-1) | keep |
|
||||
| domain | output files in ETPAI format: domain (a_b_c_-1_-1) | delete |
|
||||
| emitter | output files in ETPAI format: emitter (a_b_c_d_-1) | delete |
|
||||
| region | output files in ETPAI format: region (a_b_c_d_e) | delete |
|
||||
| report | final report of results | keep always |
|
||||
| population | final state of particle population | keep |
|
||||
| rfac | files related to models which give bad r-factors, see warning below | delete |
|
||||
|
||||
\note
|
||||
The `report` category is always kept and cannot be turned off.
|
||||
The `model` category is always kept in single calculation mode.
|
||||
|
||||
\warning
|
||||
If you want to specify `rfac` with the `keep_files` option,
|
||||
you have to add the file categories that you want to keep, e.g.,
|
||||
`"keep_files": ["rfac", "cluster", "model", "scan", "population"]`
|
||||
(to return the default categories for all calculated models).
|
||||
Do not specify `rfac` alone as this will effectively not return any file.
|
||||
|
||||
|
||||
\subsection sec_runfile_schedule Job Scheduling
|
||||
|
||||
To submit a job to a resource manager such as Slurm, add a `schedule` section to the run file
|
||||
(section ordering is not important):
|
||||
|
||||
~~~~~~{.py}
|
||||
{
|
||||
"schedule": {
|
||||
"__module__": "pmsco.schedule",
|
||||
"__class__": "PsiRaSchedule",
|
||||
"nodes": 1,
|
||||
"tasks_per_node": 24,
|
||||
"walltime": "2:00",
|
||||
"manual_run": true,
|
||||
"enabled": true
|
||||
},
|
||||
"project": {
|
||||
"__module__": "projects.twoatom.twoatom",
|
||||
"__class__": "TwoatomProject",
|
||||
"mode": "single",
|
||||
"output_file": "{home}/pmsco/twoatom0001",
|
||||
...
|
||||
}
|
||||
}
|
||||
~~~~~~
|
||||
|
||||
In the same way as for the project, the `__module__` and `__class__` keys select the class that handles the job submission.
|
||||
In this example, it is pmsco.schedule.PsiRaSchedule, which is tied to the Ra cluster at PSI.
|
||||
For other machines, you can sub-class one of the classes in the pmsco.schedule module and include it in your project module.
|
||||
|
||||
The parameters of pmsco.schedule.PsiRaSchedule are as follows.
|
||||
Some of them are also used in other schedule classes or may have different types or ranges.
|
||||
|
||||
| Key | Values | Description |
|
||||
| --- | --- | --- |
|
||||
| nodes | integer: 1..2 | Number of compute nodes (main boards on Ra). The maximum number available for PEARL is 2. |
|
||||
| tasks_per_node | integer: 1..24, 32 | Number of tasks (CPU cores on Ra) per node. Jobs with less than 24 tasks are assigned to the shared partition. |
|
||||
| wall_time | string: [days-]hours[:minutes[:seconds]] <br> dict: with any combination of days, hours, minutes, seconds | Maximum run time (wall time) of the job. |
|
||||
| manual | bool | Manual submission (true) or automatic submission (false). Manual submission allows you to inspect the job files before submission. |
|
||||
| enabled | bool | Enable scheduling (true). Otherwise, the calculation is started directly (false). |
|
||||
|
||||
@note The calculation job may run in a different working directory than the current one.
|
||||
It is important to specify absolute data and output directories in the run file (project/directories section).
|
||||
|
||||
*/
|
@ -2,21 +2,19 @@
|
||||
|
||||
skinparam componentStyle uml2
|
||||
|
||||
component "project" as project
|
||||
component "PMSCO" as pmsco
|
||||
component "project" as project
|
||||
component "scattering code\n(calculator)" as calculator
|
||||
|
||||
interface "command line" as cli
|
||||
interface "input files" as input
|
||||
interface "output files" as output
|
||||
interface "experimental data" as data
|
||||
interface "results" as results
|
||||
interface "output files" as output
|
||||
|
||||
cli --> pmsco
|
||||
data -> project
|
||||
project ..> pmsco
|
||||
pmsco ..> project
|
||||
pmsco ..> calculator
|
||||
cli --> project
|
||||
input -> calculator
|
||||
calculator -> output
|
||||
pmsco -> results
|
||||
|
||||
|
Reference in New Issue
Block a user