Compare commits: `main`...`edabcb57f8` — 298 commits
The commit table lists the 298 commits in the range, from `edabcb57f8` down to `08bf5bc27f`; only the SHA1 column survived this export (the Author, Date, and message cells are empty). In the file diffs below, the `-` lines evidently belong to `main` and the `+` lines to `edabcb57f8`: the CHANGELOG reproduced further down records, on the `main` side, exactly the Miniforge/conda-forge instructions and the `setup_env.sh` removal that these diffs take away.
**.gitignore** (vendored) — 3 lines changed
```diff
@@ -6,5 +6,4 @@ tmp_files/
 logs/
 envs/
-hidden.py
-output_files/
+.env
 output_files/
```
**CHANGELOG.md** — 40 lines changed (the file does not exist in `edabcb57f8`)

```diff
@@ -1,40 +0,0 @@
-# Changelog
-
-All notable changes to this project will be documented in this file, which is a **cumulative record**.
-
-Each version entry follows a consistent structure with the following optional sections:
-
-- **Added** – New features
-- **Changed** – Modifications to existing functionality
-- **Deprecated** – Features marked for future removal
-- **Removed** – Features removed in this version
-- **Fixed** – Bug fixes
-- **Security** – Vulnerability fixes
-
-Format based on [Keep a Changelog](https://keepachangelog.com) and [Semantic Versioning](https://semver.org).
-
-
-## [1.0.0] - 2025-06-26
-### Added
-- Multi-format, multi-instrument file reading system for FAIR data processing
-- Data integration pipeline with YAML-based configuration for cross-project adaptability
-- Metadata revision and normalization pipeline
-- HDF5 manager object for data extraction, handling, and visualization
-
-## [1.1.0] - 2025-06-26
-### Added
-- Pre-transfer validation in data integration pipeline:
-  - Disk space check: Verifies sufficient free space before copying large datasets
-  - Duplicate detection: Skips transfer if destination files already exist
-  - Dry-run optimization: Reuses file discovery results to avoid redundant directory walks
-- Include Licence
-
-### Changed
-- Update README.md with new description + authors and funding sections
-
-## [1.2.0] - 2025-06-29
-### Changed
-- Updated `README.md` to use Miniforge and `conda-forge` for environment setup.
-- Removed unreliable `setup_env.sh` shell-based installation instructions.
-- Added instructions to configure Conda to use only `conda-forge` with strict priority.
-- Included a notice to verify base environment origin via `conda info`.
```
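The 1.1.0 pre-transfer validation entries describe checks concrete enough to sketch. A minimal illustration of the disk-space and duplicate checks, using only the standard library and hypothetical names (`validate_transfer` is not DIMA's actual API):

```python
import shutil
from pathlib import Path

def validate_transfer(src_files, dst_dir):
    """Hypothetical sketch of the two pre-transfer checks described above."""
    dst = Path(dst_dir)
    # Disk space check: the source files must fit in the destination's free space.
    required = sum(Path(f).stat().st_size for f in src_files)
    free = shutil.disk_usage(dst).free
    if required > free:
        raise OSError(f"need {required} bytes, but only {free} free at {dst}")
    # Duplicate detection: keep only files whose destination copy does not exist yet.
    return [f for f in src_files if not (dst / Path(f).name).exists()]
```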
**LICENCE** — 661 lines changed (the file does not exist in `edabcb57f8`). On the `main` side the file is the complete, unmodified text of the GNU Affero General Public License, Version 3, 19 November 2007: preamble, terms and conditions 0–17, and the standard "How to Apply These Terms to Your New Programs" appendix, carrying the application notice "Copyright (C) 2025 florez_j" and the usual pointer to <https://www.gnu.org/licenses/>. Its absence from `edabcb57f8` matches the 1.1.0 changelog entry "Include Licence".
**README.md** — 109 lines changed

````diff
@@ -3,10 +3,10 @@
 
 ## Description
 
-**DIMA** (Data Integration and Metadata Annotation) is a Python package for data curation and HDF5 conversion of multi-instrument scientific data. It was developed to support the Findable, Accessible, Interoperable, and Reusable (**FAIR**) data transformation efforts at the **Laboratory of Atmospheric Chemistry** at the PSI Center for Energy and Environmental Sciences.
+**DIMA** (Data Integration and Metadata Annotation) is a Python package developed to support the findable, accessible, interoperable, and reusable (FAIR) data transformation of multi-instrument data at the **Laboratory of Atmospheric Chemistry** as part of the project **IVDAV**: *Instant and Versatile Data Visualization During the Current Dark Period of the Life Cycle of FAIR Research*, funded by the [ETH-Domain ORD Program Measure 1](https://ethrat.ch/en/measure-1-calls-for-field-specific-actions/).
 
-The **FAIR** data transformation involves cycles of data harmonization and metadata review. DIMA facilitates these processes by enabling the integration and annotation of multi-instrument data into the HDF5 format. This data may originate from diverse experimental campaigns, including **beamtimes**, **kinetic flow tube studies**, **smog chamber experiments**, and **field campaigns**.
+The **FAIR** data transformation involves cycles of data harmonization and metadata review. DIMA facilitates these processes by enabling the integration and annotation of multi-instrument data in HDF5 format. This data may originate from diverse experimental campaigns, including **beamtimes**, **kinetic flowtube studies**, **smog chamber experiments**, and **field campaigns**.
 
 
 ## Key features
@@ -30,12 +30,9 @@ For **Windows** users, the following are required:
 
 1. **Git Bash**: Install [Git Bash](https://git-scm.com/downloads) to run shell scripts (`.sh` files).
 
-2. **Miniforge**: Install [Miniforge](https://conda-forge.org/download/).
-
-3. **PSI Network Access**
-
-   Ensure you have access to the PSI internal network and the necessary permissions to access the source directories. See [notebooks/demo_data_integration.ipynb](notebooks/demo_data_integration.ipynb) for details on how to set up data integration from network drives.
+2. **Conda**: Install [Anaconda](https://www.anaconda.com/products/individual) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html).
+
+3. **PSI Network Access**: Ensure access to PSI’s network and access rights to source drives for retrieving campaign data from YAML files in the `input_files/` folder.
 
 :bulb: **Tip**: Editing your system’s PATH variable ensures both Conda and Git are available in the terminal environment used by Git Bash.
 
@@ -44,65 +41,56 @@
 
 ### Download DIMA
 
-Open a **Git Bash** terminal (or a terminal of your choice).
+Open a **Git Bash** terminal.
 
-Navigate to your `Gitea` folder, clone the repository, and move into the `dima` directory:
+Navigate to your `GitLab` folder, clone the repository, and navigate to the `dima` folder as follows:
 
-```bash
-cd path/to/Gitea
-git clone --recurse-submodules https://gitea.psi.ch/5505-public/dima.git
-cd dima
+```bash
+cd path/to/GitLab
+git clone --recurse-submodules https://gitlab.psi.ch/5505/dima.git
+cd dima
 ```
 
-### Install Python Environment Using Miniforge and conda-forge
+### Install Python Interpreter
 
-We recommend using Miniforge to manage your conda environments. Miniforge ensures compatibility with packages from the conda-forge channel.
+Open **Git Bash** terminal.
 
-1. Make sure you have installed **Miniforge**.
-
-2. Open **Miniforge Prompt**
-
-   > ⚠️ Ensure your Conda base environment is from Miniforge (not Anaconda). Run `conda info` and check for `miniforge` in the base path and `conda-forge` as the default channel.
-
-3. Create the Environment from `environment.yml`. Inside the **Miniforge Prompt** or a terminal with access to conda and run:
-   ```bash
-   cd path/to/Gitea/dima
-   conda env create --file environment.yml
-   ```
-3. Activate the Environment
-   ```bash
-   conda activate dima_env
-   ```
-4. Remove the `defaults` channel (if present):
-   ```bash
-   conda config --remove channels defaults
-   ```
-5. Add `conda-forge` as the highest-priority channel:
-   ```bash
-   conda config --add channels conda-forge
-   conda config --set channel_priority strict
-   ```
+**Option 1**: Install a suitable conda environment `multiphase_chemistry_env` inside the repository `dima` as follows:
+
+```bash
+cd path/to/GitLab/dima
+Bash setup_env.sh
+```
+
+Open **Anaconda Prompt** or a terminal with access to conda.
+
+**Option 2**: Install conda enviroment from YAML file as follows:
+```bash
+cd path/to/GitLab/dima
+conda env create --file environment.yml
+```
 
-<details>
-<summary> <b> Working with Jupyter Notebooks </b> </summary>
-
-We now make the previously installed Python environment `dima_env` selectable as a kernel in Jupyter's interface.
+### Working with Jupyter Notebooks
+We now make the previously installed Python environment `multiphase_chemistry_env` selectable as a kernel in Jupyter's interface.
 
 1. Open an Anaconda Prompt, check if the environment exists, and activate it:
    ```
    conda env list
-   conda activate dima_env
+   conda activate multiphase_chemistry_env
   ```
 2. Register the environment in Jupyter:
    ```
-   python -m ipykernel install --user --name dima_env --display-name "Python (dima_env)"
+   python -m ipykernel install --user --name multiphase_chemistry_env --display-name "Python (multiphase_chemistry_env)"
   ```
 3. Start a Jupyter Notebook by running the command:
    ```
   jupyter notebook
   ```
-   and select the `dima_env` environment from the kernel options.
+   and select the `multiphase_chemistry_env` environment from the kernel options.
 
-</details>
 
 ## Repository Structure and Software arquitecture
@@ -218,7 +206,7 @@ This section is in progress!
 | actris_level | - | Indicates the processing level of the data within the ACTRIS (Aerosol, Clouds and Trace Gases Research Infrastructure) framework. |
 | dataset_startdate | - | Denotes the start datetime of the dataset collection. |
 | dataset_enddate | - | Denotes the end datetime of the dataset collection. |
-| processing_script | - | Denotes the name of the file used to process an initial version (e.g, original version) of the dataset into a processed dataset. |
+| processing_file | - | Denotes the name of the file used to process an initial version (e.g, original version) of the dataset into a processed dataset. |
 | processing_date | - | The date when the data processing was completed. |
 
 ## Adaptability to Experimental Campaign Needs
@@ -257,28 +245,31 @@ relative_humidity:
 ```
 </details>
 
-## Authors
+# Editing this README
 
-This toolkit was developed by:
+When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template.
 
-- Juan F. Flórez-Ospina
-- Lucia Iezzi
-- Natasha Garner
-- Thorsten Bartels-Rausch
+## Suggestions for a good README
+Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
 
-All authors are affiliated with the **PSI Center for Energy and Environmental Sciences**, 5232 Villigen PSI, Switzerland.
+## Badges
+On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
 
-- For general correspondence: [thorsten.bartels-rausch@psi.ch](mailto:thorsten.bartels-rausch@psi.ch)
-- For implementation-specific questions: [juan.florez-ospina@psi.ch](mailto:juan.florez-ospina@psi.ch), [juanflo16@gmail.com](mailto:juanflo16@gmail.com)
+## Visuals
+Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
+
+## Installation
+Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.
+
+## Usage
+Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
+
+## Support
+Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
+
+## Roadmap
+If you have ideas for releases in the future, it is a good idea to list them in the README.
+
 
 ---
 
-## Funding
-
-This work was funded by the **ETH-Domain Open Research Data (ORD) Program – Measure 1**.
-
-It is part of the project **IVDAV**: *Instant and Versatile Data Visualization During the Current Dark Period of the Life Cycle of FAIR Research*, funded by the [ETH-Domain ORD Program Measure 1](https://ethrat.ch/en/measure-1-calls-for-field-specific-actions/), which is described in more detail at the [ORD Program project portal](https://open-research-data-portal.ch/projects/instant-and-versatile-data-visualization-during-the-current-dark-period-of-the-life-cycle-of-fair-research/).
-
-
 ---
````
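On the `main` side, the channel setup from steps 4 and 5 can be double-checked afterwards with standard conda commands: `conda config --show channels` should list only `conda-forge`, `conda config --show channel_priority` should report `strict`, and `conda info` should show a Miniforge base path, as the warning note in step 2 advises.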
@ -1,6 +1,8 @@
-name: dima_env
+name: pyenv5505
+#prefix: ./envs/pyenv5505  # Custom output folder
 channels:
   - conda-forge
   - defaults
 dependencies:
   - python=3.11
+  - jupyter
@ -1,69 +1,69 @@
 # Path to the directory where raw data is stored
-input_file_directory: '${NETWORK_MOUNT}/Data'
+input_file_directory: '//fs101/5505/Data'

 # Path to directory where raw data is copied and converted to HDF5 format for local analysis.
-output_file_directory: '../data/'
+output_file_directory: '../output_files/'

 # Project metadata for data lineage and provenance
 project: 'Photoenhanced uptake of NO2 driven by Fe(III)-carboxylate'
 contact: 'LuciaI'
 group_id: '5505'

 # Experiment description
 experiment: 'kinetic_flowtube_study' # 'beamtime', 'smog_chamber_study'
 dataset_startdate:
 dataset_enddate:
 actris_level: '0'

 # Instrument folders containing raw data from the campaign
 instrument_datafolder:
   - 'Lopap' # Example instrument folder
   - 'Humidity_Sensors'
   - 'ICAD/HONO'
   - 'ICAD/NO2'
   - 'T200_NOx'
   - 'T360U_CO2'

 # Data integration mode for HDF5 data ingestion
 integration_mode: 'collection' # Options: 'single_experiment', 'collection'

 # Datetime markers for individual experiments
 # Use the format YYYY-MM-DD HH-MM-SS
 datetime_steps:
   - '2022-02-11 00-00-00'
   - '2022-03-14 00-00-00'
   - '2022-03-18 00-00-00'
   - '2022-03-25 00-00-00'
   - '2022-03-29 00-00-00'
   - '2022-04-11 00-00-00'
   - '2022-04-29 00-00-00'
   - '2022-05-16 00-00-00'
   - '2022-05-30 00-00-00'
   - '2022-06-10 00-00-00'
   - '2022-06-14 00-00-00'
   - '2022-06-15 00-00-00'
   - '2022-07-15 00-00-00'
   - '2022-11-18 00-00-00'
   - '2022-11-22 00-00-00'
   - '2022-12-01 00-00-00'
   - '2022-12-02 00-00-00'
   - '2023-05-05 00-00-00'
   - '2023-05-09 00-00-00'
   - '2023-05-11 00-00-00'
   - '2023-05-16 00-00-00'
   - '2023-05-23 00-00-00'
   - '2023-05-25 00-00-00'
   - '2023-05-30 00-00-00'
   - '2023-05-31 00-00-00'
   - '2023-06-01 00-00-00'
   - '2023-06-06 00-00-00'
   - '2023-06-09 00-00-00'
   - '2023-06-13 00-00-00'
   - '2023-06-16 00-00-00'
   - '2023-06-20 00-00-00'
   - '2023-06-22 00-00-00'
   - '2023-06-27 00-00-00'
   - '2023-06-28 00-00-00'
   - '2023-06-29 00-00-00'
@ -1,32 +1,32 @@
 # Path to the directory where raw data is stored
-input_file_directory: '${NETWORK_MOUNT}/Chamber Data/L0 -raw data'
+input_file_directory: '//fs03/Iron_Sulphate'

 # Path to directory where raw data is copied and converted to HDF5 format for local analysis.
-output_file_directory: '../data/'
+output_file_directory: 'output_files/'

 # Project metadata for data lineage and provenance
 project: 'Fe SOA project'
 contact: 'NatashaG'
 group_id: '5505'

 # Experiment description
 experiment: 'smog_chamber_study' # beamtime, smog_chamber, lab_experiment
 dataset_startdate:
 dataset_enddate:
 actris_level: '0'

 # Instrument folders containing raw data from the campaign
 instrument_datafolder:
   - 'gas' # Example instrument folder
   - 'smps'
   - 'htof'
   - 'ptr'
   - 'ams'

 # Data integration mode for HDF5 data ingestion
 integration_mode: 'single_experiment' # Options: 'single_experiment', 'collection'

 # Datetime markers for individual experiments
 # Use the format YYYY-MM-DD HH-MM-SS
 datetime_steps:
+  - '2022-07-26 00-00-00'
@ -1,35 +1,35 @@
 # Path to the directory where raw data is stored
-input_file_directory: '${NETWORK_MOUNT}/People/Juan/TypicalBeamTime'
+input_file_directory: '//fs101/5505/People/Juan/TypicalBeamTime'

 # Path to directory where raw data is copied and converted to HDF5 format for local analysis.
-output_file_directory: '../data/'
+output_file_directory: 'output_files/'

 # Project metadata for data lineage and provenance
 project: 'Beamtime May 2024, Ice Napp'
 contact: 'ThorstenBR'
 group_id: '5505'

 # Experiment description
 experiment: 'beamtime' # beamtime, smog_chamber, lab_experiment
 dataset_startdate: '2023-09-22'
 dataset_enddate: '2023-09-25'
 actris_level: '0'

 institution: "PSI"
 filename_format: "institution,experiment,contact"

 # Instrument folders containing raw data from the campaign
 instrument_datafolder:
   - 'NEXAFS'
   - 'Notes'
   - 'Pressure'
   - 'Photos'
   - 'RGA'
   - 'SES'

 # Data integration mode for HDF5 data ingestion
 integration_mode: 'collection' # Options: 'single_experiment', 'collection'

 # Datetime markers for individual experiments
 # Use the format YYYY-MM-DD HH-MM-SS
+datetime_steps: []
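These campaign descriptors are plain YAML, so a quick sanity check before launching the pipeline catches missing keys early. A minimal sketch (the required-key list below is an assumed subset mirroring the validation in `load_config_and_setup_logging` further down; the file path is illustrative):

```python
import yaml

# Illustrative path; any of the descriptors above works the same way.
with open('input_files/campaignDescriptor.yaml', 'r') as stream:
    config = yaml.safe_load(stream)

# Assumed subset of the keys the pipeline validates.
required_keys = ['input_file_directory', 'output_file_directory',
                 'project', 'contact', 'group_id', 'experiment']
missing = [key for key in required_keys if key not in config]
if missing:
    raise KeyError(f"Missing required keys in YAML configuration: {missing}")
```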
@ -1,42 +0,0 @@
table_header:
  w_CenterTime:
    description: time between start and stop of the measurement
    units: YYYY/MM/DD HH:MM:SS
    rename_as: center_time
  w_StartTime:
    description: Start time of the measurement
    units: YYYY/MM/DD HH:MM:SS
    rename_as: start_time
  w_StopTime:
    description: Stop time of the measurement
    units: YYYY/MM/DD HH:MM:SS
    rename_as: stop_time
  w_I2_molec_cm3:
    description: I2 concentration
    units: cm^-1
    rename_as: i2_concentration
  w_I2_SlCol:
    description: I2 concentration sl #?
    units: ppb #?
    rename_as: i2_sl
  w_I2_SlErr:
    description: Uncertainty in I2 concentration sl #?
    units: ppb #?
    rename_as: i2_sl_uncertainty
  w_I2_VMR:
    description: I2 concentration vmr #?
    units: ppb #?
    rename_as: i2_vmr
  w_I2_VMRErr:
    description: Uncertainty in I2 concentration vmr
    units: ppb #?
    rename_as: i2_vmr_uncertainty
  w_Rho:
    description: Rho #?
    units: ppb #?
    rename_as: rho
  w_RMS:
    description: RMS #?
    units: ppb #?
    rename_as: rms
@ -16,9 +16,8 @@ from instruments.readers.g5505_text_reader import read_txt_files_as_dict
 from instruments.readers.acsm_tofware_reader import read_acsm_files_as_dict
 from instruments.readers.acsm_flag_reader import read_jsonflag_as_dict
 from instruments.readers.nasa_ames_reader import read_nasa_ames_as_dict
-from instruments.readers.structured_file_reader import read_structured_file_as_dict

-file_extensions = ['.ibw','.txt','.dat','.h5','.TXT','.csv','.pkl','.json','.yaml','yml','.nas']
+file_extensions = ['.ibw','.txt','.dat','.h5','.TXT','.csv','.pkl','.json','.yaml','.nas']

 # Define the instruments directory (modify this as needed or set to None)
 default_instruments_dir = None  # or provide an absolute path

@ -28,16 +27,11 @@ file_readers = {
     'txt': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
     'dat': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
     'csv': lambda a1: read_txt_files_as_dict(a1, instruments_dir=default_instruments_dir, work_with_copy=False),
-    'yaml': lambda a1: read_structured_file_as_dict(a1),
-    'yml': lambda a1: read_structured_file_as_dict(a1),
-    'json': lambda a1: read_structured_file_as_dict(a1),
     'ACSM_TOFWARE_txt': lambda x: read_acsm_files_as_dict(x, instruments_dir=default_instruments_dir, work_with_copy=False),
     'ACSM_TOFWARE_csv': lambda x: read_acsm_files_as_dict(x, instruments_dir=default_instruments_dir, work_with_copy=False),
     'ACSM_TOFWARE_flags_json': lambda x: read_jsonflag_as_dict(x),
     'ACSM_TOFWARE_nas': lambda x: read_nasa_ames_as_dict(x)}

-file_readers.update({'CEDOAS_txt': lambda x: read_txt_files_as_dict(x, instruments_dir=default_instruments_dir, work_with_copy=False)})

 REGISTRY_FILE = "registry.yaml"  #os.path.join(os.path.dirname(__file__), "registry.yaml")

 def load_registry():

@ -58,7 +52,7 @@ def find_reader(instrument_folder, file_extension):
     registry = load_registry()

     for entry in registry:
-        if entry["instrumentFolderName"] == instrument_folder and (file_extension in entry["fileExtension"].split(sep=',')):
+        if entry["instrumentFolderName"] == instrument_folder and entry["fileExtension"] == file_extension:
            return entry["fileReaderPath"], entry["InstrumentDictionaryPath"]

    return None, None  # Not found
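For context, a short usage sketch of the registry lookup above. The instrument folder, extension, and file path are illustrative (this particular registry entry is not shown in the diff); `find_reader` returns `(None, None)` when no entry matches, in which case the extension-keyed dispatch table can be used instead:

```python
# Hypothetical lookup for an ACSM_TOFWARE text file.
reader_path, dictionary_path = find_reader('ACSM_TOFWARE', 'txt')

if reader_path is None:
    # Fall back to the extension-keyed dispatch table defined above.
    read_fn = file_readers['ACSM_TOFWARE_txt']
    file_dict = read_fn('data/ACSM_TOFWARE/2023-05-05_run.txt')  # illustrative path
```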
@ -81,18 +81,32 @@ gas:
   datetime_format: '%Y.%m.%d %H:%M:%S'
   link_to_description: 'dictionaries/gas.yaml'

-CEDOAS: #CE-DOAS/I2:
-  formats:
-    - table_header: 'w_CenterTime w_StartTime w_StopTime w_I2_molec_cm3 w_I2_SlCol w_I2_SlErr w_I2_VMR w_I2_VMRErr w_Rho w_RMS'
-      separator: '\t'
-      file_encoding: 'utf-8'
-      timestamp: ['w_CenterTime']
-      datetime_format: '%Y/%m/%d %H:%M:%S'
-    - table_header: 'TimeStamp,Seconds_Midnight,Year,Month,Day,Hour,Minute,Second,HK0,HK1,HK2,HK3,HK4,HK5,HK6,HK7,HK8,HK9,HK10,HK11,HK12,HK13,HK14,HK15,RTD0_OO1,RTD1_LED,RTD2,RTD3_CBox,RTD4_Gas1,RTD5,RTD6,RTD7,Temp0,Temp1,Temp2,Temp3,DutyCycle0,DutyCycle1,DutyCycle2,DutyCycle3,Relay4,Relay5,Shutter0,Shutter1,Diode0Threshold,Diode0Hysteresis,Diode1Threshold,Diode1Hysteresis,SWTargetPosition,SWCurrentPosition,ELTargetPosition'
-      separator: ','
-      file_encoding: 'utf-8'
-      #timestamp: []
-      #datetime_format:
-  link_to_description: 'dictionaries/CEDOAS.yaml'
+ACSM_TOFWARE:
+  table_header:
+    #txt:
+    - 't_base VaporizerTemp_C HeaterBias_V FlowRefWave FlowRate_mb FlowRate_ccs FilamentEmission_mA Detector_V AnalogInput06_V ABRefWave ABsamp ABCorrFact'
+    - 't_start_Buf,Chl_11000,NH4_11000,SO4_11000,NO3_11000,Org_11000,SO4_48_11000,SO4_62_11000,SO4_82_11000,SO4_81_11000,SO4_98_11000,NO3_30_11000,Org_60_11000,Org_43_11000,Org_44_11000'
+    #csv:
+    - "X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 X32 X33 X34 X35 X36 X37 X38 X39 X40 X41 X42 X43 X44 X45 X46 X47 X48 X49 X50 X51 X52 X53 X54 X55 X56 X57 X58 X59 X60 X61 X62 X63 X64 X65 X66 X67 X68 X69 X70 X71 X72 X73 X74 X75 X76 X77 X78 X79 X80 X81 X82 X83 X84 X85 X86 X87 X88 X89 X90 X91 X92 X93 X94 X95 X96 X97 X98 X99 X100 X101 X102 X103 X104 X105 X106 X107 X108 X109 X110 X111 X112 X113 X114 X115 X116 X117 X118 X119 X120 X121 X122 X123 X124 X125 X126 X127 X128 X129 X130 X131 X132 X133 X134 X135 X136 X137 X138 X139 X140 X141 X142 X143 X144 X145 X146 X147 X148 X149 X150 X151 X152 X153 X154 X155 X156 X157 X158 X159 X160 X161 X162 X163 X164 X165 X166 X167 X168 X169 X170 X171 X172 X173 X174 X175 X176 X177 X178 X179 X180 X181 X182 X183 X184 X185 X186 X187 X188 X189 X190 X191 X192 X193 X194 X195 X196 X197 X198 X199 X200 X201 X202 X203 X204 X205 X206 X207 X208 X209 X210 X211 X212 X213 X214 X215 X216 X217 X218 X219"
+    - "X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 X31 X32 X33 X34 X35 X36 X37 X38 X39 X40 X41 X42 X43 X44 X45 X46 X47 X48 X49 X50 X51 X52 X53 X54 X55 X56 X57 X58 X59 X60 X61 X62 X63 X64 X65 X66 X67 X68 X69 X70 X71 X72 X73 X74 X75 X76 X77 X78 X79 X80 X81 X82 X83 X84 X85 X86 X87 X88 X89 X90 X91 X92 X93 X94 X95 X96 X97 X98 X99 X100 X101 X102 X103 X104 X105 X106 X107 X108 X109 X110 X111 X112 X113 X114 X115 X116 X117 X118 X119 X120 X121 X122 X123 X124 X125 X126 X127 X128 X129 X130 X131 X132 X133 X134 X135 X136 X137 X138 X139 X140 X141 X142 X143 X144 X145 X146 X147 X148 X149 X150 X151 X152 X153 X154 X155 X156 X157 X158 X159 X160 X161 X162 X163 X164 X165 X166 X167 X168 X169 X170 X171 X172 X173 X174 X175 X176 X177 X178 X179 X180 X181 X182 X183 X184 X185 X186 X187 X188 X189 X190 X191 X192 X193 X194 X195 X196 X197 X198 X199 X200 X201 X202 X203 X204 X205 X206 X207 X208 X209 X210 X211 X212 X213 X214 X215 X216 X217 X218 X219"
+    - 'MSS_base'
+    - 'tseries'
+  separator:
+    #txt:
+    - "\t"
+    - ","
+    #csv:
+    - "\t"
+    - "\t"
+    - "None"
+    - "None"
+  file_encoding:
+    #txt:
+    - "utf-8"
+    - "utf-8"
+    #csv:
+    - "utf-8"
+    - "utf-8"
+    - "utf-8"
+    - "utf-8"
@ -19,7 +19,16 @@ import yaml
 import h5py
 import argparse
 import logging
-import warnings
 # Import project modules
+#root_dir = os.path.abspath(os.curdir)
+#sys.path.append(root_dir)
+
+
+#try:
+#    from dima.utils import g5505_utils as utils
+#except ModuleNotFoundError:
+#    import utils.g5505_utils as utils
+#    import src.hdf5_ops as hdf5_ops
 import utils.g5505_utils as utils
@ -32,19 +41,56 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
     module_dir = os.path.dirname(__file__)
     instruments_dir = os.path.join(module_dir, '..')

-    #(config_dict,
-    # file_encoding,
-    # separator,
-    # table_header,
-    # timestamp_variables,
-    # datetime_format,
-    # description_dict) = load_file_reader_parameters(filename, instruments_dir)
-    format_variants, description_dict = load_file_reader_parameters(filename, instruments_dir)
+    # Normalize the path (resolves any '..' in the path)
+    instrument_configs_path = os.path.abspath(os.path.join(instruments_dir, 'readers', 'config_text_reader.yaml'))
+    print(instrument_configs_path)
+
+    with open(instrument_configs_path, 'r') as stream:
+        try:
+            config_dict = yaml.load(stream, Loader=yaml.FullLoader)
+        except yaml.YAMLError as exc:
+            print(exc)
+    # Verify if file can be read by available instrument configurations.
+    #if not any(key in filename.replace(os.sep,'/') for key in config_dict.keys()):
+    #    return {}
+
+    #TODO: this may be prone to error if assumed folder structure is non compliant
+    file_encoding = config_dict['default']['file_encoding']  #'utf-8'
+    separator = config_dict['default']['separator']
+    table_header = config_dict['default']['table_header']
+    timestamp_variables = []
+    datetime_format = []
+    tb_idx = 0
+    column_names = ''
+    description_dict = {}
+
+    for instFolder in config_dict.keys():
+        if instFolder in filename.split(os.sep):
+            file_encoding = config_dict[instFolder].get('file_encoding', file_encoding)
+            separator = config_dict[instFolder].get('separator', separator)
+            table_header = config_dict[instFolder].get('table_header', table_header)
+            timestamp_variables = config_dict[instFolder].get('timestamp', [])
+            datetime_format = config_dict[instFolder].get('datetime_format', [])
+
+            link_to_description = config_dict[instFolder].get('link_to_description', '').replace('/', os.sep)
+
+            if link_to_description:
+                path = os.path.join(instruments_dir, link_to_description)
+                try:
+                    with open(path, 'r') as stream:
+                        description_dict = yaml.load(stream, Loader=yaml.FullLoader)
+                except (FileNotFoundError, yaml.YAMLError) as exc:
+                    print(exc)
+    #if 'None' in table_header:
+    #    return {}

     # Read header as a dictionary and detect where data table starts
-    header_dict = {'actris_level': 0, 'processing_date': utils.created_at(), 'processing_script': os.path.relpath(thisFilePath, dimaPath)}
+    header_dict = {}
     data_start = False
     # Work with copy of the file for safety
     if work_with_copy:
@ -52,36 +98,78 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
     else:
         tmp_filename = filename

-    # Run header detection
-    header_line_number, column_names, fmt_dict, table_preamble = detect_table_header_line(tmp_filename, format_variants)
-    #with open(tmp_filename,'rb',encoding=file_encoding,errors='ignore') as f:
-
-    # Unpack validated format info
-    table_header = fmt_dict['table_header']
-    separator = fmt_dict['separator']
-    file_encoding = fmt_dict['file_encoding']
-    timestamp_variables = fmt_dict.get('timestamp', [])
-    datetime_format = fmt_dict.get('datetime_format', None)
-    desired_datetime_fmt = fmt_dict.get('desired_datetime_format', None)
-
-    # Ensure separator is valid
-    if not isinstance(separator, str) or not separator.strip():
-        raise ValueError(f"Invalid separator found in format: {repr(separator)}")
+    if not isinstance(table_header, list):
+        table_header = [table_header]
+        file_encoding = [file_encoding]
+        separator = [separator]
+
+    table_preamble = []
+    line_number = 0
+    if 'infer' not in table_header:
+        # Load DataFrame
+        with open(tmp_filename, 'rb') as f:
+            for line_number, line in enumerate(f):
+                decoded_line = line.decode(file_encoding[tb_idx])
+
+                for tb_idx, tb in enumerate(table_header):
+                    print(tb)
+                    if tb in decoded_line:
+                        break
+
+                if tb in decoded_line:
+                    list_of_substrings = decoded_line.split(separator[tb_idx].replace('\\t', '\t'))
+
+                    # Count occurrences of each substring
+                    substring_counts = collections.Counter(list_of_substrings)
+                    data_start = True
+                    # Generate column names with appended index only for repeated substrings
+                    column_names = [f"{i}_{name.strip()}" if substring_counts[name] > 1 else name.strip() for i, name in enumerate(list_of_substrings)]
+
+                    #column_names = [str(i)+'_'+name.strip() for i, name in enumerate(list_of_substrings)]
+                    #column_names = []
+                    #for i, name in enumerate(list_of_substrings):
+                    #    column_names.append(str(i)+'_'+name)
+
+                    #print(line_number, len(column_names), '\n')
+                    break
+                else:
+                    # Subdivide line into words, and join them by single space.
+                    # I assume this can produce a cleaner line that contains no weird separator characters \t \r or extra spaces and so on.
+                    list_of_substrings = decoded_line.split()
+                    # TODO: ideally we should use a multiline string but the yaml parser is not recognizing \n as special character
+                    #line = ' '.join(list_of_substrings+['\n'])
+                    #line = ' '.join(list_of_substrings)
+                    table_preamble.append(' '.join([item for item in list_of_substrings]))  # += new_line
+    else:
+        print('Table header was not detected.')

     # TODO: it does not work with separator as none :(. fix for RGA

-    try:
-        if 'infer' not in table_header:
-            df = pd.read_csv(tmp_filename,
-                             delimiter=separator,
-                             header=header_line_number,
-                             encoding=file_encoding,
-                             names=column_names,
-                             skip_blank_lines=True)
-        else:
-            df = pd.read_csv(tmp_filename,
-                             delimiter=separator,
-                             header=header_line_number,
-                             encoding=file_encoding,
-                             skip_blank_lines=True)
+    print(column_names)
+    if not 'infer' in table_header:
+        #print(table_header)
+        #print(file_encoding[tb_idx])
+        df = pd.read_csv(tmp_filename,
+                         delimiter=separator[tb_idx].replace('\\t', '\t'),
+                         header=line_number,
+                         #encoding='latin-1',
+                         encoding=file_encoding[tb_idx],
+                         names=column_names,
+                         skip_blank_lines=True)
+    else:
+        df = pd.read_csv(tmp_filename,
+                         delimiter=separator[tb_idx].replace('\\t', '\t'),
+                         header=line_number,
+                         encoding=file_encoding[tb_idx],
+                         skip_blank_lines=True)

     df_numerical_attrs = df.select_dtypes(include='number')
     df_categorical_attrs = df.select_dtypes(exclude='number')
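The column-naming trick shared by both versions above deserves a small standalone demo: repeated header fields get an index prefix so that pandas columns stay unique, while unique fields keep their original names. The header row here is invented for illustration:

```python
import collections

header = ['time', 'value', 'value', 'flag']  # illustrative header row
counts = collections.Counter(header)
# Only the repeated 'value' fields receive a positional prefix.
column_names = [f"{i}_{name.strip()}" if counts[name] > 1 else name.strip()
                for i, name in enumerate(header)]
print(column_names)  # ['time', '1_value', '2_value', 'flag']
```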
@ -89,10 +177,6 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with

     # Consolidate into single timestamp column the separate columns 'date' 'time' specified in text_data_source.yaml
     if timestamp_variables:
-        if not all(col in df_categorical_attrs.columns for col in timestamp_variables):
-            raise ValueError(f"Invalid timestamp columns: {[col for col in timestamp_variables if col not in df_categorical_attrs.columns]}.")
-
         #df_categorical_attrs['timestamps'] = [' '.join(df_categorical_attrs.loc[i,timestamp_variables].to_numpy()) for i in df.index]
         #df_categorical_attrs['timestamps'] = [ df_categorical_attrs.loc[i,'0_Date']+' '+df_categorical_attrs.loc[i,'1_Time'] for i in df.index]
@ -108,7 +192,7 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
         df_categorical_attrs = df_categorical_attrs.loc[valid_indices,:]
         df_numerical_attrs = df_numerical_attrs.loc[valid_indices,:]

-        df_categorical_attrs[timestamps_name] = df_categorical_attrs[timestamps_name].dt.strftime(desired_datetime_fmt)
+        df_categorical_attrs[timestamps_name] = df_categorical_attrs[timestamps_name].dt.strftime(config_dict['default']['desired_format'])
         startdate = df_categorical_attrs[timestamps_name].min()
         enddate = df_categorical_attrs[timestamps_name].max()
@ -121,6 +205,12 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
         df_categorical_attrs = df_categorical_attrs.drop(columns = timestamp_variables)

+    #df_categorical_attrs.reindex(drop=True)
+    #df_numerical_attrs.reindex(drop=True)
+
     categorical_variables = [item for item in df_categorical_attrs.columns]
     ####
+    #elif 'RGA' in filename:
+    #    df_categorical_attrs = df_categorical_attrs.rename(columns={'0_Time(s)' : 'timestamps'})
@ -195,169 +285,13 @@ def read_txt_files_as_dict(filename: str, instruments_dir: str = None, work_with
         # if timestamps_name in categorical_variables:
         #     dataset['attributes'] = {timestamps_name: utils.parse_attribute({'unit':'YYYY-MM-DD HH:MM:SS.ffffff'})}
         # file_dict['datasets'].append(dataset)
-    #except Exception as e:
     except Exception as e:
-        #raise RuntimeError(f"Failed to read file with detected format: {e}")
         print(e)
         return {}

     return file_dict

-## Supporting functions
-
-def detect_table_header_line(filepath, format_variants, verbose=False):
-    """
-    Tries multiple format variants to detect the table header line in the file.
-
-    Args:
-        filepath (str): Path to file.
-        format_variants (List[Dict]): Each must contain:
-            - 'file_encoding' (str)
-            - 'separator' (str)
-            - 'table_header' (str or list of str)
-        verbose (bool): If True, prints debug info.
-
-    Returns:
-        Tuple:
-            - header_line_idx (int)
-            - column_names (List[str])
-            - matched_format (Dict[str, Any])  # full format dict (validated)
-            - preamble_lines (List[str])
-    """
-    import collections
-    import warnings
-
-    for idx, fmt in enumerate(format_variants):
-        # Validate format dict
-        if 'file_encoding' not in fmt or not isinstance(fmt['file_encoding'], str):
-            raise ValueError(f"[Format {idx}] 'file_encoding' must be a string.")
-        if 'separator' not in fmt or not isinstance(fmt['separator'], str):
-            raise ValueError(f"[Format {idx}] 'separator' must be a string.")
-        if 'table_header' not in fmt or not isinstance(fmt['table_header'], (str, list)):
-            raise ValueError(f"[Format {idx}] 'table_header' must be a string or list of strings.")
-
-        encoding = fmt['file_encoding']
-        separator = fmt['separator']
-        header_patterns = fmt['table_header']
-        if isinstance(header_patterns, str):
-            header_patterns = [header_patterns]
-
-        preamble_lines = []
-        try:
-            with open(filepath, 'rb') as f:
-                for line_number, line in enumerate(f):
-                    try:
-                        decoded_line = line.decode(encoding)
-                    except UnicodeDecodeError:
-                        break  # Try next format
-
-                    for pattern in header_patterns:
-                        if pattern in decoded_line:
-                            substrings = decoded_line.split(separator.replace('\\t', '\t'))
-                            counts = collections.Counter(substrings)
-                            column_names = [
-                                f"{i}_{name.strip()}" if counts[name] > 1 else name.strip()
-                                for i, name in enumerate(substrings)
-                            ]
-                            if verbose:
-                                print(f"[Detected header] Line {line_number}: {column_names}")
-                            return line_number, column_names, fmt, preamble_lines
-
-                    preamble_lines.append(' '.join(decoded_line.split()))
-        except Exception as e:
-            if verbose:
-                print(f"[Format {idx}] Attempt failed: {e}")
-            continue
-
-    warnings.warn("Table header was not detected using known patterns. Will attempt inference mode.")
-
-    # Return fallback format with 'infer' but retain encoding/separator defaults
-    fallback_fmt = {
-        'file_encoding': 'utf-8',
-        'separator': ',',
-        'table_header': ['infer']
-    }
-    return -1, [], fallback_fmt, []
-
-
-def load_file_reader_parameters(filename: str, instruments_dir: str) -> tuple:
-    """
-    Load file reader configuration parameters based on the file and instrument directory.
-
-    Returns:
-        - format_variants: List of dicts with keys:
-          'file_encoding', 'separator', 'table_header', 'timestamp', 'datetime_format', 'desired_datetime_format'
-        - description_dict: Dict loaded from instrument's description YAML
-    """
-    config_path = os.path.abspath(os.path.join(instruments_dir, 'readers', 'config_text_reader.yaml'))
-    if not os.path.exists(config_path):
-        config_path = os.path.join(dimaPath, 'instruments', 'readers', 'config_text_reader.yaml')
-
-    try:
-        with open(config_path, 'r') as stream:
-            config_dict = yaml.load(stream, Loader=yaml.FullLoader)
-    except yaml.YAMLError as exc:
-        print(f"[YAML Load Error] {exc}")
-        return [], {}  # match the success-path return signature
-
-    default_config = config_dict.get('default', {})
-    default_format = {
-        'file_encoding': default_config.get('file_encoding', 'utf-8'),
-        'separator': default_config.get('separator', ',').replace('\\t', '\t'),
-        'table_header': default_config.get('table_header', 'infer'),
-        'timestamp': [],
-        'datetime_format': default_config.get('datetime_format', '%Y-%m-%d %H:%M:%S.%f'),
-        'desired_datetime_format': default_config.get('desired_format', '%Y-%m-%d %H:%M:%S.%f')
-    }
-
-    format_variants = []
-    description_dict = {}
-
-    # Match instrument key by folder name in file path
-    filename = os.path.normpath(filename)
-
-    for instFolder in config_dict.keys():
-        if instFolder in filename.split(os.sep):
-            inst_config = config_dict[instFolder]
-
-            # New style: has 'formats' block
-            if 'formats' in inst_config:
-                for fmt in inst_config['formats']:
-                    format_variants.append({
-                        'file_encoding': fmt.get('file_encoding', default_format['file_encoding']),
-                        'separator': fmt.get('separator', default_format['separator']),
-                        'table_header': fmt.get('table_header', default_format['table_header']),
-                        'timestamp': fmt.get('timestamp', []),
-                        'datetime_format': fmt.get('datetime_format', default_format['desired_datetime_format']),
-                        'desired_datetime_format': default_format['desired_datetime_format']
-                    })
-            else:
-                # Old style: flat format
-                format_variants.append({
-                    'file_encoding': inst_config.get('file_encoding', default_format['file_encoding']),
-                    'separator': inst_config.get('separator', default_format['separator']),
-                    'table_header': inst_config.get('table_header', default_format['table_header']),
-                    'timestamp': inst_config.get('timestamp', []),
-                    'datetime_format': inst_config.get('datetime_format', default_format['desired_datetime_format']),
-                    'desired_datetime_format': default_format['desired_datetime_format']
-                })
-
-            # Description loading
-            link_to_description = inst_config.get('link_to_description', '').replace('/', os.sep)
-            if link_to_description:
-                desc_path = os.path.join(instruments_dir, link_to_description)
-                try:
-                    with open(desc_path, 'r') as desc_stream:
-                        description_dict = yaml.load(desc_stream, Loader=yaml.FullLoader)
-                except (FileNotFoundError, yaml.YAMLError) as exc:
-                    print(f"[Description Load Error] {exc}")
-
-            break  # Stop after first match
-
-    # Always return list of formats + description
-    return format_variants, description_dict

 if __name__ == "__main__":
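Taken together, the two removed helpers compose as follows. A sketch only: the file path and `instruments_dir` arguments are illustrative, and the call sequence mirrors how `read_txt_files_as_dict` uses these functions on the left-hand side of the diff:

```python
# Hypothetical call sequence for the variant-based reader.
format_variants, description_dict = load_file_reader_parameters(
    'data/CEDOAS/2023-05-05_scan.txt', 'instruments')  # illustrative arguments

header_line, column_names, fmt, preamble = detect_table_header_line(
    'data/CEDOAS/2023-05-05_scan.txt', format_variants)

if header_line == -1:
    # Fallback format was returned; pandas will infer the table structure.
    print('No known header pattern matched; inference mode will be attempted.')
```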
@ -1,115 +0,0 @@
import sys
import os

try:
    thisFilePath = os.path.abspath(__file__)
except NameError:
    print("Error: __file__ is not available. Ensure the script is being run from a file.")
    print("[Notice] Path to DIMA package may not be resolved properly.")
    thisFilePath = os.getcwd()  # Use current directory or specify a default

dimaPath = os.path.normpath(os.path.join(thisFilePath, "..", '..', '..'))  # Move up to project root

if dimaPath not in sys.path:  # Avoid duplicate entries
    sys.path.insert(0, dimaPath)

import pandas as pd
import json, yaml
import h5py
import argparse
import logging

import utils.g5505_utils as utils


def read_structured_file_as_dict(path_to_file):
    """
    Reads a JSON or YAML file, flattens nested structures using pandas.json_normalize,
    converts to a NumPy structured array via utils.convert_attrdict_to_np_structured_array,
    and returns a standardized dictionary.
    """
    file_dict = {}
    _, path_head = os.path.split(path_to_file)

    file_dict['name'] = path_head
    file_dict['attributes_dict'] = {'actris_level': 0, 'processing_date': utils.created_at(), 'processing_script': os.path.relpath(thisFilePath, dimaPath)}
    file_dict['datasets'] = []

    try:
        with open(path_to_file, 'r') as stream:
            if path_to_file.endswith(('.yaml', '.yml')):
                raw_data = yaml.safe_load(stream)
            elif path_to_file.endswith('.json'):
                raw_data = json.load(stream)
            else:
                raise ValueError(f"Unsupported file type: {path_to_file}")
    except Exception as exc:
        logging.error("Failed to load input file %s: %s", path_to_file, exc)
        raise

    try:
        df = pd.json_normalize(raw_data)
    except Exception as exc:
        logging.error("Failed to normalize data structure: %s", exc)
        raise

    for item_idx, item in enumerate(df.to_dict(orient='records')):
        try:
            structured_array = utils.convert_attrdict_to_np_structured_array(item)
        except Exception as exc:
            logging.error("Failed to convert to structured array: %s", exc)
            raise

        dataset = {
            'name': f'data_table_{item_idx}',
            'data': structured_array,
            'shape': structured_array.shape,
            'dtype': type(structured_array)
        }

        file_dict['datasets'].append(dataset)

    return file_dict


if __name__ == "__main__":

    from src.hdf5_ops import save_file_dict_to_hdf5
    from utils.g5505_utils import created_at

    parser = argparse.ArgumentParser(description="Data ingestion process to HDF5 files.")
    parser.add_argument('dst_file_path', type=str, help="Path to the target HDF5 file.")
    parser.add_argument('src_file_path', type=str, help="Relative path to source file to be saved to target HDF5 file.")
    parser.add_argument('dst_group_name', type=str, help="Group name '/instFolder/[category]/fileName' in the target HDF5 file.")

    args = parser.parse_args()

    hdf5_file_path = args.dst_file_path
    src_file_path = args.src_file_path
    dst_group_name = args.dst_group_name
    default_mode = 'r+'

    try:
        idr_dict = read_structured_file_as_dict(src_file_path)

        if not os.path.exists(hdf5_file_path):
            default_mode = 'w'

        print(f'Opening HDF5 file: {hdf5_file_path} in mode {default_mode}')

        with h5py.File(hdf5_file_path, mode=default_mode, track_order=True) as hdf5_file_obj:
            try:
                if dst_group_name not in hdf5_file_obj:
                    hdf5_file_obj.create_group(dst_group_name)
                    hdf5_file_obj[dst_group_name].attrs['creation_date'] = created_at().encode('utf-8')
                    print(f'Created new group: {dst_group_name}')
                else:
                    print(f'Group {dst_group_name} already exists. Proceeding with data transfer...')
            except Exception as inst:
                logging.error('Failed to create group %s in HDF5: %s', dst_group_name, inst)

            save_file_dict_to_hdf5(hdf5_file_obj, dst_group_name, idr_dict)
            print(f'Completed saving file dict with keys: {idr_dict.keys()}')

    except Exception as e:
        logging.error('File reader failed to process %s: %s', src_file_path, e)
        print(f'File reader failed to process {src_file_path}. See logs for details.')
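The deleted reader leans on `pandas.json_normalize` to flatten nested YAML/JSON into one record per row before the project-specific structured-array conversion. A minimal standalone illustration with invented data (only standard pandas is used here):

```python
import pandas as pd

# Invented nested record, standing in for a parsed YAML/JSON flag file.
raw = {'flags': {'window': [3, 5], 'valid': True}, 'operator': 'NG'}

df = pd.json_normalize(raw)
# Nested keys become dotted column names, one row per record.
print(sorted(df.columns))  # ['flags.valid', 'flags.window', 'operator']
```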
@ -1,27 +1,10 @@
-import sys
-import os
-
-try:
-    thisFilePath = os.path.abspath(__file__)
-except NameError:
-    print("Error: __file__ is not available. Ensure the script is being run from a file.")
-    print("[Notice] Path to DIMA package may not be resolved properly.")
-    thisFilePath = os.getcwd()  # Use current directory or specify a default
-
-dimaPath = os.path.normpath(os.path.join(thisFilePath, "..", '..', '..'))  # Move up to project root
-
-if dimaPath not in sys.path:  # Avoid duplicate entries
-    sys.path.insert(0, dimaPath)
-
+import sys
 import h5py
 from igor2.binarywave import load as loadibw
 import logging
 import argparse
 import utils.g5505_utils as utils


 def read_xps_ibw_file_as_dict(filename):
     """
@ -66,7 +49,7 @@ def read_xps_ibw_file_as_dict(filename):

     # Group name and attributes
     file_dict['name'] = path_head
-    file_dict['attributes_dict'] = {'actris_level': 0, 'processing_date': utils.created_at(), 'processing_script': os.path.relpath(thisFilePath, dimaPath)}
+    file_dict['attributes_dict'] = {}

     # Convert notes of bytes class to string class and split string into a list of elements separated by '\r'.
     notes_list = file_obj['wave']['note'].decode("utf-8").split('\r')
@ -102,11 +85,22 @@ def read_xps_ibw_file_as_dict(filename):

 if __name__ == "__main__":

+    try:
+        thisFilePath = os.path.abspath(__file__)
+    except NameError:
+        print("Error: __file__ is not available. Ensure the script is being run from a file.")
+        print("[Notice] Path to DIMA package may not be resolved properly.")
+        thisFilePath = os.getcwd()  # Use current directory or specify a default
+
+    dimaPath = os.path.normpath(os.path.join(thisFilePath, "..", '..', '..'))  # Move up to project root
+
+    if dimaPath not in sys.path:  # Avoid duplicate entries
+        sys.path.insert(0, dimaPath)
+
     from src.hdf5_ops import save_file_dict_to_hdf5
     from utils.g5505_utils import created_at

     # Set up argument parsing
     parser = argparse.ArgumentParser(description="Data ingestion process to HDF5 files.")
     parser.add_argument('dst_file_path', type=str, help="Path to the target HDF5 file.")
@ -78,13 +78,3 @@ instruments:
   fileExtension: nas
   fileReaderPath: instruments/readers/nasa_ames_reader.py
   InstrumentDictionaryPath: instruments/dictionaries/EBAS.yaml
-
-- instrumentFolderName: ACSM_TOFWARE
-  fileExtension: yaml,yml,json
-  fileReaderPath: instruments/readers/read_structured_file_as_dict.py
-  InstrumentDictionaryPath: instruments/dictionaries/EBAS.yaml
-
-- instrumentFolderName: CEDOAS
-  fileExtension: txt
-  fileReaderPath: instruments/readers/g5505_text_reader.py
-  InstrumentDictionaryPath: instruments/dictionaries/CEDOAS.yaml
@ -1,192 +1,182 @@
 {
  "cells": [
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "# Data integration workflow of experimental campaign\n",
     "\n",
     "In this notebook, we will go through our data integration workflow. This involves the following steps:\n",
     "\n",
     "1. Specify data integration file through YAML configuration file.\n",
     "2. Create an integrated HDF5 file of experimental campaign from configuration file.\n",
     "3. Display the created HDF5 file using a treemap\n",
     "\n",
     "## Import libraries and modules\n",
     "\n",
     "* Execute (or Run) the Cell below"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "from nbutils import add_project_path_to_sys_path\n",
     "\n",
     "# Add project root to sys.path\n",
     "add_project_path_to_sys_path()\n",
     "\n",
     "try:\n",
     "    import visualization.hdf5_vis as hdf5_vis\n",
     "    import pipelines.data_integration as data_integration\n",
     "    print(\"Imports successful!\")\n",
     "except ImportError as e:\n",
     "    print(f\"Import error: {e}\")"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Step 1: Configure Your Data Integration Task\n",
-    "\n",
-    "1. Based on one of the example `.yaml` files found in the `input_files/` folder, define the input and output directory paths inside the file.\n",
-    "\n",
-    "2. When working with network drives, create a `.env` file in the root of the `dima/` project with the following line:\n",
-    "\n",
-    "   ```dotenv\n",
-    "   NETWORK_MOUNT=//your-server/your-share\n",
-    "   ```\n",
-    "3. Execute Cell.\n",
-    "\n",
-    "**Note:** Ensure `.env` is listed in `.gitignore` and `.dockerignore`.\n",
-    "\n",
-    "\n"
+    "## Step 1: Specify data integration task through YAML configuration file\n",
+    "\n",
+    "* Create your configuration file (i.e., *.yaml file) adhering to the example yaml file in the input folder.\n",
+    "* Set up input directory and output directory paths and Execute Cell.\n",
+    "\n"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [],
    "source": [
-    "number, initials = 2, 'TBR' # Set as either 2, 'TBR' or 3, 'NG'\n",
-    "campaign_descriptor_path = f'../input_files/campaignDescriptor{number}_{initials}.yaml'\n",
-    "\n",
-    "print(campaign_descriptor_path)\n"
+    "#output_filename_path = 'output_files/unified_file_smog_chamber_2024-04-07_UTC-OFST_+0200_NG.h5'\n",
+    "yaml_config_file_path = '../input_files/data_integr_config_file_TBR.yaml'\n",
+    "\n",
+    "#path_to_input_directory = 'output_files/kinetic_flowtube_study_2022-01-31_LuciaI'\n",
+    "#path_to_hdf5_file = hdf5_lib.create_hdf5_file_from_filesystem_path(path_to_input_directory)\n"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "## Step 2: Create an integrated HDF5 file of experimental campaign.\n",
     "\n",
-    "* Execute Cell. Here we run the function `integrate_data_sources` with input argument as the previously specified YAML config file.\n",
-    "\n",
-    "   "
+    "* Execute Cell. Here we run the function `integrate_data_sources` with input argument as the previously specified YAML config file."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "\n",
-    "hdf5_file_path = data_integration.run_pipeline(campaign_descriptor_path)"
+    "hdf5_file_path = data_integration.run_pipeline(yaml_config_file_path)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "hdf5_file_path "
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "## Display integrated HDF5 file using a treemap\n",
     "\n",
     "* Execute Cell. A visual representation in html format of the integrated file should be displayed and stored in the output directory folder"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "\n",
     "if isinstance(hdf5_file_path ,list):\n",
     "    for path_item in hdf5_file_path :\n",
     "        hdf5_vis.display_group_hierarchy_on_a_treemap(path_item)\n",
     "else:\n",
     "    hdf5_vis.display_group_hierarchy_on_a_treemap(hdf5_file_path)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "import src.hdf5_ops as h5de \n",
     "h5de.serialize_metadata(hdf5_file_path[0],folder_depth=3,output_format='yaml')"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "import src.hdf5_ops as h5de \n",
     "print(hdf5_file_path)\n",
     "DataOpsAPI = h5de.HDF5DataOpsManager(hdf5_file_path[0])\n",
     "\n",
     "DataOpsAPI.load_file_obj()\n",
     "\n",
     "#DataOpsAPI.reformat_datetime_column('ICAD/HONO/2022_11_22_Channel1_Data.dat/data_table',\n",
     "#                                    'Start Date/Time (UTC)',\n",
     "#                                    '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S')\n",
     "DataOpsAPI.extract_and_load_dataset_metadata()\n",
     "df = DataOpsAPI.dataset_metadata_df\n",
     "print(df.head())\n",
     "\n",
     "DataOpsAPI.unload_file_obj()\n",
     "\n"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
     "DataOpsAPI.load_file_obj()\n",
     "\n",
     "DataOpsAPI.append_metadata('/',{'test_attr':'this is a test value'})\n",
     "\n",
     "DataOpsAPI.unload_file_obj()"
    ]
   }
  ],
  "metadata": {
   "kernelspec": {
    "display_name": "multiphase_chemistry_env",
    "language": "python",
    "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.11.9"
   }
  },
  "nbformat": 4,
  "nbformat_minor": 4
 }
output_files/smog_chamber_study_2022-07-26_NatashaG.yaml (new file, 7579 lines; diff suppressed because it is too large)
@ -20,7 +20,7 @@ import logging
 from datetime import datetime
 # Importing chain class from itertools
 from itertools import chain
 import shutil

 # Import DIMA modules
 try:
     from dima.src import hdf5_writer as hdf5_lib
@ -38,19 +38,12 @@ def _generate_datetime_dict(datetime_steps):
     """ Generate the datetime augment dictionary from datetime steps. """
     datetime_augment_dict = {}
     for datetime_step in datetime_steps:
         #tmp = datetime.strptime(datetime_step, '%Y-%m-%d %H-%M-%S')
         datetime_augment_dict[datetime_step] = [
-            datetime_step.strftime('%Y-%m-%d'), datetime_step.strftime('%Y_%m_%d'),
-            datetime_step.strftime('%Y.%m.%d'), datetime_step.strftime('%Y%m%d')
+            datetime_step.strftime('%Y-%m-%d'), datetime_step.strftime('%Y_%m_%d'), datetime_step.strftime('%Y.%m.%d'), datetime_step.strftime('%Y%m%d')
         ]
     return datetime_augment_dict
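For illustration, a sketch of what `_generate_datetime_dict` produces for a single step, assuming `datetime_steps` has already been parsed into `datetime` objects from the 'YYYY-MM-DD HH-MM-SS' strings used in the config files above:

```python
from datetime import datetime

step = datetime.strptime('2022-02-11 00-00-00', '%Y-%m-%d %H-%M-%S')
# The augment dict maps each step to the date spellings used in raw file names:
# ['2022-02-11', '2022_02_11', '2022.02.11', '20220211']
print({step: [step.strftime('%Y-%m-%d'), step.strftime('%Y_%m_%d'),
              step.strftime('%Y.%m.%d'), step.strftime('%Y%m%d')]})
```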
-def _generate_output_path_fragment(filename_prefix, integration_mode, dataset_startdate, dataset_enddate, index=None):
-    """Generate consistent directory or file name fragment based on mode."""
-    if integration_mode == 'collection':
-        return f'collection_{index}_{filename_prefix}_{dataset_enddate}'
-    else:
-        return f'{filename_prefix}_{dataset_enddate}'

 def load_config_and_setup_logging(yaml_config_file_path, log_dir):
     """Load YAML configuration file, set up logging, and validate required keys and datetime_steps."""
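A quick sketch of the two naming modes of the removed helper (argument values are illustrative):

```python
# 'collection' mode prefixes an index and keeps only the end date:
_generate_output_path_fragment('kinetic_flowtube_study', 'collection',
                               '2022-01-31', '2023-06-29', index=1)
# -> 'collection_1_kinetic_flowtube_study_2023-06-29'

# Any other mode drops the index:
_generate_output_path_fragment('kinetic_flowtube_study', 'single_experiment',
                               '2022-01-31', '2023-06-29')
# -> 'kinetic_flowtube_study_2023-06-29'
```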
@ -75,28 +68,12 @@ def load_config_and_setup_logging(yaml_config_file_path, log_dir):
     except yaml.YAMLError as exc:
         logging.error("Error loading YAML file: %s", exc)
         raise ValueError(f"Failed to load YAML file: {exc}")

     # Check if required keys are present
     missing_keys = [key for key in required_keys if key not in config_dict]
     if missing_keys:
         raise KeyError(f"Missing required keys in YAML configuration: {missing_keys}")

-    # Look for all placeholders like ${VAR_NAME}
-    input_dir = config_dict['input_file_directory']
-    placeholders = re.findall(r'\$\{([^}^{]+)\}', input_dir)
-
-    success = utils.load_env_from_root()
-    print(f'Success : {success}')
-
-    for var in placeholders:
-        env_value = os.environ.get(var)
-        if env_value is None:
-            raise ValueError(f"Environment variable '{var}' is not set but used in the config.")
-        input_dir = input_dir.replace(f"${{{var}}}", env_value)
-
-    config_dict['input_file_directory'] = input_dir

     # Check the instrument_datafolder required type and ensure the list is of at least length one.
     if isinstance(config_dict['instrument_datafolder'], list) and not len(config_dict['instrument_datafolder'])>=1:
         raise ValueError('Invalid value for key "instrument_datafolder". Expected a list of strings with at least one item.'
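With the placeholder expansion shown on the removed side above, a config entry such as `input_file_directory: '${NETWORK_MOUNT}/Data'` resolves against the environment. A minimal standalone sketch of the same idea (`utils.load_env_from_root` is project-specific and omitted here; `os.environ` is assumed to be already populated, e.g. from a `.env` file):

```python
import os
import re

input_dir = '${NETWORK_MOUNT}/Data'                      # illustrative config value
os.environ.setdefault('NETWORK_MOUNT', '//fs101/5505')   # stand-in for .env loading

for var in re.findall(r'\$\{([^}^{]+)\}', input_dir):
    value = os.environ.get(var)
    if value is None:
        raise ValueError(f"Environment variable '{var}' is not set but used in the config.")
    input_dir = input_dir.replace(f"${{{var}}}", value)

print(input_dir)  # //fs101/5505/Data
```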
@ -196,48 +173,11 @@ def copy_subtree_and_create_hdf5(src, dst, select_dir_keywords, select_file_keyw
     """Helper function to copy directory with constraints and create HDF5."""
     src = src.replace(os.sep,'/')
     dst = dst.replace(os.sep,'/')

-    # Dry run to see what needs copying
-    logging.info("Checking copy status for %s", src)
-    # Return path to files that are expected in the dst directory
-    path_to_expected_files = utils.copy_directory_with_contraints(src, dst, select_dir_keywords,
-                                                                  select_file_keywords, allowed_file_extensions,
-                                                                  dry_run=True)
-
-    # Check existence and collect sizes
-    all_exist = True
-    total_size = 0
-
-    for dir_path, filenames in path_to_expected_files.items():
-        for filename in filenames:
-            dst_file_path = os.path.join(dir_path, filename)
-
-            if not os.path.exists(dst_file_path):
-                all_exist = False
-                # Get size from source file
-                src_file_path = os.path.join(src, os.path.relpath(dst_file_path, dst))
-
-                if os.path.exists(src_file_path):
-                    #print(os.path.getsize(src_file_path))
-                    total_size += os.path.getsize(src_file_path)
-
-    if all_exist:
-        logging.info(f"All files already exist at {dst}, skipping copy.")
-        print(f"[Notice] All files already exist at {dst}, skipping copy.")
-        path_to_files_dict = path_to_expected_files
-    else:
-        # Check available space for missing files only
-        dst_free = shutil.disk_usage(".").free  # checks the free space in the current dir
-        if total_size > dst_free:
-            raise Exception(f"Insufficient space: need {total_size/1e9:.6f}GB, have {dst_free/1e9:.6f}GB")
-        else:
-            print(f"Campaign folder size: {total_size/1e9:.6f}GB")
-            print(f"Free space: {dst_free/1e9:.6f}GB")
-
-        logging.info(f"Creating constrained copy of the experimental campaign folder {src} at: {dst}")
-        path_to_files_dict = utils.copy_directory_with_contraints(src, dst, select_dir_keywords, select_file_keywords, allowed_file_extensions)
-        logging.info("Finished creating a copy of the experimental campaign folder tree at: %s", dst)
+    logging.info("Creating constrained copy of the experimental campaign folder %s at: %s", src, dst)
+    path_to_files_dict = utils.copy_directory_with_contraints(src, dst, select_dir_keywords, select_file_keywords, allowed_file_extensions)
+    logging.info("Finished creating a copy of the experimental campaign folder tree at: %s", dst)

     logging.info("Creating HDF5 file at: %s", dst)
@ -248,9 +188,18 @@ def copy_subtree_and_create_hdf5(src, dst, select_dir_keywords, select_file_keyw
|
||||
return hdf5_path
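
The free-space guard above relies on shutil.disk_usage; a small sketch of its semantics (the path is arbitrary):

    import shutil

    usage = shutil.disk_usage(".")  # named tuple: total, used, free (bytes) for the filesystem of '.'
    print(f"free: {usage.free / 1e9:.2f} GB")  # the same quantity compared against total_size above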


def run_pipeline(path_to_config_yamlFile, log_dir='logs/'):

    """Integrate the data sources specified by the input configuration file into HDF5 files.

    Parameters:
        path_to_config_yamlFile (str): Path to the YAML configuration file.
        log_dir (str): Directory in which to save the log file.

    Returns:
        list: List of paths to the created HDF5 file(s).
    """

    config_dict = load_config_and_setup_logging(path_to_config_yamlFile, log_dir)

    path_to_input_dir = config_dict['input_file_directory']
@@ -264,59 +213,61 @@ def run_pipeline(path_to_config_yamlFile, log_dir='logs/'):
    dataset_startdate = config_dict['dataset_startdate']
    dataset_enddate = config_dict['dataset_enddate']

    integration_mode = config_dict.get('integration_mode', 'single_experiment')
    filename_prefix = config_dict['filename_prefix']

    # Determine the mode and process accordingly
    output_filename_path = []

    # Determine the top-level campaign folder name
    top_level_foldername = _generate_output_path_fragment(
        filename_prefix, integration_mode, dataset_startdate, dataset_enddate, index=1
    )

    # Create the path to the new raw data folder and standardize it with forward slashes
    path_to_rawdata_folder = os.path.join(
        path_to_output_dir, top_level_foldername, ""
    ).replace(os.sep, '/')

    # Process individual datetime steps if available, regardless of mode
    if config_dict.get('datetime_steps_dict', {}):
        for datetime_step, file_keywords in config_dict['datetime_steps_dict'].items():
            single_date_str = datetime_step.strftime('%Y%m%d')
            subfolder_name = f"{filename_prefix}_{single_date_str}"
            path_to_rawdata_subfolder = os.path.join(path_to_rawdata_folder, subfolder_name, "")

            path_to_integrated_stepwise_hdf5_file = copy_subtree_and_create_hdf5(
                path_to_input_dir, path_to_rawdata_subfolder, select_dir_keywords,
                file_keywords, allowed_file_extensions, root_metadata_dict)

            output_filename_path.append(path_to_integrated_stepwise_hdf5_file)

        # Collection mode post-processing
        if integration_mode == 'collection':
            path_to_filenames_dict = {path_to_rawdata_folder: [os.path.basename(path) for path in output_filename_path]} if output_filename_path else {}
            hdf5_path = hdf5_lib.create_hdf5_file_from_filesystem_path(
                path_to_rawdata_folder, path_to_filenames_dict, [], root_metadata_dict
            )
            output_filename_path.append(hdf5_path)
    else:
        path_to_integrated_stepwise_hdf5_file = copy_subtree_and_create_hdf5(
            path_to_input_dir, path_to_rawdata_folder, select_dir_keywords, [],
            allowed_file_extensions, root_metadata_dict)
        output_filename_path.append(path_to_integrated_stepwise_hdf5_file)

    return output_filename_path
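
As a usage sketch, run_pipeline can also be called directly from Python; the YAML path below is a hypothetical example:

    # Hypothetical config path, for illustration only
    hdf5_files = run_pipeline('input_files/campaign_config.yaml')
    for hdf5_path in hdf5_files:
        print(hdf5_path)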


if __name__ == "__main__":

    if len(sys.argv) < 2:
        print("Usage: python data_integration.py <function_name> <function_args>")
        sys.exit(1)

    # Extract the function name from the command-line arguments
    function_name = sys.argv[1]

    # Handle function execution based on the provided function name
    if function_name == 'run':
        if len(sys.argv) != 3:
            print("Usage: python data_integration.py run <path_to_config_yamlFile>")
            sys.exit(1)
        # Extract the path to the configuration file specifying the data integration task
        path_to_config_yamlFile = sys.argv[2]
        run_pipeline(path_to_config_yamlFile)

47	setup_env.sh	Normal file
@@ -0,0 +1,47 @@
#!/bin/bash

# Define the name of the environment
ENV_NAME="multiphase_chemistry_env"

# Check if mamba is available and use it instead of conda for faster installation
if command -v mamba &> /dev/null; then
    CONDA_COMMAND="mamba"
else
    CONDA_COMMAND="conda"
fi

# Create the conda environment with all dependencies, resolving from conda-forge and defaults
$CONDA_COMMAND create -y -n "$ENV_NAME" -c conda-forge -c defaults python=3.11 \
    jupyter numpy h5py pandas matplotlib plotly=5.24 scipy pip

# Check whether the environment was successfully created
if [ $? -ne 0 ]; then
    echo "Failed to create the environment '$ENV_NAME'. Please check the logs above for details."
    exit 1
fi

# Activate the new environment
if source activate "$ENV_NAME" 2>/dev/null || conda activate "$ENV_NAME" 2>/dev/null; then
    echo "Environment '$ENV_NAME' activated successfully."
else
    echo "Failed to activate the environment '$ENV_NAME'. Please check your conda setup."
    exit 1
fi

# Install additional pip packages only if the environment is activated
echo "Installing additional pip packages..."
pip install pybis==1.35 igor2 ipykernel sphinx

# Check whether the pip installations were successful
if [ $? -ne 0 ]; then
    echo "Failed to install pip packages. Please check the logs above for details."
    exit 1
fi

# Optional: export the environment to a YAML file (commented out)
# $CONDA_COMMAND env export -n "$ENV_NAME" > "$ENV_NAME-environment.yaml"

# Print success message
echo "Environment '$ENV_NAME' created and configured successfully."
# echo "Environment configuration saved to '$ENV_NAME-environment.yaml'."

@@ -19,10 +19,17 @@ import pandas as pd
import numpy as np
import logging
import datetime

import h5py

import yaml
import json
import copy

#try:
#    from dima.utils import g5505_utils as utils
#    from dima.src import hdf5_writer as hdf5_lib
#except ModuleNotFoundError:
import utils.g5505_utils as utils
import src.hdf5_writer as hdf5_lib

@@ -737,29 +744,9 @@ def save_file_dict_to_hdf5(h5file, group_name, file_dict):
    try:
        # Create the group and add its attributes

        # Base filename to use as the group name
        base_filename = file_dict['name']
        candidate_name = base_filename
        replicate_index = 0

        # Check for an existing group and find a free name
        parent_group = h5file.require_group(group_name)
        while candidate_name in parent_group:
            replicate_index += 1
            candidate_name = f"{base_filename}_{replicate_index}"

        group = h5file[group_name].create_group(name=candidate_name)
        # Add group attributes
        group.attrs.update(file_dict['attributes_dict'])

        # Annotate the replicate if it was renamed
        if replicate_index > 0:
            group.attrs['replicate_of'] = base_filename
            group.attrs['replicate_info'] = (
                f"Renamed due to an existing group with the same name. "
                f"This is replicate #{replicate_index}."
            )

        # Add datasets to the just-created group
        for dataset in file_dict['datasets']:
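
A minimal standalone sketch of the same collision-renaming strategy, using an in-memory HDF5 file and hypothetical group names:

    import h5py

    with h5py.File('demo.h5', 'w', driver='core', backing_store=False) as f:
        parent = f.require_group('instrument_a')   # hypothetical parent group
        for _ in range(2):
            candidate, i = 'scan.txt', 0
            while candidate in parent:
                i += 1
                candidate = f'scan.txt_{i}'
            parent.create_group(candidate)
        print(list(parent))  # -> ['scan.txt', 'scan.txt_1']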
@@ -100,20 +100,6 @@ def create_hdf5_file_from_filesystem_path(path_to_input_directory: str,
        print(message)
        logging.error(message)
    else:
        # Step 1: Preprocess all metadata.json files into a lookup dict
        all_metadata_dict = {}

        for dirpath, filenames in path_to_filenames_dict.items():
            metadata_file = next((f for f in filenames if f.endswith('metadata.json')), None)
            if metadata_file:
                metadata_path = os.path.join(dirpath, metadata_file)
                try:
                    with open(metadata_path, 'r') as metafile:
                        all_metadata_dict[dirpath] = json.load(metafile)
                except json.JSONDecodeError:
                    logging.warning(f"Invalid JSON in metadata file: {metadata_path}")
                    all_metadata_dict[dirpath] = {}

        with h5py.File(path_to_output_file, mode=mode, track_order=True) as h5file:

            number_of_dirs = len(path_to_filenames_dict.keys())
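
For reference, a sketch of the per-directory metadata.json shape this lookup expects (filenames mapped to attribute dicts); all names and values here are hypothetical:

    import json

    # Hypothetical per-directory metadata; keys are filenames, values are attribute dicts
    example_metadata = {
        'scan_001.txt': {'operator': 'A. Example', 'temperature_C': 23.5}
    }
    print(json.dumps(example_metadata, indent=2))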
@@ -152,14 +138,21 @@ def create_hdf5_file_from_filesystem_path(path_to_input_directory: str,
                    stdout = inst
                    logging.error('Failed to create group %s into HDF5: %s', group_name, inst)

                # Step 3: During ingestion, attach metadata per file
                metadata_dict = all_metadata_dict.get(dirpath, {})
                if 'data_lineage_metadata.json' in filtered_filenames_list:
                    idx = filtered_filenames_list.index('data_lineage_metadata.json')
                    data_lineage_file = filtered_filenames_list[idx]
                    try:
                        with open('/'.join([dirpath, data_lineage_file]), 'r') as dlf:
                            data_lineage_dict = json.load(dlf)
                        filtered_filenames_list.pop(idx)
                    except json.JSONDecodeError:
                        data_lineage_dict = {}  # Start fresh if the file is invalid
                else:
                    data_lineage_dict = {}

                for filenumber, filename in enumerate(filtered_filenames_list):

                    # Skip any file that itself ends in metadata.json
                    if filename.endswith('metadata.json'):
                        continue

                    # HDF5 path to the filename group
                    dest_group_name = f'{group_name}/{filename}'
@@ -170,10 +163,6 @@ def create_hdf5_file_from_filesystem_path(path_to_input_directory: str,
                    #file_dict = ext_to_reader_dict[file_ext](os.path.join(dirpath, filename))
                    file_dict = filereader_registry.select_file_reader(dest_group_name)(source_file_path)

                    # Attach per-file metadata if available
                    if filename in metadata_dict:
                        file_dict.get("attributes_dict", {}).update(metadata_dict[filename])
                    file_dict.get("attributes_dict", {}).update({'original_path': dirpath})
                    stdout = hdf5_ops.save_file_dict_to_hdf5(dest_file_obj, group_name, file_dict)

                else:
@@ -281,21 +270,6 @@ def create_hdf5_file_from_filesystem_path_new(path_to_input_directory: str,
        print(message)
        logging.error(message)
    else:

        # Step 1: Preprocess all metadata.json files into a lookup dict
        all_metadata_dict = {}

        for dirpath, filenames in path_to_filenames_dict.items():
            metadata_file = next((f for f in filenames if f.endswith('metadata.json')), None)
            if metadata_file:
                metadata_path = os.path.join(dirpath, metadata_file)
                try:
                    with open(metadata_path, 'r') as metafile:
                        all_metadata_dict[dirpath] = json.load(metafile)
                except json.JSONDecodeError:
                    logging.warning(f"Invalid JSON in metadata file: {metadata_path}")
                    all_metadata_dict[dirpath] = {}

        with h5py.File(path_to_output_file, mode=mode, track_order=True) as h5file:
            print('Created file')

@@ -335,14 +309,7 @@ def create_hdf5_file_from_filesystem_path_new(path_to_input_directory: str,
            #     stdout = inst
            #     logging.error('Failed to create group %s into HDF5: %s', group_name, inst)

            # Step 3: During ingestion, attach metadata per file
            # TODO: pass this metadata dict to run_file_reader, line 363
            metadata_dict = all_metadata_dict.get(dirpath, {})

            for filenumber, filename in enumerate(filtered_filenames_list):

                if filename.endswith('metadata.json'):
                    continue

                #file_ext = os.path.splitext(filename)[1]
                #try:

@@ -1,7 +0,0 @@
exclude_paths:
  containing:
    - .ipynb_checkpoints
    - .renku
    - .git
    # - params
    - .Trash
@@ -1,18 +1,3 @@
import sys
import os

try:
    thisFilePath = os.path.abspath(__file__)
except NameError:
    print("Error: __file__ is not available. Ensure the script is being run from a file.")
    print("[Notice] The path to the DIMA package may not be resolved properly.")
    thisFilePath = os.getcwd()  # Use the current directory or specify a default

dimaPath = os.path.normpath(os.path.join(thisFilePath, "..", "..", ".."))  # Move up to the project root

if dimaPath not in sys.path:  # Avoid duplicate entries
    sys.path.insert(0, dimaPath)

import pandas as pd
import os
import sys
@@ -22,7 +7,7 @@ import logging
import numpy as np
import h5py
import re
import yaml


def setup_logging(log_dir, log_filename):
    """Sets up logging to a specified directory and file.
@@ -217,49 +202,43 @@ def convert_string_to_bytes(input_list: list):

def convert_attrdict_to_np_structured_array(attr_value: dict):
    """
    Converts a dictionary of attributes into a NumPy structured array with byte-encoded
    fields, for HDF5 compound-type compatibility. Handles UTF-8 encoding to avoid
    UnicodeEncodeError with non-ASCII characters.

    Parameters
    ----------
    attr_value : dict
        Dictionary with scalar values (int, float, str). Example:
        attr_value = {
            'name': 'Temperature',
            'unit': 'Celsius',
            'value': 23.5,
            'timestamp': '2023-09-26 10:00'
        }

    Returns
    -------
    new_attr_value : ndarray
        1-row structured array with fixed-size byte fields (dtype='S').
        A placeholder array containing b'missing' is returned if the input dictionary is empty.
    """
    if not isinstance(attr_value, dict):
        raise ValueError(f"Input must be a dictionary, got {type(attr_value)}")

    if not attr_value:
        return np.array([('missing',)], dtype=[('value', 'S16')])  # placeholder

    dtype = []
    values_list = []

    max_str_len = max(len(str(v)) for v in attr_value.values())
    byte_len = max_str_len * 4  # UTF-8 worst case: up to 4 bytes per character

    for key, val in attr_value.items():
        if key == 'rename_as':
            continue
        if isinstance(val, (int, float, str)):
            dtype.append((key, f'S{byte_len}'))
            try:
                encoded_val = str(val).encode('utf-8')  # explicit UTF-8
                values_list.append(encoded_val)
            except UnicodeEncodeError as e:
                logging.error(f"Failed to encode {key}={val}: {e}")
                raise
        else:
            logging.warning(f"Skipping unsupported type for key {key}: {type(val)}")

    if values_list:
        return np.array([tuple(values_list)], dtype=dtype)
    else:
        return np.array([('missing',)], dtype=[('value', 'S16')])

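A quick round-trip sketch of the function above (values hypothetical):

    arr = convert_attrdict_to_np_structured_array(
        {'name': 'Temperature', 'unit': '°C', 'value': 23.5})
    print(arr.dtype.names)                 # -> ('name', 'unit', 'value')
    print(arr['unit'][0].decode('utf-8'))  # -> '°C'
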
def infer_units(column_name):
@@ -313,19 +292,6 @@ def copy_directory_with_contraints(input_dir_path, output_dir_path,
    output_dir_path = os.path.normpath(output_dir_path)
    select_dir_keywords = [keyword.replace('/', os.sep) for keyword in select_dir_keywords]

    try:
        with open(os.path.join(dimaPath, 'dima/utils/exclude_path_keywords.yaml'), 'r') as stream:
            exclude_path_dict = yaml.safe_load(stream)
            if isinstance(exclude_path_dict, dict):
                exclude_path_keywords = exclude_path_dict.get('exclude_paths', {}).get('containing', [])
                if not all(isinstance(keyword, str) for keyword in exclude_path_keywords):
                    exclude_path_keywords = []
            else:
                exclude_path_keywords = []
    except (FileNotFoundError, yaml.YAMLError) as e:
        print(f"Warning: unable to load YAML file: {e}")
        exclude_path_keywords = []

    date = created_at('%Y_%m').replace(":", "-")
    log_dir = 'logs/'
    setup_logging(log_dir, f"copy_directory_with_contraints_{date}.log")
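
A small sketch of the exclusion test applied during the directory walk below; the keywords mirror the YAML above, and the walked path is hypothetical:

    exclude_path_keywords = ['.git', '.ipynb_checkpoints']  # as loaded from the YAML above
    dirpath = 'campaign/.ipynb_checkpoints/run1'            # hypothetical walked path
    print(any(excluded in dirpath for excluded in exclude_path_keywords))  # -> True, so it is skipped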
@@ -336,9 +302,8 @@ def copy_directory_with_contraints(input_dir_path, output_dir_path,

    def file_is_selected(filename):
        return not select_file_keywords or any(keyword in filename for keyword in select_file_keywords)

    # Collect paths of directories that are directly connected to the root dir and match select_dir_keywords
    paths = []
    if select_dir_keywords:
@@ -354,11 +319,7 @@ def copy_directory_with_contraints(input_dir_path, output_dir_path,
    for subpath in paths:

        for dirpath, _, filenames in os.walk(subpath, topdown=False):

            # Exclude any dirpath containing a keyword in exclude_path_keywords
            if any(excluded in dirpath for excluded in exclude_path_keywords):
                continue

            # Ensure composite keywords, e.g. <keyword>/<keyword>, are contained in the path
            if select_dir_keywords and not any(keyword in dirpath for keyword in select_dir_keywords):
                continue
@@ -450,57 +411,4 @@ def is_structured_array(attr_val):
    if isinstance(attr_val, np.ndarray):
        return attr_val.dtype.names is not None
    else:
        return False


from pathlib import Path


def find_env_file(start_path=None):
    """
    Find a .env file by walking up the directory tree.
    Looks for .env in the current dir, then in parent dirs up to the filesystem root.

    Args:
        start_path: Starting directory (defaults to the current working directory)

    Returns:
        Path to the .env file, or None if not found
    """
    if start_path is None:
        start_path = os.getcwd()

    current_path = Path(start_path).resolve()

    # Walk up the directory tree
    for path in [current_path] + list(current_path.parents):
        env_file = path / '.env'
        if env_file.exists():
            return str(env_file)

    return None


def load_env_from_root():
    """Load environment variables from a .env file found in the project root or a parent directory."""
    env_file = find_env_file()

    if env_file:
        try:
            from dotenv import load_dotenv
            load_dotenv(env_file, override=True)  # override existing values
            print(f"Loaded .env from: {env_file}")
            return True
        except ImportError:
            # Fall back to a minimal manual parse if python-dotenv is not installed
            with open(env_file, 'r') as f:
                for line in f:
                    line = line.strip()
                    if line and not line.startswith('#') and '=' in line:
                        key, value = line.split('=', 1)
                        os.environ[key.strip()] = value.strip()
            print(f"Manually loaded .env from: {env_file}")
            return True
    else:
        print("No .env file found in the project hierarchy")
        return False
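
Typical usage pairs the two helpers; the variable name checked below is an assumption for illustration:

    if load_env_from_root():
        # 'DATA_ROOT' is a hypothetical variable expected in the .env file
        print(os.environ.get('DATA_ROOT', 'DATA_ROOT not set'))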