Mirror of https://github.com/tiqi-group/pydase.git, synced 2025-12-21 05:31:18 +01:00.

Compare commits (87 commits)
The compared range spans the following 87 commits:

cac74e90db, c24d63f4c0, b0dd5835a3, b0c8af0108, c0016673a8, eadc1df763, 922fdf8fd0, 8b21c42ef7, 2399b3ca9f, db43f5dbbb, f2c0a94904, c36cebf17c, a96387b4d7, d1feff1a6a, 95df2f1650, 0565c82448, 755265bf53, 4c7b386ab4, 92b2326dfc, 9e18783a05, 9be4aac988, f3d659670f, 23f051d6f1, c8979ab2e6, bd33252775, 1fbcbc72bf, 9a8628cfbd, 3d13b20fda, f2183ec3e4, 360aeb5574, e85e93a1d9, ea5fd42919, 247113f1db, c76b0b0b6e, 2d39c56e3d, 60287fef95, c5e1a08c54, 9424d4c412, 0a4c13c617, 5d72604199, 3479c511fe, 9bf3b28390, 0195f9d6f6, 197268255b, 3698cb7f92, 0625832457, f35bcf3be6, 3fe77bb4e5, 9b2d181f4a, 045334e51e, 1d8d17d715, 4d84c9778f, e3c144fa6e, 192075057f, 053050a62c, aacc69ae94, de1483bdc5, b24db00eda, 36ee760610, 3a67c07bad, b9a91e5ee2, f83bc0073b, c66b90c4e5, d0b0803407, e25511768d, 303de82318, db559e8ada, 1b35dba64f, 8a8ac9d297, 40a8863ecd, 1dca04f693, 2b520834dc, d6bad37233, 53a2a3303f, 4f206bbae9, 090b8acd44, 17b2ad32e5, 3c99f3fe04, 2bcc6b9660, c1ace54c78, 56af2a423b, eba0eb83e6, b7818c0d8a, a0c3882f35, 1d773ba09b, 10f1b8691c, a99db6f053
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 5 changes)

```diff
@@ -18,7 +18,10 @@ Provide steps to reproduce the behaviour, including a minimal code snippet (if a
 ## Expected behaviour
 
 A clear and concise description of what you expected to happen.
 
-## Screenshot/Video
+## Actual behaviour
+
+Describe what you see instead of the expected behaviour.
+
+### Screenshot/Video
 
 If applicable, add visual content that helps explain your problem.
 
 ## Additional context
```
.github/workflows/publish-to-pypi.yaml (vendored, 6 changes)

```diff
@@ -22,7 +22,7 @@ jobs:
       - name: Build a binary wheel and a source tarball
         run: python3 -m build
       - name: Store the distribution packages
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: python-package-distributions
           path: dist/
@@ -44,7 +44,7 @@ jobs:
 
     steps:
       - name: Download all the dists
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: python-package-distributions
           path: dist/
@@ -65,7 +65,7 @@ jobs:
 
     steps:
       - name: Download all the dists
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: python-package-distributions
           path: dist/
```
README.md (41 changes)

````diff
@@ -226,44 +226,15 @@ For details, please see [here](https://pydase.readthedocs.io/en/stable/user-guid
 
 ## Logging in pydase
 
-The `pydase` library organizes its loggers on a per-module basis, mirroring the Python package hierarchy. This structured approach allows for granular control over logging levels and behaviour across different parts of the library.
+The `pydase` library provides structured, per-module logging with support for log level configuration, rich formatting, and optional client identification in logs.
 
-### Changing the Log Level
+To configure logging in your own service, you can use:
 
-You have two primary ways to adjust the log levels in `pydase`:
+```python
+from pydase.utils.logging import configure_logging_with_pydase_formatter
+```
 
-1. directly targeting `pydase` loggers
+For more information, see the [full guide](https://pydase.readthedocs.io/en/stable/user-guide/Logging/).
 
-You can set the log level for any `pydase` logger directly in your code. This method is useful for fine-tuning logging levels for specific modules within `pydase`. For instance, if you want to change the log level of the main `pydase` logger or target a submodule like `pydase.data_service`, you can do so as follows:
-
-```python
-# <your_script.py>
-import logging
-
-# Set the log level for the main pydase logger
-logging.getLogger("pydase").setLevel(logging.INFO)
-
-# Optionally, target a specific submodule logger
-# logging.getLogger("pydase.data_service").setLevel(logging.DEBUG)
-
-# Your logger for the current script
-logger = logging.getLogger(__name__)
-logger.info("My info message.")
-```
-
-This approach allows for specific control over different parts of the `pydase` library, depending on your logging needs.
-
-2. using the `ENVIRONMENT` environment variable
-
-For a more global setting that affects the entire `pydase` library, you can utilize the `ENVIRONMENT` environment variable. Setting this variable to "production" will configure all `pydase` loggers to only log messages of level "INFO" and above, filtering out more verbose logging. This is particularly useful for production environments where excessive logging can be overwhelming or unnecessary.
-
-```bash
-ENVIRONMENT="production" python -m <module_using_pydase>
-```
-
-In the absence of this setting, the default behavior is to log everything of level "DEBUG" and above, suitable for development environments where more detailed logs are beneficial.
-
-**Note**: It is recommended to avoid calling the `pydase.utils.logging.setup_logging` function directly, as this may result in duplicated logging messages.
 
 ## Documentation
````
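The shortened README section defers to the new Logging guide added later in this compare. As a quick orientation, a minimal sketch of the recommended setup, using only arguments documented in that guide (the `"my_service"` name is a placeholder):

```python
import logging
import sys

from pydase.utils.logging import configure_logging_with_pydase_formatter

# Scope the pydase-style formatter to a service-specific logger.
configure_logging_with_pydase_formatter(
    name="my_service", level=logging.DEBUG, stream=sys.stderr
)

logger = logging.getLogger("my_service")
logger.debug("Formatted like pydase's own log output.")
```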
Docs API reference page (mkdocstrings directives):

```diff
@@ -1,6 +1,15 @@
 ::: pydase.data_service
     handler: python
 
+::: pydase.data_service.data_service_cache
+    handler: python
+
+::: pydase.data_service.data_service_observer
+    handler: python
+
+::: pydase.data_service.state_manager
+    handler: python
+
 ::: pydase.server.server
     handler: python
 
@@ -38,6 +47,9 @@
     options:
       filters: ["!render_in_frontend"]
 
+::: pydase.utils.logging
+    handler: python
+
 ::: pydase.units
     handler: python
```
docs/user-guide/Logging.md (new file, 91 lines)

````markdown
# Logging in pydase

The `pydase` library organizes its loggers per module, mirroring the Python package hierarchy. This structured approach allows for granular control over logging levels and behaviour across different parts of the library. Logs can also include details about client identification based on headers sent by the client or proxy, providing additional context for debugging or auditing.

## Changing the pydase Log Level

You have two primary ways to adjust the log levels in `pydase`:

1. **Directly targeting `pydase` loggers**

    You can set the log level for any `pydase` logger directly in your code. This method is useful for fine-tuning logging levels for specific modules within `pydase`. For instance, if you want to change the log level of the main `pydase` logger or target a submodule like `pydase.data_service`, you can do so as follows:

    ```python
    # <your_script.py>
    import logging

    from pydase.utils.logging import configure_logging_with_pydase_formatter

    # Set the log level for the main pydase logger
    logging.getLogger("pydase").setLevel(logging.INFO)

    # Optionally, target a specific submodule logger
    # logging.getLogger("pydase.data_service").setLevel(logging.DEBUG)

    configure_logging_with_pydase_formatter(level=logging.DEBUG)

    # Your logger for the current script
    logger = logging.getLogger(__name__)
    logger.debug("My debug message.")
    ```

    This approach allows for specific control over different parts of the `pydase` library, depending on your logging needs.

2. **Using the `ENVIRONMENT` environment variable**

    For a more global setting that affects the entire `pydase` library, you can utilize the `ENVIRONMENT` environment variable. Setting this variable to `"production"` will configure all `pydase` loggers to only log messages of level `"INFO"` and above, filtering out more verbose logging. This is particularly useful for production environments where excessive logging can be overwhelming or unnecessary.

    ```bash
    ENVIRONMENT="production" python -m <module_using_pydase>
    ```

    In the absence of this setting, the default behavior is to log everything of level `"DEBUG"` and above, suitable for development environments where more detailed logs are beneficial.

## Client Identification in pydase Logs

The logging system in `pydase` includes information about clients based on headers sent by the client or a proxy. The priority for identifying the client is fixed and as follows:

1. **`Remote-User` Header**: This header is typically set by authentication servers like [Authelia](https://www.authelia.com/). While it can be set manually by users, its primary purpose is to provide client information authenticated through such servers.
2. **`X-Client-ID` Header**: This header is intended for use by Python clients to pass custom client identification information. It acts as a fallback when the `Remote-User` header is not available.
3. **Default Socket.IO Session ID**: If neither of the above headers is present, the system falls back to the default Socket.IO session ID to identify the client.

For example, log entries might include the following details based on the available headers:

```plaintext
2025-01-20 06:47:50.940 | INFO | pydase.server.web_server.api.v1.application:_get_value:36 - Client [id=This is me!] is getting the value of 'property_attr'
2025-01-20 06:48:13.710 | INFO | pydase.server.web_server.api.v1.application:_get_value:36 - Client [user=Max Muster] is getting the value of 'property_attr'
```

## Configuring Logging in Services

To configure logging in services built with `pydase`, use the helper function [`configure_logging_with_pydase_formatter`][pydase.utils.logging.configure_logging_with_pydase_formatter]. This function sets up a logger with the same formatting used internally by `pydase`, so your service logs match the style and structure of `pydase` logs.

### Example

If your service follows a typical layout like:

```text
└── src
    └── my_service
        ├── __init__.py
        └── ...
```

you should call `configure_logging_with_pydase_formatter` inside `src/my_service/__init__.py`. This ensures the logger is configured as soon as your service is imported, and before any log messages are emitted.

```python title="src/my_service/__init__.py"
import logging
import sys

from pydase.utils.logging import configure_logging_with_pydase_formatter

configure_logging_with_pydase_formatter(
    name="my_service",    # Use the package/module name or None for the root logger
    level=logging.DEBUG,  # Set the desired logging level (defaults to INFO)
    stream=sys.stderr,    # Optional: set the output stream (stderr by default)
)
```

### Notes

- If you pass `name=None`, the root logger will be configured. This affects **all logs** that propagate to the root logger.
- Passing a specific `name` like `"my_service"` allows you to scope the configuration to your service only, which is safer in multi-library environments.
- You can use `sys.stdout` instead of `sys.stderr` if your logs are being captured or processed differently (e.g., in containers or logging systems).
````
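The `X-Client-ID` header described in this guide corresponds to the `client_id` argument added to `pydase.Client` later in this compare. A hedged sketch (the URL and identifier are placeholders):

```python
import pydase

# The client ID is sent as the "X-Client-Id" header, so server logs read
# "Client [id=data-logger] ..." instead of a bare Socket.IO session ID.
client = pydase.Client(
    url="ws://localhost:8001",  # placeholder address
    client_id="data-logger",    # placeholder identifier
)
proxy = client.proxy
```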
Service persistence user guide:

````diff
@@ -2,29 +2,47 @@
 
 `pydase` allows you to easily persist the state of your service by saving it to a file. This is especially useful when you want to maintain the service's state across different runs.
 
-To save the state of your service, pass a `filename` keyword argument to the constructor of the `pydase.Server` class. If the file specified by `filename` does not exist, the state manager will create this file and store its state in it when the service is shut down. If the file already exists, the state manager will load the state from this file, setting the values of its attributes to the values stored in the file.
+To enable persistence, pass a `filename` keyword argument to the constructor of the [`pydase.Server`][pydase.Server] class. The `filename` specifies the file where the state will be saved:
 
-Here's an example:
+- If the file **does not exist**, it will be created and populated with the current state when the service shuts down or saves.
+- If the file **already exists**, the state manager will **load** the saved values into the service at startup.
+
+Here’s an example:
 
 ```python
 import pydase
 
 class Device(pydase.DataService):
-    # ... defining the Device class ...
+    # ... define your service class ...
 
 
 if __name__ == "__main__":
     service = Device()
     pydase.Server(service=service, filename="device_state.json").run()
 ```
 
-In this example, the state of the `Device` service will be saved to `device_state.json` when the service is shut down. If `device_state.json` exists when the server is started, the state manager will restore the state of the service from this file.
+In this example, the service state will be automatically loaded from `device_state.json` at startup (if it exists), and saved to the same file periodically and upon shutdown.
+
+## Automatic Periodic State Saving
+
+When a `filename` is provided, `pydase` automatically enables **periodic autosaving** of the service state to that file. This ensures that the current state is regularly persisted, reducing the risk of data loss during unexpected shutdowns.
+
+The autosave happens every 30 seconds by default. You can customize the interval using the `autosave_interval` argument (in seconds):
+
+```python
+pydase.Server(
+    service=service,
+    filename="device_state.json",
+    autosave_interval=10.0,  # save every 10 seconds
+).run()
+```
+
+To disable automatic saving, set `autosave_interval` to `None`.
 
 ## Controlling Property State Loading with `@load_state`
 
-By default, the state manager only restores values for public attributes of your service. If you have properties that you want to control the loading for, you can use the `@load_state` decorator on your property setters. This indicates to the state manager that the value of the property should be loaded from the state file.
+By default, the state manager only restores values for public attributes of your service (i.e. *it does not restore property values*). If you have properties that you want to control the loading for, you can use the [`@load_state`][pydase.data_service.state_manager.load_state] decorator on your property setters. This indicates to the state manager that the value of the property should be loaded from the state file.
 
-Here is how you can apply the `@load_state` decorator:
+Example:
 
 ```python
 import pydase
@@ -43,7 +61,6 @@ class Device(pydase.DataService):
         self._name = value
 ```
 
-With the `@load_state` decorator applied to the `name` property setter, the state manager will load and apply the `name` property's value from the file storing the state upon server startup, assuming it exists.
+With the `@load_state` decorator applied to the `name` property setter, the state manager will load and apply the `name` property's value from the file upon server startup.
 
-Note: If the service class structure has changed since the last time its state was saved, only the attributes and properties decorated with `@load_state` that have remained the same will be restored from the settings file.
+**Note**: If the structure of your service class changes between saves, only properties decorated with `@load_state` and unchanged public attributes will be restored safely.
````
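Putting the pieces from this page together — persistence file, autosave interval, and a `@load_state`-decorated property — a runnable sketch (file name and interval are placeholders):

```python
import pydase
from pydase.data_service.state_manager import load_state


class Device(pydase.DataService):
    def __init__(self) -> None:
        super().__init__()
        self._name = "Device"

    @property
    def name(self) -> str:
        return self._name

    @name.setter
    @load_state
    def name(self, value: str) -> None:
        # Restored from device_state.json at startup because of @load_state.
        self._name = value


if __name__ == "__main__":
    pydase.Server(
        service=Device(),
        filename="device_state.json",
        autosave_interval=10.0,  # autosave every 10 s; None disables autosaving
    ).run()
```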
Tasks user guide:

````diff
@@ -1,8 +1,8 @@
 # Understanding Tasks
 
-In `pydase`, a task is defined as an asynchronous function without arguments that is decorated with the `@task` decorator and contained in a class that inherits from `pydase.DataService`. These tasks usually contain a while loop and are designed to carry out periodic functions. For example, a task might be used to periodically read sensor data, update a database, or perform any other recurring job.
+In `pydase`, a task is defined as an asynchronous function without arguments that is decorated with the [`@task`][pydase.task.decorator.task] decorator and contained in a class that inherits from [`pydase.DataService`][pydase.DataService]. These tasks usually contain a while loop and are designed to carry out periodic functions. For example, a task might be used to periodically read sensor data, update a database, or perform any other recurring job.
 
-`pydase` allows you to control task execution via both the frontend and Python clients and can automatically start tasks upon initialization of the service. By using the `@task` decorator with the `autostart=True` argument in your service class, `pydase` will automatically start these tasks when the server is started. Here's an example:
+`pydase` allows you to control task execution via both the frontend and Python clients and can automatically start tasks upon initialization of the service. By using the [`@task`][pydase.task.decorator.task] decorator with the `autostart=True` argument in your service class, `pydase` will automatically start these tasks when the server is started. Here's an example:
 
 ```python
 import pydase
@@ -35,4 +35,48 @@ if __name__ == "__main__":
 
 In this example, `read_sensor_data` is a task that continuously reads data from a sensor. By decorating it with `@task(autostart=True)`, it will automatically start running when `pydase.Server(service).run()` is executed.
 
-The `@task` decorator replaces the function with a task object that has `start()` and `stop()` methods. This means you can control the task execution directly using these methods. For instance, you can manually start or stop the task by calling `service.read_sensor_data.start()` and `service.read_sensor_data.stop()`, respectively.
+## Task Lifecycle Control
+
+The [`@task`][pydase.task.decorator.task] decorator replaces the function with a task object that has `start()` and `stop()` methods. This means you can control the task execution directly using these methods. For instance, you can manually start or stop the task by calling `service.read_sensor_data.start()` and `service.read_sensor_data.stop()`, respectively.
+
+## Advanced Task Options
+
+The [`@task`][pydase.task.decorator.task] decorator supports several options inspired by systemd unit services, allowing fine-grained control over task behavior:
+
+- **`autostart`**: Automatically starts the task when the service initializes. Defaults to `False`.
+- **`restart_on_exception`**: Configures whether the task should restart if it exits due to an exception (other than `asyncio.CancelledError`). Defaults to `True`.
+- **`restart_sec`**: Specifies the delay (in seconds) before restarting a failed task. Defaults to `1.0`.
+- **`start_limit_interval_sec`**: Configures a time window (in seconds) for rate limiting task restarts. If the task restarts more than `start_limit_burst` times within this interval, it will no longer restart. Defaults to `None` (disabled).
+- **`start_limit_burst`**: Defines the maximum number of restarts allowed within the interval specified by `start_limit_interval_sec`. Defaults to `3`.
+- **`exit_on_failure`**: If set to `True`, the service will exit if the task fails and either `restart_on_exception` is `False` or the start rate limiting is exceeded. Defaults to `False`.
+
+### Example with Advanced Options
+
+Here is an example showcasing advanced task options:
+
+```python
+import pydase
+from pydase.task.decorator import task
+
+
+class AdvancedTaskService(pydase.DataService):
+    def __init__(self):
+        super().__init__()
+
+    @task(
+        autostart=True,
+        restart_on_exception=True,
+        restart_sec=2.0,
+        start_limit_interval_sec=10.0,
+        start_limit_burst=5,
+        exit_on_failure=True,
+    )
+    async def critical_task(self):
+        while True:
+            raise Exception("Critical failure")
+
+
+if __name__ == "__main__":
+    service = AdvancedTaskService()
+    pydase.Server(service=service).run()
+```
````
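The same lifecycle methods can be driven remotely. A hedged sketch controlling the `read_sensor_data` task from a Python client — the address is a placeholder, and the exact proxy call shape is an assumption that mirrors the `start()`/`stop()` methods this guide describes:

```python
import time

import pydase

# Connect to the (hypothetical) sensor service from the example above.
client = pydase.Client(url="ws://localhost:8001")
proxy = client.proxy

proxy.read_sensor_data.start()  # task objects expose start() and stop()
time.sleep(5.0)                 # let the task run for a while
proxy.read_sensor_data.stop()
```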
Client user guide (connecting to other services):

```diff
@@ -50,12 +50,14 @@ import pydase
 class MyService(pydase.DataService):
     proxy = pydase.Client(
         url="ws://<ip_addr>:<service_port>",
-        block_until_connected=False
+        block_until_connected=False,
+        client_id="my_pydase_client_id",
     ).proxy
     # For SSL-encrypted services, use the wss protocol
     # proxy = pydase.Client(
     #     url="wss://your-domain.ch",
-    #     block_until_connected=False
+    #     block_until_connected=False,
+    #     client_id="my_pydase_client_id",
     # ).proxy
 
 if __name__ == "__main__":
@@ -67,6 +69,7 @@ if __name__ == "__main__":
 In this example:
 - The `MyService` class has a `proxy` attribute that connects to a `pydase` service at `<ip_addr>:<service_port>`.
 - By setting `block_until_connected=False`, the service can start without waiting for the connection to succeed, which is particularly useful in distributed systems where services may initialize in any order.
+- By setting `client_id`, the server will provide more accurate logs of the connecting client. If set, this ID is sent as `X-Client-Id` header in the HTTP(s) request.
 
 ## Custom `socketio.AsyncClient` Connection Parameters
```
Frontend index.html source (stray backtick fix):

```diff
@@ -13,7 +13,7 @@
     // this will be set by the python backend if the service is behind a proxy which strips a prefix. The frontend can use this to build the paths to the resources.
     window.__FORWARDED_PREFIX__ = "";
     window.__FORWARDED_PROTO__ = "";
-  </script>`
+  </script>
 
   <body>
     <noscript>You need to enable JavaScript to run this app.</noscript>
```
frontend/package-lock.json (generated, 3070 changes) — file diff suppressed because it is too large.
Frontend package.json (dependency bumps):

```diff
@@ -10,31 +10,31 @@
     "preview": "vite preview"
   },
   "dependencies": {
-    "@emotion/styled": "^11.11.0",
-    "@mui/material": "^5.14.1",
+    "@emotion/styled": "^11.14.0",
+    "@mui/material": "^5.16.14",
     "bootstrap": "^5.3.3",
     "deep-equal": "^2.2.3",
-    "react": "^18.3.1",
-    "react-bootstrap": "^2.10.0",
-    "react-bootstrap-icons": "^1.11.4",
-    "socket.io-client": "^4.7.1"
+    "react": "^19.0.0",
+    "react-bootstrap": "^2.10.7",
+    "react-bootstrap-icons": "^1.11.5",
+    "socket.io-client": "^4.8.1"
   },
   "devDependencies": {
-    "@eslint/js": "^9.6.0",
+    "@eslint/js": "^9.18.0",
     "@types/deep-equal": "^1.0.4",
     "@types/eslint__js": "^8.42.3",
-    "@types/node": "^20.14.10",
-    "@types/react": "^18.3.3",
-    "@types/react-dom": "^18.3.0",
+    "@types/node": "^20.17.14",
+    "@types/react": "^19.0.7",
+    "@types/react-dom": "^19.0.3",
     "@typescript-eslint/eslint-plugin": "^7.15.0",
-    "@vitejs/plugin-react-swc": "^3.5.0",
-    "eslint": "^8.57.0",
+    "@vitejs/plugin-react-swc": "^3.7.2",
+    "eslint": "^8.57.1",
     "eslint-config-prettier": "^9.1.0",
-    "eslint-plugin-prettier": "^5.1.3",
-    "eslint-plugin-react": "^7.34.3",
+    "eslint-plugin-prettier": "^5.2.3",
+    "eslint-plugin-react": "^7.37.4",
     "prettier": "3.3.2",
-    "typescript": "^5.5.3",
-    "typescript-eslint": "^7.15.0",
-    "vite": "^5.3.1"
+    "typescript": "^5.7.3",
+    "typescript-eslint": "^7.18.0",
+    "vite": "^5.4.12"
   }
 }
```
Frontend NumberComponent (arrow-key handling in `handleKeyDown`):

```diff
@@ -199,16 +199,8 @@ export const NumberComponent = React.memo((props: NumberComponentProps) => {
   const handleKeyDown = (event: React.KeyboardEvent<HTMLInputElement>) => {
     const { key, target } = event;
 
-    // Typecast
     const inputTarget = target as HTMLInputElement;
-    if (
-      key === "F1" ||
-      key === "F5" ||
-      key === "F12" ||
-      key === "Tab" ||
-      key === "ArrowRight" ||
-      key === "ArrowLeft"
-    ) {
+    if (key === "F1" || key === "F5" || key === "F12" || key === "Tab") {
       return;
     }
     event.preventDefault();
@@ -223,6 +215,11 @@ export const NumberComponent = React.memo((props: NumberComponentProps) => {
       // Select everything when pressing Ctrl + a
       inputTarget.setSelectionRange(0, value.length);
       return;
+    } else if (key === "ArrowRight" || key === "ArrowLeft") {
+      // Move the cursor with the arrow keys and store its position
+      selectionStart = key === "ArrowRight" ? selectionStart + 1 : selectionStart - 1;
+      setCursorPosition(selectionStart);
+      return;
     } else if ((key >= "0" && key <= "9") || key === "-") {
       // Check if a number key or a decimal point key is pressed
       ({ value: newValue, selectionStart } = handleNumericKey(
```
MkDocs configuration:

```diff
@@ -12,6 +12,7 @@ nav:
       - Understanding Units: user-guide/Understanding-Units.md
       - Validating Property Setters: user-guide/Validating-Property-Setters.md
       - Configuring pydase: user-guide/Configuration.md
+      - Logging in pydase: user-guide/Logging.md
       - Advanced:
           - Deploying behind a Reverse Proxy: user-guide/advanced/Reverse-Proxy.md
   - Developer Guide:
@@ -54,7 +55,7 @@ plugins:
       handlers:
         python:
           paths: [src]  # search packages in the src folder
-          import:
+          inventories:
             - https://docs.python.org/3/objects.inv
             - https://docs.pydantic.dev/latest/objects.inv
             - https://confz.readthedocs.io/en/latest/objects.inv
```
poetry.lock (generated, 3130 changes) — file diff suppressed because it is too large.
pyproject.toml (version and docs dependency bumps):

```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pydase"
-version = "0.10.7"
+version = "0.10.11"
 description = "A flexible and robust Python library for creating, managing, and interacting with data services, with built-in support for web and RPC servers, and customizable features for diverse use cases."
 authors = ["Mose Mueller <mosmuell@ethz.ch>"]
 readme = "README.md"
@@ -39,9 +39,9 @@ optional = true
 [tool.poetry.group.docs.dependencies]
 mkdocs-material = "^9.5.30"
 mkdocs-include-markdown-plugin = "^3.9.1"
-mkdocstrings = {extras = ["python"], version = "^0.25.2"}
+mkdocstrings = {extras = ["python"], version = "^0.29.0"}
 pymdown-extensions = "^10.1"
-mkdocs-swagger-ui-tag = "^0.6.10"
+mkdocs-swagger-ui-tag = "^0.7.0"
 
 [build-system]
 requires = ["poetry-core"]
```
Client class (new `client_id` argument):

```diff
@@ -56,6 +56,9 @@ class Client:
             [`AsyncClient`][socketio.AsyncClient]. This allows fine-tuning of the
             client's behaviour (e.g., reconnection attempts or reconnection delay).
             Default is an empty dictionary.
+        client_id: Client identification that will be shown in the server logs this
+            client is connecting to. This ID is passed as a `X-Client-Id` header in the
+            HTTP(s) request. Defaults to None.
 
     Example:
         The following example demonstrates a `Client` instance that connects to another
@@ -84,6 +87,7 @@ class Client:
         url: str,
         block_until_connected: bool = True,
         sio_client_kwargs: dict[str, Any] = {},
+        client_id: str | None = None,
     ):
         # Parse the URL to separate base URL and path prefix
         parsed_url = urllib.parse.urlparse(url)
@@ -98,6 +102,7 @@ class Client:
         self._url = url
         self._sio = socketio.AsyncClient(**sio_client_kwargs)
         self._loop = asyncio.new_event_loop()
+        self._client_id = client_id
         self.proxy = ProxyClass(
             sio_client=self._sio, loop=self._loop, reconnect=self.connect
         )
@@ -136,8 +141,14 @@ class Client:
     async def _connect(self) -> None:
         logger.debug("Connecting to server '%s' ...", self._url)
         await self._setup_events()
+
+        headers = {}
+        if self._client_id is not None:
+            headers["X-Client-Id"] = self._client_id
+
         await self._sio.connect(
-            self._base_url,
+            url=self._base_url,
+            headers=headers,
             socketio_path=f"{self._path_prefix}/ws/socket.io",
             transports=["websocket"],
             retry=True,
```
NumberSlider component (type annotations loosened from `float` to `Any`):

```diff
@@ -13,11 +13,11 @@ class NumberSlider(DataService):
 
     Args:
         value:
-            The initial value of the slider. Defaults to 0.
+            The initial value of the slider. Defaults to 0.0.
         min_:
-            The minimum value of the slider. Defaults to 0.
+            The minimum value of the slider. Defaults to 0.0.
         max_:
-            The maximum value of the slider. Defaults to 100.
+            The maximum value of the slider. Defaults to 100.0.
         step_size:
             The increment/decrement step size of the slider. Defaults to 1.0.
 
@@ -84,9 +84,9 @@ class NumberSlider(DataService):
     def __init__(
         self,
         value: Any = 0.0,
-        min_: float = 0.0,
-        max_: float = 100.0,
-        step_size: float = 1.0,
+        min_: Any = 0.0,
+        max_: Any = 100.0,
+        step_size: Any = 1.0,
     ) -> None:
         super().__init__()
         self._step_size = step_size
@@ -95,17 +95,17 @@ class NumberSlider(DataService):
         self._max = max_
 
     @property
-    def min(self) -> float:
+    def min(self) -> Any:
         """The min property."""
         return self._min
 
     @property
-    def max(self) -> float:
+    def max(self) -> Any:
         """The min property."""
         return self._max
 
     @property
-    def step_size(self) -> float:
+    def step_size(self) -> Any:
         """The min property."""
         return self._step_size
```
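The `float` to `Any` loosening lets the slider carry non-float numeric values. A hedged sketch assuming quantity support via `pydase.units` — whether the frontend renders quantity-valued bounds is not shown in this diff:

```python
import pydase
import pydase.units as u
from pydase.components import NumberSlider


class Service(pydase.DataService):
    # Quantity-valued bounds type-check only after the `Any` loosening;
    # using them here is an assumption motivated by this change.
    ramp_voltage = NumberSlider(
        value=1.0 * u.units.V,
        min_=0.0 * u.units.V,
        max_=10.0 * u.units.V,
        step_size=0.1 * u.units.V,
    )
```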
DataServiceCache (new class docstring):

```diff
@@ -14,6 +14,22 @@ logger = logging.getLogger(__name__)
 
 
 class DataServiceCache:
+    """Maintains a serialized cache of the current state of a DataService instance.
+
+    This class is responsible for storing and updating a representation of the service's
+    public attributes and properties. It is primarily used by the StateManager and the
+    web server to serve consistent state to clients without accessing the DataService
+    attributes directly.
+
+    The cache is initialized once upon construction by serializing the full state of
+    the service. After that, it can be incrementally updated using attribute paths and
+    values as notified by the
+    [`DataServiceObserver`][pydase.data_service.data_service_observer.DataServiceObserver].
+
+    Args:
+        service: The DataService instance whose state should be cached.
+    """
+
     def __init__(self, service: "DataService") -> None:
         self._cache: SerializedObject
         self.service = service
```
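A short sketch of the behaviour the new docstring describes — the cache is built once at construction and then read back in serialized form. The key layout in the comment is an assumption based on how the `StateManager` diff below accesses `cache["value"]`:

```python
import pydase
from pydase.data_service.data_service_cache import DataServiceCache


class Device(pydase.DataService):
    voltage = 1.0


# Serializes the full public state of the service once, up front.
cache = DataServiceCache(Device())

# StateManager reads the serialized tree via cache["value"]; each entry is
# a serialized object with keys such as "value" and "type" (assumed layout).
print(cache.cache["value"]["voltage"]["value"])  # expected: 1.0
```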
DataServiceObserver (drop path normalization in dependency lookup):

```diff
@@ -10,7 +10,6 @@ from pydase.observer_pattern.observer.property_observer import (
 )
 from pydase.utils.helpers import (
     get_object_attr_from_path,
-    normalize_full_access_path_string,
 )
 from pydase.utils.serialization.serializer import (
     SerializationPathError,
@@ -102,8 +101,7 @@ class DataServiceObserver(PropertyObserver):
         )
 
     def _notify_dependent_property_changes(self, changed_attr_path: str) -> None:
-        normalized_attr_path = normalize_full_access_path_string(changed_attr_path)
-        changed_props = self.property_deps_dict.get(normalized_attr_path, [])
+        changed_props = self.property_deps_dict.get(changed_attr_path, [])
         for prop in changed_props:
             # only notify about changing attribute if it is not currently being
             # "changed" e.g. when calling the getter of a property within another
```
StateManager (rewritten docstring, new `autosave_interval` argument and `autosave()` coroutine):

```diff
@@ -1,3 +1,4 @@
+import asyncio
 import contextlib
 import json
 import logging
@@ -66,43 +67,41 @@ def has_load_state_decorator(prop: property) -> bool:
 class StateManager:
     """
     Manages the state of a DataService instance, serving as both a cache and a
-    persistence layer. It is designed to provide quick access to the latest known state
-    for newly connecting web clients without the need for expensive property accesses
-    that may involve complex calculations or I/O operations.
+    persistence layer. It provides fast access to the most recently known state of the
+    service and ensures consistent state updates across connected clients and service
+    restarts.
 
-    The StateManager listens for state change notifications from the DataService's
-    callback manager and updates its cache accordingly. This cache does not always
-    reflect the most current complex property states but rather retains the value from
-    the last known state, optimizing for performance and reducing the load on the
-    system.
+    The StateManager is used by the web server to apply updates to service attributes
+    and to serve the current state to newly connected clients. Internally, it creates a
+    [`DataServiceCache`][pydase.data_service.data_service_cache.DataServiceCache]
+    instance to track the state of public attributes and properties.
 
-    While the StateManager ensures that the cached state is as up-to-date as possible,
-    it does not autonomously update complex properties of the DataService. Such
-    properties must be updated programmatically, for instance, by invoking specific
-    tasks or methods that trigger the necessary operations to refresh their state.
-
-    The cached state maintained by the StateManager is particularly useful for web
-    clients that connect to the system and need immediate access to the current state of
-    the DataService. By avoiding direct and potentially costly property accesses, the
-    StateManager provides a snapshot of the DataService's state that is sufficiently
-    accurate for initial rendering and interaction.
+    The StateManager also handles state persistence: it can load a previously saved
+    state from disk at startup and periodically autosave the current state to a file
+    during runtime.
 
     Args:
-        service:
-            The DataService instance whose state is being managed.
-        filename:
-            The file name used for storing the DataService's state.
+        service: The DataService instance whose state is being managed.
+        filename: The file name used for loading and storing the DataService's state.
+            If provided, the state is loaded from this file at startup and saved to it
+            on shutdown or at regular intervals.
+        autosave_interval: Interval in seconds between automatic state save events.
+            If set to `None`, automatic saving is disabled.
 
     Note:
-        The StateManager's cache updates are triggered by notifications and do not
-        include autonomous updates of complex DataService properties, which must be
-        managed programmatically. The cache serves the purpose of providing immediate
-        state information to web clients, reflecting the state after the last property
-        update.
+        The StateManager does not autonomously poll hardware state. It relies on the
+        service to perform such updates. The cache maintained by
+        [`DataServiceCache`][pydase.data_service.data_service_cache.DataServiceCache]
+        reflects the last known state as notified by the `DataServiceObserver`, and is
+        used by the web interface to provide fast and accurate state rendering for
+        connected clients.
     """
 
     def __init__(
-        self, service: "DataService", filename: str | Path | None = None
+        self,
+        service: "DataService",
+        filename: str | Path | None = None,
+        autosave_interval: float | None = None,
     ) -> None:
         self.filename = getattr(service, "_filename", None)
 
@@ -115,6 +114,29 @@ class StateManager:
 
         self.service = service
         self.cache_manager = DataServiceCache(self.service)
+        self.autosave_interval = autosave_interval
+
+    async def autosave(self) -> None:
+        """Periodically saves the current service state to the configured file.
+
+        This coroutine is automatically started by the [`pydase.Server`][pydase.Server]
+        when a filename is provided. It runs in the background and writes the latest
+        known state of the service to disk every `autosave_interval` seconds.
+
+        If `autosave_interval` is set to `None`, autosaving is disabled and this
+        coroutine exits immediately.
+        """
+
+        if self.autosave_interval is None:
+            return
+
+        while True:
+            try:
+                if self.filename is not None:
+                    self.save_state()
+                await asyncio.sleep(self.autosave_interval)
+            except Exception as e:
+                logger.exception(e)
 
     @property
     def cache_value(self) -> dict[str, SerializedObject]:
@@ -122,23 +144,21 @@ class StateManager:
         return cast(dict[str, SerializedObject], self.cache_manager.cache["value"])
 
     def save_state(self) -> None:
-        """
-        Saves the DataService's current state to a JSON file defined by `self.filename`.
-        Logs an error if `self.filename` is not set.
-        """
+        """Saves the DataService's current state to a JSON file defined by
+        `self.filename`.
+        """
+
         if self.filename is not None:
             with open(self.filename, "w") as f:
                 json.dump(self.cache_value, f, indent=4)
         else:
-            logger.info(
+            logger.debug(
                 "State manager was not initialised with a filename. Skipping "
                 "'save_state'..."
            )
 
     def load_state(self) -> None:
-        """
-        Loads the DataService's state from a JSON file defined by `self.filename`.
+        """Loads the DataService's state from a JSON file defined by `self.filename`.
+
         Updates the service's attributes, respecting type and read-only constraints.
         """
 
@@ -191,8 +211,7 @@ class StateManager:
         path: str,
         serialized_value: SerializedObject,
     ) -> None:
-        """
-        Sets the value of an attribute in the service managed by the `StateManager`
+        """Sets the value of an attribute in the service managed by the `StateManager`
         given its path as a dot-separated string.
 
         This method updates the attribute specified by 'path' with 'value' only if the
```
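How the new `autosave()` coroutine is meant to be driven, mirroring what the `Server` diff further below does with `loop.create_task`. The file name and interval here are placeholders:

```python
import asyncio

import pydase
from pydase.data_service.state_manager import StateManager


class Device(pydase.DataService):
    voltage = 1.0


async def main() -> None:
    manager = StateManager(
        service=Device(),
        filename="device_state.json",  # placeholder path
        autosave_interval=5.0,
    )
    manager.load_state()  # restore a previously saved state, if any

    # Runs until cancelled, calling save_state() every 5 seconds.
    autosave_task = asyncio.create_task(manager.autosave())
    await asyncio.sleep(12.0)
    autosave_task.cancel()


asyncio.run(main())
```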
(File diff suppressed because one or more lines are too long.)

src/pydase/frontend/assets/index-DpoEqi_N.js (new file, 71 lines) — file diff suppressed because one or more lines are too long.
Bundled frontend index.html (new asset hashes, stray backtick fix):

```diff
@@ -7,15 +7,15 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <meta name="theme-color" content="#000000" />
     <meta name="description" content="Web site displaying a pydase UI." />
-    <script type="module" crossorigin src="/assets/index-BqF7l_R8.js"></script>
-    <link rel="stylesheet" crossorigin href="/assets/index-D2aktF3W.css">
+    <script type="module" crossorigin src="/assets/index-DpoEqi_N.js"></script>
+    <link rel="stylesheet" crossorigin href="/assets/index-DJzFvk4W.css">
   </head>
 
   <script>
     // this will be set by the python backend if the service is behind a proxy which strips a prefix. The frontend can use this to build the paths to the resources.
     window.__FORWARDED_PREFIX__ = "";
     window.__FORWARDED_PROTO__ = "";
-  </script>`
+  </script>
 
   <body>
     <noscript>You need to enable JavaScript to run this app.</noscript>
```
Observable (register observables returned from properties):

```diff
@@ -55,6 +55,10 @@ class Observable(ObservableObject):
         value = super().__getattribute__(name)
 
         if is_property_attribute(self, name):
+            # fixes https://github.com/tiqi-group/pydase/issues/187 and
+            # https://github.com/tiqi-group/pydase/issues/192
+            if isinstance(value, ObservableObject):
+                value.add_observer(self, name)
             self._notify_changed(name, value)
 
         return value
```
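A sketch of the situation this change appears to address (see the referenced issues): a property that returns an observable object now registers that object as observed under the property's name, so changes to it propagate notifications. The service layout below is illustrative, not taken from the issues:

```python
import pydase


class SubService(pydase.DataService):
    value = 1.0


class MyService(pydase.DataService):
    def __init__(self) -> None:
        super().__init__()
        self._sub = SubService()

    @property
    def sub(self) -> SubService:
        # Accessing this property now calls value.add_observer(self, "sub"),
        # so later changes to my_service.sub.value are observed and notified.
        return self._sub
```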
PropertyObserver (quote style in dict access paths):

```diff
@@ -100,7 +100,7 @@ class PropertyObserver(Observer):
         elif isinstance(collection, dict):
             for key, val in collection.items():
                 if isinstance(val, Observable):
-                    new_prefix = f"{parent_path}['{key}']"
+                    new_prefix = f'{parent_path}["{key}"]'
                     deps.update(
                         self._get_properties_and_their_dependencies(val, new_prefix)
                     )
```
@@ -84,21 +84,17 @@ class Server:
|
|||||||
The `Server` class provides a flexible server implementation for the `DataService`.
|
The `Server` class provides a flexible server implementation for the `DataService`.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
service:
|
service: The DataService instance that this server will manage.
|
||||||
The DataService instance that this server will manage.
|
host: The host address for the server. Defaults to `'0.0.0.0'`, which means all
|
||||||
host:
|
|
||||||
The host address for the server. Defaults to `'0.0.0.0'`, which means all
|
|
||||||
available network interfaces.
|
available network interfaces.
|
||||||
web_port:
|
web_port: The port number for the web server. If set to None, it will use the
|
||||||
The port number for the web server. Defaults to
|
port defined in
|
||||||
[`ServiceConfig().web_port`][pydase.config.ServiceConfig.web_port].
|
[`ServiceConfig().web_port`][pydase.config.ServiceConfig.web_port]. Defaults
|
||||||
enable_web:
|
to None.
|
||||||
Whether to enable the web server.
|
enable_web: Whether to enable the web server.
|
||||||
filename:
|
filename: Filename of the file managing the service state persistence.
|
||||||
Filename of the file managing the service state persistence.
|
additional_servers: A list of additional servers to run alongside the main
|
||||||
additional_servers:
|
server.
|
||||||
A list of additional servers to run alongside the main server.
|
|
||||||
|
|
||||||
Here's an example of how you might define an additional server:
|
Here's an example of how you might define an additional server:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -137,31 +133,40 @@ class Server:
             )
             server.run()
             ```
-        **kwargs:
-            Additional keyword arguments.
+        autosave_interval: Interval in seconds between automatic state save events.
+            If set to `None`, automatic saving is disabled. Defaults to 30 seconds.
+        **kwargs: Additional keyword arguments.
     """
 
     def __init__(  # noqa: PLR0913
         self,
         service: DataService,
         host: str = "0.0.0.0",
-        web_port: int = ServiceConfig().web_port,
+        web_port: int | None = None,
         enable_web: bool = True,
         filename: str | Path | None = None,
         additional_servers: list[AdditionalServer] | None = None,
+        autosave_interval: float = 30.0,
         **kwargs: Any,
     ) -> None:
         if additional_servers is None:
             additional_servers = []
         self._service = service
         self._host = host
-        self._web_port = web_port
+        if web_port is None:
+            self._web_port = ServiceConfig().web_port
+        else:
+            self._web_port = web_port
         self._enable_web = enable_web
         self._kwargs = kwargs
         self._additional_servers = additional_servers
         self.should_exit = False
         self.servers: dict[str, asyncio.Future[Any]] = {}
-        self._state_manager = StateManager(self._service, filename)
+        self._state_manager = StateManager(
+            service=self._service,
+            filename=filename,
+            autosave_interval=autosave_interval,
+        )
         self._observer = DataServiceObserver(self._state_manager)
         self._state_manager.load_state()
         autostart_service_tasks(self._service)

@@ -223,6 +228,8 @@ class Server:
         server_task.add_done_callback(self._handle_server_shutdown)
         self.servers["web"] = server_task
 
+        self._loop.create_task(self._state_manager.autosave())
+
     def _handle_server_shutdown(self, task: asyncio.Task[Any]) -> None:
         """Handle server shutdown. If the service should exit, do nothing. Else, make
         the service exit."""

@@ -258,7 +265,7 @@ class Server:
         except asyncio.CancelledError:
             logger.debug("Cancelled '%s' server.", server_name)
         except Exception as e:
-            logger.error("Unexpected exception: %s", e)
+            logger.exception("Unexpected exception: %s", e)
 
     async def __cancel_tasks(self) -> None:
         for task in asyncio.all_tasks(self._loop):
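Note on the change above: `Server` now wires the autosave interval through to `StateManager` and schedules `StateManager.autosave()` on its event loop. A minimal sketch of how a service might opt into faster autosaving (the service class and filename are illustrative, not part of the diff):

```python
import pydase


class MyService(pydase.DataService):
    value = 1.0


# autosave_interval is the keyword introduced in this diff; 30.0 s is the
# default, and the docstring states that None disables autosaving.
pydase.Server(MyService(), filename="state.json", autosave_interval=10.0).run()
```
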
@@ -1,9 +1,11 @@
 import inspect
 import logging
+from functools import partial
 from typing import TYPE_CHECKING
 
 import aiohttp.web
 import aiohttp_middlewares.error
+import click
 
 from pydase.data_service.state_manager import StateManager
 from pydase.server.web_server.api.v1.endpoints import (

@@ -25,12 +27,14 @@ STATUS_FAILED = 400
 
 
 async def _get_value(
-    state_manager: StateManager, request: aiohttp.web.Request
+    request: aiohttp.web.Request, state_manager: StateManager
 ) -> aiohttp.web.Response:
-    logger.info("Handle api request: %s", request)
+    log_id = get_log_id(request)
+
     access_path = request.rel_url.query["access_path"]
+
+    logger.info("Client [%s] is getting the value of '%s'", log_id, access_path)
+
     status = STATUS_OK
     try:
         result = get_value(state_manager, access_path)

@@ -42,10 +46,16 @@ async def _get_value(
 
 
 async def _update_value(
-    state_manager: StateManager, request: aiohttp.web.Request
+    request: aiohttp.web.Request, state_manager: StateManager
 ) -> aiohttp.web.Response:
+    log_id = get_log_id(request)
+
     data: UpdateDict = await request.json()
+
+    logger.info(
+        "Client [%s] is updating the value of '%s'", log_id, data["access_path"]
+    )
+
     try:
         update_value(state_manager, data)
 

@@ -56,11 +66,17 @@ async def _update_value(
 
 
 async def _trigger_method(
-    state_manager: StateManager, request: aiohttp.web.Request
+    request: aiohttp.web.Request, state_manager: StateManager
 ) -> aiohttp.web.Response:
+    log_id = get_log_id(request)
+
     data: TriggerMethodDict = await request.json()
 
-    method = get_object_attr_from_path(state_manager.service, data["access_path"])
+    access_path = data["access_path"]
+
+    logger.info("Client [%s] is triggering the method '%s'", log_id, access_path)
+
+    method = get_object_attr_from_path(state_manager.service, access_path)
 
     try:
         if inspect.iscoroutinefunction(method):

@@ -77,22 +93,33 @@ async def _trigger_method(
         return aiohttp.web.json_response(dump(e), status=STATUS_FAILED)
 
 
+def get_log_id(request: aiohttp.web.Request) -> str:
+    client_id_header = request.headers.get("x-client-id", None)
+    remote_username_header = request.headers.get("remote-user", None)
+
+    if remote_username_header is not None:
+        log_id = f"user={click.style(remote_username_header, fg='cyan')}"
+    elif client_id_header is not None:
+        log_id = f"id={click.style(client_id_header, fg='cyan')}"
+    else:
+        log_id = f"id={click.style(None, fg='cyan')}"
+
+    return log_id
+
+
 def create_api_application(state_manager: StateManager) -> aiohttp.web.Application:
     api_application = aiohttp.web.Application(
         middlewares=(aiohttp_middlewares.error.error_middleware(),)
     )
 
     api_application.router.add_get(
-        "/get_value",
-        lambda request: _get_value(state_manager=state_manager, request=request),
+        "/get_value", partial(_get_value, state_manager=state_manager)
     )
     api_application.router.add_put(
-        "/update_value",
-        lambda request: _update_value(state_manager=state_manager, request=request),
+        "/update_value", partial(_update_value, state_manager=state_manager)
    )
    api_application.router.add_put(
-        "/trigger_method",
-        lambda request: _trigger_method(state_manager=state_manager, request=request),
+        "/trigger_method", partial(_trigger_method, state_manager=state_manager)
    )
 
    return api_application
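Note: `get_log_id()` above resolves the log identifier from the `remote-user` header first, then `x-client-id`, falling back to `None`. A hedged sketch of a client tagging its REST requests accordingly (host and port are illustrative; the route matches the tests later in this diff):

```python
import asyncio

import aiohttp


async def main() -> None:
    async with aiohttp.ClientSession("http://localhost:8001") as session:
        # The handlers above will log this request as "Client [id=my-script] ..."
        resp = await session.get(
            "/api/v1/get_value?access_path=some_attr",
            headers={"X-Client-Id": "my-script"},
        )
        print(await resp.json())


asyncio.run(main())
```
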
@@ -141,22 +141,41 @@ def setup_sio_server(
 def setup_sio_events(sio: socketio.AsyncServer, state_manager: StateManager) -> None:  # noqa: C901
     @sio.event  # type: ignore
     async def connect(sid: str, environ: Any) -> None:
-        logger.debug("Client [%s] connected", click.style(str(sid), fg="cyan"))
+        client_id_header = environ.get("HTTP_X_CLIENT_ID", None)
+        remote_username_header = environ.get("HTTP_REMOTE_USER", None)
+
+        if remote_username_header is not None:
+            log_id = f"user={click.style(remote_username_header, fg='cyan')}"
+        elif client_id_header is not None:
+            log_id = f"id={click.style(client_id_header, fg='cyan')}"
+        else:
+            log_id = f"sid={click.style(sid, fg='cyan')}"
+
+        async with sio.session(sid) as session:
+            session["client_id"] = log_id
+            logger.info("Client [%s] connected", session["client_id"])
 
     @sio.event  # type: ignore
     async def disconnect(sid: str) -> None:
-        logger.debug("Client [%s] disconnected", click.style(str(sid), fg="cyan"))
+        async with sio.session(sid) as session:
+            logger.info("Client [%s] disconnected", session["client_id"])
 
     @sio.event  # type: ignore
     async def service_serialization(sid: str) -> SerializedObject:
-        logger.debug(
-            "Client [%s] requested service serialization",
-            click.style(str(sid), fg="cyan"),
-        )
+        async with sio.session(sid) as session:
+            logger.info(
+                "Client [%s] requested service serialization", session["client_id"]
+            )
         return state_manager.cache_manager.cache
 
     @sio.event
     async def update_value(sid: str, data: UpdateDict) -> SerializedObject | None:
+        async with sio.session(sid) as session:
+            logger.info(
+                "Client [%s] is updating the value of '%s'",
+                session["client_id"],
+                data["access_path"],
+            )
         try:
             endpoints.update_value(state_manager=state_manager, data=data)
         except Exception as e:

@@ -166,6 +185,12 @@ def setup_sio_events(sio: socketio.AsyncServer, state_manager: StateManager) -> None:
 
     @sio.event
     async def get_value(sid: str, access_path: str) -> SerializedObject:
+        async with sio.session(sid) as session:
+            logger.info(
+                "Client [%s] is getting the value of '%s'",
+                session["client_id"],
+                access_path,
+            )
         try:
             return endpoints.get_value(
                 state_manager=state_manager, access_path=access_path

@@ -176,16 +201,23 @@ def setup_sio_events(sio: socketio.AsyncServer, state_manager: StateManager) -> None:
 
     @sio.event
     async def trigger_method(sid: str, data: TriggerMethodDict) -> Any:
-        method = get_object_attr_from_path(state_manager.service, data["access_path"])
+        async with sio.session(sid) as session:
+            logger.info(
+                "Client [%s] is triggering the method '%s'",
+                session["client_id"],
+                data["access_path"],
+            )
         try:
+            method = get_object_attr_from_path(
+                state_manager.service, data["access_path"]
+            )
             if inspect.iscoroutinefunction(method):
                 return await endpoints.trigger_async_method(
                     state_manager=state_manager, data=data
                 )
             return endpoints.trigger_method(state_manager=state_manager, data=data)
         except Exception as e:
-            logger.error(e)
+            logger.exception(e)
             return dump(e)
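Note: the socket.io `connect` handler above resolves the client id once and stores it in the session, so later events reuse it. A sketch of a client identifying itself via headers (mirrors the new tests at the end of this diff; URL and access path are illustrative):

```python
import asyncio

import socketio


async def main() -> None:
    client = socketio.AsyncClient()
    # With this header, the server logs events as "Client [id=my-client] ..."
    await client.connect(
        "http://localhost:8001",
        socketio_path="/ws/socket.io",
        transports=["websocket"],
        headers={"X-Client-Id": "my-client"},
    )
    print(await client.call("get_value", "some_attr"))
    await client.disconnect()


asyncio.run(main())
```
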
@@ -150,10 +150,7 @@ class WebServer:
                 f"{escaped_prefix}/favicon.ico",
             )
 
-            return aiohttp.web.Response(
-                text=modified_html, content_type="text/html"
-            )
-
-        return aiohttp.web.FileResponse(self.frontend_src / "index.html")
+            return aiohttp.web.Response(text=modified_html, content_type="text/html")
 
         app = aiohttp.web.Application()
@@ -26,15 +26,25 @@ class PerInstanceTaskDescriptor(Generic[R]):
     the service class.
     """
 
-    def __init__(
+    def __init__(  # noqa: PLR0913
         self,
         func: Callable[[Any], Coroutine[None, None, R]]
         | Callable[[], Coroutine[None, None, R]],
-        autostart: bool = False,
+        autostart: bool,
+        restart_on_exception: bool,
+        restart_sec: float,
+        start_limit_interval_sec: float | None,
+        start_limit_burst: int,
+        exit_on_failure: bool,
     ) -> None:
         self.__func = func
         self.__autostart = autostart
         self.__task_instances: dict[object, Task[R]] = {}
+        self.__restart_on_exception = restart_on_exception
+        self.__restart_sec = restart_sec
+        self.__start_limit_interval_sec = start_limit_interval_sec
+        self.__start_limit_burst = start_limit_burst
+        self.__exit_on_failure = exit_on_failure
 
     def __set_name__(self, owner: type[DataService], name: str) -> None:
         """Stores the name of the task within the owning class. This method is called

@@ -67,14 +77,28 @@ class PerInstanceTaskDescriptor(Generic[R]):
         if instance not in self.__task_instances:
             self.__task_instances[instance] = instance._initialise_new_objects(
                 self.__task_name,
-                Task(self.__func.__get__(instance, owner), autostart=self.__autostart),
+                Task(
+                    self.__func.__get__(instance, owner),
+                    autostart=self.__autostart,
+                    restart_on_exception=self.__restart_on_exception,
+                    restart_sec=self.__restart_sec,
+                    start_limit_interval_sec=self.__start_limit_interval_sec,
+                    start_limit_burst=self.__start_limit_burst,
+                    exit_on_failure=self.__exit_on_failure,
+                ),
             )
 
         return self.__task_instances[instance]
 
 
-def task(
-    *, autostart: bool = False
+def task(  # noqa: PLR0913
+    *,
+    autostart: bool = False,
+    restart_on_exception: bool = True,
+    restart_sec: float = 1.0,
+    start_limit_interval_sec: float | None = None,
+    start_limit_burst: int = 3,
+    exit_on_failure: bool = False,
 ) -> Callable[
     [
         Callable[[Any], Coroutine[None, None, R]]

@@ -96,13 +120,30 @@ def task(
     periodically or perform asynchronous operations, such as polling data sources,
     updating databases, or any recurring job that should be managed within the context
     of a `DataService`.
-    time.
+
+    The keyword arguments that can be passed to this decorator are inspired by systemd
+    unit services.
 
     Args:
         autostart:
             If set to True, the task will automatically start when the service is
             initialized. Defaults to False.
+        restart_on_exception:
+            Configures whether the task shall be restarted when it exits with an
+            exception other than [`asyncio.CancelledError`][asyncio.CancelledError].
+        restart_sec:
+            Configures the time to sleep before restarting a task. Defaults to 1.0.
+        start_limit_interval_sec:
+            Configures start rate limiting. Tasks which are started more than
+            `start_limit_burst` times within an `start_limit_interval_sec` time span are
+            not permitted to start any more. Defaults to None (disabled rate limiting).
+        start_limit_burst:
+            Configures unit start rate limiting. Tasks which are started more than
+            `start_limit_burst` times within an `start_limit_interval_sec` time span are
+            not permitted to start any more. Defaults to 3.
+        exit_on_failure:
+            If True, exit the service if the task fails and restart_on_exception is
+            False or burst limits are exceeded.
 
     Returns:
         A decorator that wraps an asynchronous function in a
         [`PerInstanceTaskDescriptor`][pydase.task.decorator.PerInstanceTaskDescriptor]

@@ -140,6 +181,14 @@ def task(
         func: Callable[[Any], Coroutine[None, None, R]]
         | Callable[[], Coroutine[None, None, R]],
     ) -> PerInstanceTaskDescriptor[R]:
-        return PerInstanceTaskDescriptor(func, autostart=autostart)
+        return PerInstanceTaskDescriptor(
+            func,
+            autostart=autostart,
+            restart_on_exception=restart_on_exception,
+            restart_sec=restart_sec,
+            start_limit_interval_sec=start_limit_interval_sec,
+            start_limit_burst=start_limit_burst,
+            exit_on_failure=exit_on_failure,
+        )
 
     return decorator
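Note: with the extended `@task` decorator above, restart behaviour is declared per task. A hedged usage sketch (the service class and task body are illustrative; the import path follows the docstring references above):

```python
import asyncio

import pydase
from pydase.task.decorator import task


class MyService(pydase.DataService):
    # Restart after each failure with a 1 s delay, but stop for good if the
    # task has to be restarted more than 3 times within a 60 s window.
    @task(
        autostart=True,
        restart_on_exception=True,
        restart_sec=1.0,
        start_limit_interval_sec=60.0,
        start_limit_burst=3,
    )
    async def poll(self) -> None:
        while True:
            await asyncio.sleep(1.0)
```
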
@@ -1,7 +1,10 @@
 import asyncio
-import inspect
 import logging
+import os
+import signal
 from collections.abc import Callable, Coroutine
+from datetime import datetime
+from time import time
 from typing import (
     Generic,
     TypeVar,

@@ -28,6 +31,9 @@ class Task(pydase.data_service.data_service.DataService, Generic[R]):
     decorator, it is replaced by a `Task` instance that controls the execution of the
     original function.
 
+    The keyword arguments that can be passed to this class are inspired by systemd unit
+    services.
+
     Args:
         func:
             The asynchronous function that this task wraps. It must be a coroutine

@@ -35,6 +41,22 @@ class Task(pydase.data_service.data_service.DataService, Generic[R]):
         autostart:
             If set to True, the task will automatically start when the service is
             initialized. Defaults to False.
+        restart_on_exception:
+            Configures whether the task shall be restarted when it exits with an
+            exception other than [`asyncio.CancelledError`][asyncio.CancelledError].
+        restart_sec:
+            Configures the time to sleep before restarting a task. Defaults to 1.0.
+        start_limit_interval_sec:
+            Configures start rate limiting. Tasks which are started more than
+            `start_limit_burst` times within an `start_limit_interval_sec` time span are
+            not permitted to start any more. Defaults to None (disabled rate limiting).
+        start_limit_burst:
+            Configures unit start rate limiting. Tasks which are started more than
+            `start_limit_burst` times within an `start_limit_interval_sec` time span are
+            not permitted to start any more. Defaults to 3.
+        exit_on_failure:
+            If True, exit the service if the task fails and restart_on_exception is
+            False or burst limits are exceeded.
 
     Example:
         ```python

@@ -63,14 +85,24 @@ class Task(pydase.data_service.data_service.DataService, Generic[R]):
         `service.my_task.start()` and `service.my_task.stop()`, respectively.
     """
 
-    def __init__(
+    def __init__(  # noqa: PLR0913
         self,
         func: Callable[[], Coroutine[None, None, R | None]],
         *,
-        autostart: bool = False,
+        autostart: bool,
+        restart_on_exception: bool,
+        restart_sec: float,
+        start_limit_interval_sec: float | None,
+        start_limit_burst: int,
+        exit_on_failure: bool,
     ) -> None:
         super().__init__()
         self._autostart = autostart
+        self._restart_on_exception = restart_on_exception
+        self._restart_sec = restart_sec
+        self._start_limit_interval_sec = start_limit_interval_sec
+        self._start_limit_burst = start_limit_burst
+        self._exit_on_failure = exit_on_failure
        self._func_name = func.__name__
        self._func = func
        self._task: asyncio.Task[R | None] | None = None

@@ -109,38 +141,95 @@ class Task(pydase.data_service.data_service.DataService, Generic[R]):
             self._task = None
             self._status = TaskStatus.NOT_RUNNING
 
-            exception = task.exception()
+            exception = None
+            try:
+                exception = task.exception()
+            except asyncio.CancelledError:
+                return
+
             if exception is not None:
-                # Handle the exception, or you can re-raise it.
                 logger.error(
-                    "Task '%s' encountered an exception: %s: %s",
+                    "Task '%s' encountered an exception: %r",
                     self._func_name,
-                    type(exception).__name__,
                     exception,
                 )
-                raise exception
+                os.kill(os.getpid(), signal.SIGTERM)
             else:
                 self._result = task.result()
 
-        async def run_task() -> R | None:
-            if inspect.iscoroutinefunction(self._func):
-                logger.info("Starting task %r", self._func_name)
-                self._status = TaskStatus.RUNNING
-                res: Coroutine[None, None, R | None] = self._func()
-                try:
-                    return await res
-                except asyncio.CancelledError:
-                    logger.info("Task '%s' was cancelled", self._func_name)
-                    return None
-            logger.warning(
-                "Cannot start task %r. Function has not been bound yet", self._func_name
-            )
-            return None
-
         logger.info("Creating task %r", self._func_name)
-        self._task = self._loop.create_task(run_task())
+        self._task = self._loop.create_task(self.__running_task_loop())
         self._task.add_done_callback(task_done_callback)
 
+    async def __running_task_loop(self) -> R | None:
+        logger.info("Starting task %r", self._func_name)
+        self._status = TaskStatus.RUNNING
+        attempts = 0
+        start_time_of_start_limit_interval = None
+
+        while True:
+            try:
+                return await self._func()
+            except asyncio.CancelledError:
+                logger.info("Task '%s' was cancelled", self._func_name)
+                raise
+            except Exception as e:
+                attempts, start_time_of_start_limit_interval = (
+                    self._handle_task_exception(
+                        e, attempts, start_time_of_start_limit_interval
+                    )
+                )
+                if not self._should_restart_task(
+                    attempts, start_time_of_start_limit_interval
+                ):
+                    if self._exit_on_failure:
+                        raise e
+                    break
+                await asyncio.sleep(self._restart_sec)
+        return None
+
+    def _handle_task_exception(
+        self,
+        exception: Exception,
+        attempts: int,
+        start_time_of_start_limit_interval: float | None,
+    ) -> tuple[int, float]:
+        """Handle an exception raised during task execution."""
+        if start_time_of_start_limit_interval is None:
+            start_time_of_start_limit_interval = time()
+
+        attempts += 1
+        logger.exception(
+            "Task %r encountered an exception: %r [attempt %s since %s].",
+            self._func.__name__,
+            exception,
+            attempts,
+            datetime.fromtimestamp(start_time_of_start_limit_interval),
+        )
+        return attempts, start_time_of_start_limit_interval
+
+    def _should_restart_task(
+        self, attempts: int, start_time_of_start_limit_interval: float
+    ) -> bool:
+        """Determine if the task should be restarted."""
+        if not self._restart_on_exception:
+            return False
+
+        if self._start_limit_interval_sec is not None:
+            if (
+                time() - start_time_of_start_limit_interval
+            ) > self._start_limit_interval_sec:
+                # Reset attempts if interval is exceeded
+                start_time_of_start_limit_interval = time()
+                attempts = 1
+            elif attempts > self._start_limit_burst:
+                logger.error(
+                    "Task %r exceeded restart burst limit. Stopping.",
+                    self._func.__name__,
+                )
+                return False
+        return True
+
     def stop(self) -> None:
         """Stops the running asynchronous task by cancelling it."""
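Note: the restart loop above mirrors systemd's start rate limiting: every failure bumps an attempt counter, the counter window resets once `start_limit_interval_sec` has passed since its first failure, and restarting stops once the counter exceeds `start_limit_burst` inside one window. A standalone sketch of that decision rule (names are local to this note, not pydase API):

```python
from time import time


def should_restart(
    restart_on_exception: bool,
    attempts: int,
    window_start: float,
    start_limit_interval_sec: float | None,
    start_limit_burst: int,
) -> bool:
    if not restart_on_exception:
        return False
    if start_limit_interval_sec is not None:
        if time() - window_start > start_limit_interval_sec:
            return True  # window expired; the caller resets the counter
        if attempts > start_limit_burst:
            return False  # burst limit exceeded within the window
    return True
```
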
@@ -223,25 +223,3 @@ def current_event_loop_exists() -> bool:
     import asyncio
 
     return asyncio.get_event_loop_policy()._local._loop is not None  # type: ignore
-
-
-def normalize_full_access_path_string(s: str) -> str:
-    """Normalizes a string representing a full access path by converting double quotes
-    to single quotes.
-
-    This function is useful for ensuring consistency in strings that represent access
-    paths containing dictionary keys, by replacing all double quotes (`"`) with single
-    quotes (`'`).
-
-    Args:
-        s (str): The input string to be normalized.
-
-    Returns:
-        A new string with all double quotes replaced by single quotes.
-
-    Example:
-        >>> normalize_full_access_path_string('dictionary["first"].my_task')
-        "dictionary['first'].my_task"
-    """
-
-    return s.replace('"', "'")
@@ -4,7 +4,7 @@ import logging.config
 import sys
 from collections.abc import Callable
 from copy import copy
-from typing import ClassVar, Literal
+from typing import ClassVar, Literal, TextIO
 
 import click
 import socketio  # type: ignore[import-untyped]

@@ -29,22 +29,44 @@ LOGGING_CONFIG = {
             "datefmt": "%Y-%m-%d %H:%M:%S",
         },
     },
+    "filters": {
+        "only_pydase_server": {
+            "()": "pydase.utils.logging.NameFilter",
+            "match": "pydase.server",
+        },
+        "exclude_pydase_server": {
+            "()": "pydase.utils.logging.NameFilter",
+            "match": "pydase.server",
+            "invert": True,
+        },
+    },
     "handlers": {
-        "default": {
+        "stdout_handler": {
             "formatter": "default",
             "class": "logging.StreamHandler",
             "stream": "ext://sys.stdout",
+            "filters": ["only_pydase_server"],
+        },
+        "stderr_handler": {
+            "formatter": "default",
+            "class": "logging.StreamHandler",
+            "stream": "ext://sys.stderr",
+            "filters": ["exclude_pydase_server"],
         },
     },
     "loggers": {
-        "pydase": {"handlers": ["default"], "level": LOG_LEVEL, "propagate": False},
+        "pydase": {
+            "handlers": ["stdout_handler", "stderr_handler"],
+            "level": LOG_LEVEL,
+            "propagate": False,
+        },
         "aiohttp_middlewares": {
-            "handlers": ["default"],
+            "handlers": ["stderr_handler"],
             "level": logging.WARNING,
             "propagate": False,
         },
         "aiohttp": {
-            "handlers": ["default"],
+            "handlers": ["stderr_handler"],
             "level": logging.INFO,
             "propagate": False,
         },

@@ -52,6 +74,23 @@ LOGGING_CONFIG = {
 }
 
 
+class NameFilter(logging.Filter):
+    """
+    Logging filter that allows filtering logs based on the logger name.
+    Can either include or exclude a specific logger.
+    """
+
+    def __init__(self, match: str, invert: bool = False):
+        super().__init__()
+        self.match = match
+        self.invert = invert
+
+    def filter(self, record: logging.LogRecord) -> bool:
+        if self.invert:
+            return not record.name.startswith(self.match)
+        return record.name.startswith(self.match)
+
+
 class DefaultFormatter(logging.Formatter):
     """
     A custom log formatter class that:

@@ -150,3 +189,51 @@ def setup_logging() -> None:
     logger.debug("Configuring pydase logging.")
 
     logging.config.dictConfig(LOGGING_CONFIG)
+
+
+def configure_logging_with_pydase_formatter(
+    name: str | None = None, level: int = logging.INFO, stream: TextIO | None = None
+) -> None:
+    """Configure a logger with the pydase `DefaultFormatter`.
+
+    This sets up a `StreamHandler` with the custom `DefaultFormatter`, which includes
+    timestamp, log level with color (if supported), logger name, function, and line
+    number. It can be used to configure the root logger or any named logger.
+
+    Args:
+        name: The name of the logger to configure. If None, the root logger is used.
+        level: The logging level to set on the logger (e.g., logging.DEBUG,
+            logging.INFO). Defaults to logging.INFO.
+        stream: The output stream for the log messages (e.g., sys.stdout or sys.stderr).
+            If None, defaults to sys.stderr.
+
+    Example:
+        Configure logging in your service:
+
+        ```python
+        import sys
+        from pydase.utils.logging import configure_logging_with_pydase_formatter
+
+        configure_logging_with_pydase_formatter(
+            name="my_service",  # Use the package/module name or None for the root logger
+            level=logging.DEBUG,  # Set the desired logging level (defaults to INFO)
+            stream=sys.stdout  # Set the output stream (stderr by default)
+        )
+        ```
+
+    Notes:
+        - This function adds a new handler each time it's called.
+          Use carefully to avoid duplicate logs.
+        - Colors are enabled if the stream supports TTY (e.g., in terminal).
+    """  # noqa: E501
+
+    logger = logging.getLogger(name=name)
+    handler = logging.StreamHandler(stream=stream)
+    formatter = DefaultFormatter(
+        fmt="%(asctime)s.%(msecs)03d | %(levelprefix)s | "
+        "%(name)s:%(funcName)s:%(lineno)d - %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S",
+    )
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+    logger.setLevel(level)
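Note: the `NameFilter` introduced above is what splits pydase output between stdout and stderr: records from `pydase.server.*` pass the `only_pydase_server` filter, everything else passes the inverted one. A standalone sketch of the same filter attached to a plain handler:

```python
import logging
import sys

from pydase.utils.logging import NameFilter

handler = logging.StreamHandler(sys.stdout)
# Only forward records whose logger name starts with "pydase.server"
handler.addFilter(NameFilter(match="pydase.server"))
logging.getLogger("pydase").addHandler(handler)
```
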
@@ -393,7 +393,7 @@ def set_nested_value_by_path(
             current_dict, path_parts[-1], allow_append=True
         )
     except (SerializationPathError, KeyError) as e:
-        logger.error("Error occured trying to change %a: %s", path, e)
+        logger.exception("Error occured trying to change %a: %s", path, e)
         return
 
     if next_level_serialized_object["type"] == "method":  # state change of task
@@ -161,3 +161,15 @@ def test_context_manager(pydase_client: pydase.Client) -> None:
         assert client.proxy.my_property == 1337.01
 
     assert not client.proxy.connected
+
+
+def test_client_id(
+    pydase_client: pydase.Client, caplog: pytest.LogCaptureFixture
+) -> None:
+    pydase.Client(url="ws://localhost:9999")
+
+    assert "Client [sid=" in caplog.text
+    caplog.clear()
+
+    pydase.Client(url="ws://localhost:9999", client_id="my_service")
+    assert "Client [id=my_service] connected" in caplog.text
@@ -167,8 +167,8 @@ def test_normalized_attr_path_in_dependent_property_changes(
     state_manager = StateManager(service=service_instance)
     observer = DataServiceObserver(state_manager=state_manager)
 
-    assert observer.property_deps_dict["service_dict['one']._prop"] == [
-        "service_dict['one'].prop"
+    assert observer.property_deps_dict['service_dict["one"]._prop'] == [
+        'service_dict["one"].prop'
     ]
 
     # We can use dict key path encoded with double quotes

@@ -184,3 +184,60 @@ def test_normalized_attr_path_in_dependent_property_changes(
     )
     assert service_instance.service_dict["one"].prop == 12.0
     assert "'service_dict[\"one\"].prop' changed to '12.0'" in caplog.text
+
+
+def test_nested_dict_property_changes(
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    def get_voltage() -> float:
+        """Mocking a remote device."""
+        return 2.0
+
+    def set_voltage(value: float) -> None:
+        """Mocking a remote device."""
+
+    class OtherService(pydase.DataService):
+        _voltage = 1.0
+
+        @property
+        def voltage(self) -> float:
+            # Property dependency _voltage changes within the property itself.
+            # This should be handled gracefully, i.e. not introduce recursion
+            self._voltage = get_voltage()
+            return self._voltage
+
+        @voltage.setter
+        def voltage(self, value: float) -> None:
+            self._voltage = value
+            set_voltage(self._voltage)
+
+    class MyService(pydase.DataService):
+        def __init__(self) -> None:
+            super().__init__()
+            self.my_dict = {"key": OtherService()}
+
+    service = MyService()
+    pydase.Server(service)
+
+    # Changing the _voltage attribute should re-evaluate the voltage property, but avoid
+    # recursion
+    service.my_dict["key"].voltage = 1.2
+
+
+def test_read_only_dict_property(caplog: pytest.LogCaptureFixture) -> None:
+    class MyObservable(pydase.DataService):
+        def __init__(self) -> None:
+            super().__init__()
+            self._dict_attr = {"dotted.key": 1.0}
+
+        @property
+        def dict_attr(self) -> dict[str, Any]:
+            return self._dict_attr
+
+    service_instance = MyObservable()
+    state_manager = StateManager(service=service_instance)
+    DataServiceObserver(state_manager)
+
+    service_instance._dict_attr["dotted.key"] = 2.0
+
+    assert "'dict_attr[\"dotted.key\"]' changed to '2.0'" in caplog.text
@@ -1,10 +1,13 @@
+import asyncio
 import json
 from pathlib import Path
 from typing import Any
 
+import anyio
 import pydase
 import pydase.components
 import pydase.units as u
+import pytest
 from pydase.data_service.data_service_observer import DataServiceObserver
 from pydase.data_service.state_manager import (
     StateManager,

@@ -349,4 +352,24 @@ def test_property_load_state(tmp_path: Path) -> None:
 
     assert service_instance.name == "Some other name"
     assert service_instance.not_loadable_attr == "Not loadable"
-    assert not has_load_state_decorator(type(service_instance).property_without_setter)
+    assert not has_load_state_decorator(type(service_instance).property_without_setter)  # type: ignore
+
+
+@pytest.mark.asyncio()
+async def test_autosave(tmp_path: Path, caplog: LogCaptureFixture) -> None:
+    filename = tmp_path / "state.json"
+
+    service = Service()
+    manager = StateManager(service=service, filename=filename, autosave_interval=0.1)
+    DataServiceObserver(state_manager=manager)
+
+    task = asyncio.create_task(manager.autosave())
+    service.property_attr = 198.0
+    await asyncio.sleep(0.1)
+    task.cancel()
+
+    assert filename.exists(), "Autosave should write to the file"
+    async with await anyio.open_file(filename) as f:
+        data = json.loads(await f.read())
+
+    assert data["property_attr"]["value"] == service.property_attr
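Note: as `test_autosave` above shows, `StateManager.autosave()` is a long-running coroutine; `Server` schedules it automatically, but it can also be driven by hand (a sketch with illustrative names; the observer wiring shown in the test is omitted here for brevity):

```python
import asyncio

import pydase
from pydase.data_service.state_manager import StateManager


class MyService(pydase.DataService):
    value = 1.0


async def main() -> None:
    manager = StateManager(
        service=MyService(), filename="state.json", autosave_interval=5.0
    )
    await manager.autosave()  # writes the state file every 5 s until cancelled


asyncio.run(main())
```
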
@@ -185,6 +185,7 @@ async def test_update_value(
     new_value: dict[str, Any],
     ok: bool,
     pydase_server: pydase.DataService,
+    caplog: pytest.LogCaptureFixture,
 ) -> None:
     async with aiohttp.ClientSession("http://localhost:9998") as session:
         resp = await session.put(

@@ -250,3 +251,43 @@ async def test_trigger_method(
     if resp.ok:
         content = Deserializer.deserialize(json.loads(await resp.text()))
         assert content == expected
+
+
+@pytest.mark.parametrize(
+    "headers, log_id",
+    [
+        ({}, "id=None"),
+        (
+            {
+                "X-Client-Id": "client-header",
+            },
+            "id=client-header",
+        ),
+        (
+            {
+                "Remote-User": "Remote User",
+            },
+            "user=Remote User",
+        ),
+        (
+            {
+                "X-Client-Id": "client-header",
+                "Remote-User": "Remote User",
+            },
+            "user=Remote User",
+        ),
+    ],
+)
+@pytest.mark.asyncio()
+async def test_client_information_logging(
+    headers: dict[str, str],
+    log_id: str,
+    pydase_server: pydase.DataService,
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    async with aiohttp.ClientSession("http://localhost:9998") as session:
+        await session.get(
+            "/api/v1/get_value?access_path=readonly_attr", headers=headers
+        )
+
+    assert log_id in caplog.text
tests/server/web_server/test_sio_setup.py (new file, 312 lines)
@@ -0,0 +1,312 @@
+import threading
+from collections.abc import Generator
+from typing import Any
+
+import pydase
+import pytest
+import socketio
+from pydase.utils.serialization.deserializer import Deserializer
+
+
+@pytest.fixture()
+def pydase_server() -> Generator[None, None, None]:
+    class SubService(pydase.DataService):
+        name = "SubService"
+
+    subservice_instance = SubService()
+
+    class MyService(pydase.DataService):
+        def __init__(self) -> None:
+            super().__init__()
+            self._readonly_attr = "MyService"
+            self._my_property = 12.1
+            self.sub_service = SubService()
+            self.list_attr = [1, 2]
+            self.dict_attr = {
+                "foo": subservice_instance,
+                "dotted.key": subservice_instance,
+            }
+
+        @property
+        def my_property(self) -> float:
+            return self._my_property
+
+        @my_property.setter
+        def my_property(self, value: float) -> None:
+            self._my_property = value
+
+        @property
+        def readonly_attr(self) -> str:
+            return self._readonly_attr
+
+        def my_method(self, input_str: str) -> str:
+            return f"{input_str}: my_method"
+
+        async def my_async_method(self, input_str: str) -> str:
+            return f"{input_str}: my_async_method"
+
+    server = pydase.Server(MyService(), web_port=9997)
+    thread = threading.Thread(target=server.run, daemon=True)
+    thread.start()
+
+    yield
+
+
+@pytest.mark.parametrize(
+    "access_path, expected",
+    [
+        (
+            "readonly_attr",
+            {
+                "full_access_path": "readonly_attr",
+                "doc": None,
+                "readonly": False,
+                "type": "str",
+                "value": "MyService",
+            },
+        ),
+        (
+            "sub_service.name",
+            {
+                "full_access_path": "sub_service.name",
+                "doc": None,
+                "readonly": False,
+                "type": "str",
+                "value": "SubService",
+            },
+        ),
+        (
+            "list_attr[0]",
+            {
+                "full_access_path": "list_attr[0]",
+                "doc": None,
+                "readonly": False,
+                "type": "int",
+                "value": 1,
+            },
+        ),
+        (
+            'dict_attr["foo"]',
+            {
+                "full_access_path": 'dict_attr["foo"]',
+                "doc": None,
+                "name": "SubService",
+                "readonly": False,
+                "type": "DataService",
+                "value": {
+                    "name": {
+                        "doc": None,
+                        "full_access_path": 'dict_attr["foo"].name',
+                        "readonly": False,
+                        "type": "str",
+                        "value": "SubService",
+                    }
+                },
+            },
+        ),
+    ],
+)
+@pytest.mark.asyncio()
+async def test_get_value(
+    access_path: str,
+    expected: dict[str, Any],
+    pydase_server: None,
+) -> None:
+    client = socketio.AsyncClient()
+    await client.connect(
+        "http://localhost:9997", socketio_path="/ws/socket.io", transports=["websocket"]
+    )
+    response = await client.call("get_value", access_path)
+    assert response == expected
+    await client.disconnect()
+
+
+@pytest.mark.parametrize(
+    "access_path, new_value, ok",
+    [
+        (
+            "sub_service.name",
+            {
+                "full_access_path": "sub_service.name",
+                "doc": None,
+                "readonly": False,
+                "type": "str",
+                "value": "New Name",
+            },
+            True,
+        ),
+        (
+            "list_attr[0]",
+            {
+                "full_access_path": "list_attr[0]",
+                "doc": None,
+                "readonly": False,
+                "type": "int",
+                "value": 11,
+            },
+            True,
+        ),
+        (
+            'dict_attr["foo"].name',
+            {
+                "full_access_path": 'dict_attr["foo"].name',
+                "doc": None,
+                "readonly": False,
+                "type": "str",
+                "value": "foo name",
+            },
+            True,
+        ),
+        (
+            "readonly_attr",
+            {
+                "full_access_path": "readonly_attr",
+                "doc": None,
+                "readonly": True,
+                "type": "str",
+                "value": "Other Name",
+            },
+            False,
+        ),
+        (
+            "invalid_attribute",
+            {
+                "full_access_path": "invalid_attribute",
+                "doc": None,
+                "readonly": False,
+                "type": "float",
+                "value": 12.0,
+            },
+            False,
+        ),
+    ],
+)
+@pytest.mark.asyncio()
+async def test_update_value(
+    access_path: str,
+    new_value: dict[str, Any],
+    ok: bool,
+    pydase_server: None,
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    client = socketio.AsyncClient()
+    await client.connect(
+        "http://localhost:9997", socketio_path="/ws/socket.io", transports=["websocket"]
+    )
+    response = await client.call(
+        "update_value",
+        {"access_path": access_path, "value": new_value},
+    )
+
+    if ok:
+        assert response is None
+    else:
+        assert response["type"] == "Exception"
+
+    await client.disconnect()
+
+
+@pytest.mark.parametrize(
+    "access_path, expected, ok",
+    [
+        (
+            "my_method",
+            "Hello from function: my_method",
+            True,
+        ),
+        (
+            "my_async_method",
+            "Hello from function: my_async_method",
+            True,
+        ),
+        (
+            "invalid_method",
+            None,
+            False,
+        ),
+    ],
+)
+@pytest.mark.asyncio()
+async def test_trigger_method(
+    access_path: str,
+    expected: Any,
+    ok: bool,
+    pydase_server: pydase.DataService,
+) -> None:
+    client = socketio.AsyncClient()
+    await client.connect(
+        "http://localhost:9997", socketio_path="/ws/socket.io", transports=["websocket"]
+    )
+    response = await client.call(
+        "trigger_method",
+        {
+            "access_path": access_path,
+            "kwargs": {
+                "full_access_path": "",
+                "type": "dict",
+                "value": {
+                    "input_str": {
+                        "docs": None,
+                        "full_access_path": "",
+                        "readonly": False,
+                        "type": "str",
+                        "value": "Hello from function",
+                    },
+                },
+            },
+        },
+    )
+
+    if ok:
+        content = Deserializer.deserialize(response)
+        assert content == expected
+    else:
+        assert response["type"] == "Exception"
+
+    await client.disconnect()
+
+
+@pytest.mark.parametrize(
+    "headers, log_id",
+    [
+        ({}, "sid="),
+        (
+            {
+                "X-Client-Id": "client-header",
+            },
+            "id=client-header",
+        ),
+        (
+            {
+                "Remote-User": "Remote User",
+            },
+            "user=Remote User",
+        ),
+        (
+            {
+                "X-Client-Id": "client-header",
+                "Remote-User": "Remote User",
+            },
+            "user=Remote User",
+        ),
+    ],
+)
+@pytest.mark.asyncio()
+async def test_client_information_logging(
+    headers: dict[str, str],
+    log_id: str,
+    pydase_server: pydase.DataService,
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    client = socketio.AsyncClient()
+    await client.connect(
+        "http://localhost:9997",
+        socketio_path="/ws/socket.io",
+        transports=["websocket"],
+        headers=headers,
+    )
+    await client.call("get_value", "readonly_attr")
+
+    assert log_id in caplog.text
+
+    await client.disconnect()
@@ -289,3 +289,171 @@ async def test_manual_start_with_multiple_service_instances(
|
|||||||
await asyncio.sleep(0.01)
|
await asyncio.sleep(0.01)
|
||||||
|
|
||||||
assert "Task 'my_task' was cancelled" in caplog.text
|
assert "Task 'my_task' was cancelled" in caplog.text
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(scope="function")
|
||||||
|
async def test_restart_on_exception(caplog: LogCaptureFixture) -> None:
|
||||||
|
class MyService(pydase.DataService):
|
||||||
|
@task(restart_on_exception=True, restart_sec=0.1)
|
||||||
|
async def my_task(self) -> None:
|
||||||
|
logger.info("Triggered task.")
|
||||||
|
raise Exception("Task failure")
|
||||||
|
|
||||||
|
service_instance = MyService()
|
||||||
|
state_manager = StateManager(service_instance)
|
||||||
|
DataServiceObserver(state_manager)
|
||||||
|
service_instance.my_task.start()
|
||||||
|
|
||||||
|
await asyncio.sleep(0.01)
|
||||||
|
assert "Task 'my_task' encountered an exception" in caplog.text
|
||||||
|
caplog.clear()
|
||||||
|
await asyncio.sleep(0.1)
|
||||||
|
assert service_instance.my_task.status == TaskStatus.RUNNING
|
||||||
|
assert "Task 'my_task' encountered an exception" in caplog.text
|
||||||
|
assert "Triggered task." in caplog.text
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(scope="function")
|
||||||
|
async def test_restart_sec(caplog: LogCaptureFixture) -> None:
|
||||||
|
class MyService(pydase.DataService):
|
||||||
|
@task(restart_on_exception=True, restart_sec=0.1)
|
||||||
|
async def my_task(self) -> None:
|
||||||
|
logger.info("Triggered task.")
|
||||||
|
raise Exception("Task failure")
|
||||||
|
|
||||||
|
service_instance = MyService()
|
||||||
|
state_manager = StateManager(service_instance)
|
||||||
|
DataServiceObserver(state_manager)
|
||||||
|
service_instance.my_task.start()
|
||||||
|
|
||||||
|
await asyncio.sleep(0.001)
|
||||||
|
assert "Triggered task." in caplog.text
|
||||||
|
caplog.clear()
|
||||||
|
await asyncio.sleep(0.05)
|
||||||
|
assert "Triggered task." not in caplog.text
|
||||||
|
await asyncio.sleep(0.05)
|
||||||
|
assert "Triggered task." in caplog.text # Ensures the task restarted after 0.2s
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(scope="function")
|
||||||
|
async def test_exceeding_start_limit_interval_sec_and_burst(
|
||||||
|
caplog: LogCaptureFixture,
|
||||||
|
) -> None:
|
||||||
|
class MyService(pydase.DataService):
|
||||||
|
@task(
|
||||||
|
restart_on_exception=True,
|
||||||
|
restart_sec=0.0,
|
||||||
|
start_limit_interval_sec=1.0,
|
||||||
|
start_limit_burst=2,
|
||||||
|
)
|
||||||
|
async def my_task(self) -> None:
|
||||||
|
raise Exception("Task failure")
|
||||||
|
|
||||||
|
service_instance = MyService()
|
||||||
|
state_manager = StateManager(service_instance)
|
||||||
|
DataServiceObserver(state_manager)
|
||||||
|
service_instance.my_task.start()
|
||||||
|
|
||||||
|
await asyncio.sleep(0.1)
|
||||||
|
assert "Task 'my_task' exceeded restart burst limit" in caplog.text
|
||||||
|
assert service_instance.my_task.status == TaskStatus.NOT_RUNNING
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(scope="function")
|
||||||
|
async def test_non_exceeding_start_limit_interval_sec_and_burst(
|
||||||
|
caplog: LogCaptureFixture,
|
||||||
|
) -> None:
|
||||||
|
class MyService(pydase.DataService):
|
||||||
|
@task(
|
||||||
|
restart_on_exception=True,
|
||||||
|
restart_sec=0.1,
|
||||||
|
start_limit_interval_sec=0.1,
|
||||||
|
start_limit_burst=2,
|
||||||
|
)
|
||||||
|
async def my_task(self) -> None:
|
||||||
|
raise Exception("Task failure")
|
||||||
|
|
||||||
|
service_instance = MyService()
|
||||||
|
state_manager = StateManager(service_instance)
|
||||||
|
DataServiceObserver(state_manager)
|
||||||
|
service_instance.my_task.start()
|
||||||
|
|
||||||
|
await asyncio.sleep(0.5)
|
||||||
|
assert "Task 'my_task' exceeded restart burst limit" not in caplog.text
|
||||||
|
assert service_instance.my_task.status == TaskStatus.RUNNING
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(scope="function")
|
||||||
|
async def test_exit_on_failure(
|
||||||
|
monkeypatch: pytest.MonkeyPatch, caplog: LogCaptureFixture
|
||||||
|
) -> None:
|
||||||
|
class MyService(pydase.DataService):
|
||||||
|
@task(restart_on_exception=False, exit_on_failure=True)
|
||||||
|
async def my_task(self) -> None:
|
||||||
|
logger.info("Triggered task.")
|
||||||
|
raise Exception("Critical failure")
|
||||||
|
|
||||||
|
def mock_os_kill(pid: int, signal: int) -> None:
|
||||||
|
logger.critical("os.kill called with signal=%s and pid=%s", signal, pid)
|
||||||
|
|
||||||
|
monkeypatch.setattr("os.kill", mock_os_kill)
|
||||||
|
|
||||||
|
service_instance = MyService()
|
||||||
|
state_manager = StateManager(service_instance)
|
||||||
|
DataServiceObserver(state_manager)
|
||||||
|
service_instance.my_task.start()
|
||||||
|
|
||||||
|
await asyncio.sleep(0.1)
|
||||||
|
assert "os.kill called with signal=" in caplog.text
|
||||||
|
assert "Task 'my_task' encountered an exception" in caplog.text
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(scope="function")
|
||||||
|
async def test_exit_on_failure_exceeding_rate_limit(
|
||||||
|
monkeypatch: pytest.MonkeyPatch, caplog: LogCaptureFixture
|
||||||
|
) -> None:
|
||||||
|
class MyService(pydase.DataService):
|
||||||
|
@task(
|
||||||
|
restart_on_exception=True,
|
||||||
|
restart_sec=0.0,
|
||||||
|
start_limit_interval_sec=0.1,
|
||||||
|
start_limit_burst=2,
|
||||||
|
exit_on_failure=True,
|
||||||
|
)
|
||||||
|
async def my_task(self) -> None:
|
||||||
|
raise Exception("Critical failure")
|
||||||
|
|
||||||
|
def mock_os_kill(pid: int, signal: int) -> None:
|
||||||
|
logger.critical("os.kill called with signal=%s and pid=%s", signal, pid)
|
||||||
|
|
||||||
|
monkeypatch.setattr("os.kill", mock_os_kill)
|
||||||
|
|
||||||
|
service_instance = MyService()
|
||||||
|
state_manager = StateManager(service_instance)
|
||||||
|
DataServiceObserver(state_manager)
|
||||||
|
service_instance.my_task.start()
|
||||||
|
|
||||||
|
await asyncio.sleep(0.5)
|
||||||
|
assert "os.kill called with signal=" in caplog.text
|
||||||
|
assert "Task 'my_task' encountered an exception" in caplog.text
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio(scope="function")
async def test_gracefully_finishing_task(
    monkeypatch: pytest.MonkeyPatch, caplog: LogCaptureFixture
) -> None:
    class MyService(pydase.DataService):
        @task()
        async def my_task(self) -> None:
            print("Hello")
            await asyncio.sleep(0.1)

    service_instance = MyService()
    state_manager = StateManager(service_instance)
    DataServiceObserver(state_manager)
    service_instance.my_task.start()

    await asyncio.sleep(0.05)
    assert service_instance.my_task.status == TaskStatus.RUNNING
    await asyncio.sleep(0.1)
    assert service_instance.my_task.status == TaskStatus.NOT_RUNNING
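
Taken together, these tests pin down the restart semantics of the @task decorator: a task that raises is restarted after restart_sec, but only as long as no more than start_limit_burst starts fall within one start_limit_interval_sec window, and exit_on_failure=True escalates a give-up to os.kill on the service process. A minimal usage sketch outside the test suite (the poll_device loop and the server entry point are illustrative assumptions; the task import path is assumed from pydase's documented module layout):

import asyncio

import pydase
from pydase.task.decorator import task


class DeviceService(pydase.DataService):
    # Hypothetical polling loop: restart 1 s after a failure, but give up
    # if more than 5 starts occur within any 10 s window.
    @task(
        restart_on_exception=True,
        restart_sec=1.0,
        start_limit_interval_sec=10.0,
        start_limit_burst=5,
    )
    async def poll_device(self) -> None:
        while True:
            await asyncio.sleep(1.0)


if __name__ == "__main__":
    pydase.Server(DeviceService()).run()

The sub-second timings in the tests above exist only to keep the suite fast; service code would typically use intervals of seconds, as here.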

tests/utils/test_logging.py
@@ -1,9 +1,10 @@
import logging

from pytest import LogCaptureFixture
import pytest
from pydase.utils.logging import configure_logging_with_pydase_formatter


def test_log_error(caplog: LogCaptureFixture):
def test_log_error(caplog: pytest.LogCaptureFixture) -> None:
    logger = logging.getLogger("pydase")
    logger.setLevel(logging.ERROR)

@@ -20,7 +21,7 @@ def test_log_error(caplog: LogCaptureFixture):
    assert any(record.levelname == "ERROR" for record in caplog.records)


def test_log_warning(caplog: LogCaptureFixture):
def test_log_warning(caplog: pytest.LogCaptureFixture) -> None:
    logger = logging.getLogger("pydase")
    logger.setLevel(logging.WARNING)

@@ -37,7 +38,7 @@ def test_log_warning(caplog: LogCaptureFixture):
    assert any(record.levelname == "ERROR" for record in caplog.records)


def test_log_debug(caplog: LogCaptureFixture):
def test_log_debug(caplog: pytest.LogCaptureFixture) -> None:
    logger = logging.getLogger("pydase")
    logger.setLevel(logging.DEBUG)

@@ -53,7 +54,7 @@ def test_log_debug(caplog: LogCaptureFixture):
    assert "This is an error message" in caplog.text


def test_log_info(caplog: LogCaptureFixture):
def test_log_info(caplog: pytest.LogCaptureFixture) -> None:
    logger = logging.getLogger("pydase")
    logger.setLevel(logging.INFO)

@@ -67,3 +68,21 @@ def test_log_info(caplog: LogCaptureFixture):
    assert "This is an info message" in caplog.text
    assert "This is a warning message" in caplog.text
    assert "This is an error message" in caplog.text


def test_before_configuring_root_logger(caplog: pytest.LogCaptureFixture) -> None:
    logger = logging.getLogger(__name__)
    logger.info("Hello world")

    assert "Hello world" not in caplog.text


def test_configure_root_logger(caplog: pytest.LogCaptureFixture) -> None:
    configure_logging_with_pydase_formatter()
    logger = logging.getLogger(__name__)
    logger.info("Hello world")

    assert (
        "INFO tests.utils.test_logging:test_logging.py:83 Hello world"
        in caplog.text
    )
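
The two added tests encode the intended call pattern for configure_logging_with_pydase_formatter: a record emitted through a module-level logger only appears in the captured output, rendered in the LEVEL logger:file:line message layout, once the helper has configured the root logger. A minimal application-side sketch of the same pattern (logger name and message are placeholders):

import logging

from pydase.utils.logging import configure_logging_with_pydase_formatter

# Configure the root logger once at application start-up; module-level
# loggers used afterwards inherit pydase's formatting.
configure_logging_with_pydase_formatter()

logger = logging.getLogger(__name__)
logger.info("Hello world")  # e.g. "INFO my_app:app.py:10 Hello world"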