ATEST-205

This commit is contained in:
Fabian Märki
2015-08-10 11:44:08 +02:00
parent a1cb60c56f
commit c751aab8de
4 changed files with 86 additions and 48 deletions

View File

@ -1,5 +1,5 @@
# #
#Thu Aug 06 09:29:10 CEST 2015 #Fri Aug 07 13:38:07 CEST 2015
org.eclipse.jdt.core.compiler.debug.localVariable=generate org.eclipse.jdt.core.compiler.debug.localVariable=generate
org.eclipse.jdt.core.compiler.compliance=1.8 org.eclipse.jdt.core.compiler.compliance=1.8
org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve

View File

@ -31,22 +31,25 @@ It is possible to overwrite properties by defining new values in `${HOME}/.confi
### Request ### Request
``` ```
GET http://<host>:<port>/channels POST http://<host>:<port>/channels
or
GET http://<host>:<port>/channels/{regex}
``` ```
#### Data
```
{"regex": "TRFCA|TRFCB", "dbMode": "databuffer"}
```
##### Explanation
- **regex**: Regular expression used to filter channel names (default: no filtering). Filtering is done using JAVA's [Pattern](https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html), more precisely [Matcher.find()](https://docs.oracle.com/javase/8/docs/api/java/util/regex/Matcher.html#find--).
- **dbMode**: Defines the database to access (values: **databuffer**|archiverappliance)
### Example ### Example
``` ```
curl -H "Content-Type: application/json" -X GET http://sf-nube-14.psi.ch:8080/channels curl -H "Content-Type: application/json" -X POST -d '{"regex": "TRFCA|TRFCB"}' http://sf-nube-14.psi.ch:8080/channels
or
curl -H "Content-Type: application/json" -X GET http://sf-nube-14.psi.ch:8080/channels/TRFCB
``` ```
<a name="query_data"/> <a name="query_data"/>
@ -59,10 +62,27 @@ curl -H "Content-Type: application/json" -X GET http://sf-nube-14.psi.ch:8080/ch
GET http://<host>:<port>/query GET http://<host>:<port>/query
``` ```
### Example #### Data
A request is performed using JSON. The JSON query defines the channels to be queried, the range, and how the data should be aggregated (this is optional but highly recommended). A request is performed using JSON. The JSON query defines the channels to be queried, the range, and how the data should be aggregated (this is optional but highly recommended).
There exist following fields:
- **channels**: Array of channel names to be queried.
- **startPulseId** and **endPulseId**: A pulse-id range request with start and end pulse-id.
- **startMillis/[startNanos]** and **endMillis/[endNanos]**: A time range request with start and end milliseconds since January 1, 1970 (the UNIX/JAVA epoch) and optionally supplemented with the nanosecond offset to the milliseconds (range [0..999999]).
- **startDate/[startNanos]** and **endDate/[endNanos]**: A time range request with start and end date (format yyyy/MM/dd HH:mm:ss.SSS or dd.MM.yyyy HH:mm:ss.SSS) and optionally supplemented with the nanosecond offset to the milliseconds (range [0..999999]).
- **ordering**: The ordering of the data (see [here](https://github.psi.ch/projects/ST/repos/ch.psi.daq.common/browse/src/main/java/ch/psi/daq/common/ordering/Ordering.java) for possible values).
- **fields**: The requested fields (see [here](https://github.psi.ch/projects/ST/repos/ch.psi.daq.query/browse/src/main/java/ch/psi/daq/query/model/QueryField.java) for possible values).
- **nrOfBins**: Activates data binning. Specifies the number of bins the pulse/time range should be divided into.
- **binSize**: Activates data binning. Specifies the number of pulses per bin for pulse-range queries or the number of milliseconds per bin for time-range queries.
- **aggregations**: Activates data aggregation. Array of requested aggregations (see [here](https://github.psi.ch/projects/ST/repos/ch.psi.daq.query/browse/src/main/java/ch/psi/daq/query/model/Aggregation.java) for possible values). These values will be added to the *data* array response.
- **aggregationType**: Specifies the type of aggregation (see [here](https://github.psi.ch/projects/ST/repos/ch.psi.daq.query/browse/src/main/java/ch/psi/daq/query/model/AggregationType.java)). The default type is *value* aggregation (e.g., sum([1,2,3])=6). Alternatively, it is possible to define *index* aggregation for multiple arrays in combination with binning (e.g., sum([1,2,3], [3,2,1]) = [4,4,4]).
- **aggregateChannels**: Specifies whether the data of the requested channels should be combined together using the defined aggregation (values: true|**false**)
### Example
``` ```
curl -H "Content-Type: application/json" -X GET -d '{"channels":["channel1","channel2"],"startPulseId":0,"endPulseId":4}' http://sf-nube-14.psi.ch:8080/channels curl -H "Content-Type: application/json" -X GET -d '{"channels":["channel1","channel2"],"startPulseId":0,"endPulseId":4}' http://sf-nube-14.psi.ch:8080/channels
``` ```
@ -126,24 +146,6 @@ The response is in JSON.
] ]
``` ```
### JSON Query
Queries are defined using JSON.
There exist following fields:
- **channels**: Array of channel names to be queried.
- **startPulseId** and **endPulseId** : A pulse-id range request with start and end pulse-id.
- **startMillis/[startNanos]** and **endMillis/[endNanos]**: A time range request with start and end milliseconds since January 1, 1970 (the UNIX/JAVA epoch) and optionally supplemented with the nanosecond offset to the milliseconds (range [0..999999]).
- **startDate/[startNanos]** and **endDate/[endNanos]**: A time range request with start and end date (format yyyy/MM/dd HH:mm:ss.SSS or dd.MM.yyyy HH:mm:ss.SSS) and optionally supplemented with the nanosecond offset to the milliseconds (range [0..999999]).
- **ordering**: The ordering of the data (see [here](https://github.psi.ch/projects/ST/repos/ch.psi.daq.common/browse/src/main/java/ch/psi/daq/common/ordering/Ordering.java) for possible values).
- **fields**: The requested fields (see [here](https://github.psi.ch/projects/ST/repos/ch.psi.daq.query/browse/src/main/java/ch/psi/daq/query/model/QueryField.java) for possible values).
- **nrOfBins**: Activates data binning. Specifies the number of bins the pulse/time range should be divided into.
- **binSize**: Activates data binning. Specifies the number of pulses per bin for pulse-range queries or the number of milliseconds per bin for time-range queries.
- **aggregations**: Activates data aggregation. Array of requested aggregations (see [here](https://github.psi.ch/projects/ST/repos/ch.psi.daq.query/browse/src/main/java/ch/psi/daq/query/model/Aggregation.java) for possible values). These values will be added to the *data* array response.
- **aggregationType**: Specifies the type of aggregation (see [here](https://github.psi.ch/projects/ST/repos/ch.psi.daq.query/browse/src/main/java/ch/psi/daq/query/model/AggregationType.java)). The default type is *value* aggregation (e.g., sum([1,2,3])=6). Alternatively, it is possible to define *index* aggregation for multiple arrays in combination with binning (e.g., sum([1,2,3], [3,2,1]) = [4,4,4]).
- **aggregateChannels**: Specifies whether the data of the requested channels should be combined together using the defined aggregation (values: true|**false**)
### Example JSON Queries ### Example JSON Queries
**TODO:** **TODO:**

View File

@ -23,6 +23,7 @@ import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController; import org.springframework.web.bind.annotation.RestController;
import ch.psi.daq.cassandra.request.validate.RequestProviderValidator;
import ch.psi.daq.cassandra.util.test.CassandraDataGen; import ch.psi.daq.cassandra.util.test.CassandraDataGen;
import ch.psi.daq.domain.DataEvent; import ch.psi.daq.domain.DataEvent;
import ch.psi.daq.query.analyzer.QueryAnalyzer; import ch.psi.daq.query.analyzer.QueryAnalyzer;
@ -41,11 +42,12 @@ public class QueryRestController {
private static final Logger LOGGER = LoggerFactory.getLogger(QueryRestController.class); private static final Logger LOGGER = LoggerFactory.getLogger(QueryRestController.class);
public static final String CHANNELS = "channels"; public static final String CHANNELS = "/channels";
public static final String QUERY = "query"; public static final String QUERY = "/query";
@Resource @Resource
private Validator queryValidator; private Validator queryValidator;
private Validator requestProviderValidator = new RequestProviderValidator();
@Resource @Resource
private ResponseStreamWriter responseStreamWriter; private ResponseStreamWriter responseStreamWriter;
@ -68,11 +70,13 @@ public class QueryRestController {
@InitBinder @InitBinder
protected void initBinder(WebDataBinder binder) { protected void initBinder(WebDataBinder binder) {
/* if (requestProviderValidator.supports(binder.getTarget().getClass())) {
* This allows to use the @Valid annotation in the methods below. binder.addValidators(requestProviderValidator);
*/ }
if (queryValidator.supports(binder.getTarget().getClass())) {
binder.addValidators(queryValidator); binder.addValidators(queryValidator);
} }
}
@RequestMapping( @RequestMapping(
value = CHANNELS, value = CHANNELS,
@ -104,7 +108,8 @@ public class QueryRestController {
QueryAnalyzer queryAnalizer = queryAnalizerFactory.apply(query); QueryAnalyzer queryAnalizer = queryAnalizerFactory.apply(query);
// all the magic happens here // all the magic happens here
Stream<Entry<String, Stream<? extends DataEvent>>> channelToDataEvents = getQueryProcessor(query.getDBMode()).process(queryAnalizer); Stream<Entry<String, Stream<? extends DataEvent>>> channelToDataEvents =
getQueryProcessor(query.getDBMode()).process(queryAnalizer);
// do post-process // do post-process
Stream<Entry<String, ?>> channelToData = queryAnalizer.postProcess(channelToDataEvents); Stream<Entry<String, ?>> channelToData = queryAnalizer.postProcess(channelToDataEvents);
@ -136,6 +141,41 @@ public class QueryRestController {
@RequestMapping(value = "/write") @RequestMapping(value = "/write")
public void writeDummyEntry() { public void writeDummyEntry() {
cassandraDataGen.writeData(3, 0, 100, "channel1", "channel2"); long nrOfElements = 4;
cassandraDataGen.writeData(3, 0, 4,
i -> i,
i -> 0,
i -> i,
i -> i,
i -> 0,
i -> new long[] {i, i, i, i},
"TRFCA-channel1");
cassandraDataGen.writeData(3, 0, 4,
i -> i,
i -> 0,
i -> i,
i -> i,
i -> 0,
i -> new long[] {nrOfElements - i, nrOfElements - i, nrOfElements - i, nrOfElements - i},
"TRFCA-channel2");
cassandraDataGen.writeData(3, 0, 4,
i -> i,
i -> 0,
i -> i,
i -> i,
i -> 0,
i -> i,
"TRFCB-channel3");
cassandraDataGen.writeData(3, 0, 4,
i -> i,
i -> 0,
i -> i,
i -> i,
i -> 0,
i -> nrOfElements - i,
"TRFCB-channel4");
} }
} }

View File

@ -15,7 +15,6 @@ import ch.psi.daq.query.model.QueryField;
import ch.psi.daq.query.model.impl.AbstractQuery; import ch.psi.daq.query.model.impl.AbstractQuery;
import ch.psi.daq.cassandra.request.Request; import ch.psi.daq.cassandra.request.Request;
import ch.psi.daq.cassandra.request.range.RequestRangeTime; import ch.psi.daq.cassandra.request.range.RequestRangeTime;
import ch.psi.daq.cassandra.request.validate.RequestValidator;
import ch.psi.daq.queryrest.config.QueryRestConfig; import ch.psi.daq.queryrest.config.QueryRestConfig;
public class QueryValidator implements Validator { public class QueryValidator implements Validator {
@ -26,8 +25,6 @@ public class QueryValidator implements Validator {
@Resource(name = QueryRestConfig.BEAN_NAME_DEFAULT_RESPONSE_AGGREGATIONS) @Resource(name = QueryRestConfig.BEAN_NAME_DEFAULT_RESPONSE_AGGREGATIONS)
private Set<Aggregation> defaultResponseAggregations; private Set<Aggregation> defaultResponseAggregations;
private RequestValidator requestValidator = new RequestValidator();
/** /**
* {@inheritDoc} * {@inheritDoc}
*/ */
@ -45,11 +42,10 @@ public class QueryValidator implements Validator {
AbstractQuery query = (AbstractQuery) target; AbstractQuery query = (AbstractQuery) target;
Request request = query.getRequest(); Request request = query.getRequest();
requestValidator.validate(request, errors);
if (DBMode.archiverappliance.equals(query.getDBMode())) { if (DBMode.archiverappliance.equals(query.getDBMode())) {
if (!(request.getRequestRange() instanceof RequestRangeTime)) { if (!(request.getRequestRange() instanceof RequestRangeTime)) {
errors.reject("", "ArchiverAppliance supports time range queries only!"); errors.reject("dbMode", "ArchiverAppliance supports time range queries only!");
} }
} }