Hi @vitor,
I managed to get just about what I wanted… or close!
I parsed the data correctly and wrote an analysis script after understanding how getData and filters were working. BTW, I was reading the SDK pages and I was wondering if there was more information/examples for all the Tago SDK content? For example, it is not so obvious what the hard-coded options are for the filters and how to use them besides looking at the code snippets.
Then, I created an action that triggered an analysis when a certain packet type was received (type ‘b’ or 0x62). Here’s the code:
const { Analysis, Device, Utils } = require("@tago-io/sdk");
// The function myAnalysis will run when you execute your analysis
// The function myAnalysis will run when you execute your analysis.
// It reads the latest accel axis / chunk number / hex payload from the
// device, decodes the payload into int16 samples, and stores them back
// on the device under per-axis variables.
async function myAnalysis(context) {
  // Read the values from the environment and save them in env_vars.
  const env_vars = Utils.envToJson(context.environment);
  if (!env_vars.device_token) {
    return context.log("Missing device_token environment variable");
  }

  const device = new Device({ token: env_vars.device_token });

  // The three queries are independent, so fetch them in parallel
  // instead of awaiting each one serially.
  const [resultArray1, resultArray2, resultArray3] = await Promise.all([
    device.getData({ variable: "accel_axis", query: "last_item" }).catch(() => null),
    device.getData({ variable: "chunk_no", query: "last_item" }).catch(() => null),
    device.getData({ variable: "accel_data", query: "last_item" }).catch(() => null),
  ]);

  // Check that every query returned at least one item.
  if (!resultArray1 || !resultArray1[0] || !resultArray2 || !resultArray2[0] || !resultArray3 || !resultArray3[0]) {
    return context.log("Empty Array");
  }

  // query: "last_item" always returns only one value.
  const axis = resultArray1[0].value;
  const chunk = resultArray2[0].value;
  const data = resultArray3[0].value;

  // Print to the console at TagoIO.
  context.log(axis);
  context.log(chunk);
  context.log(data);

  // Bail out early on an unknown axis. (The original fell through the
  // else branch and re-sent the previous iteration's records.)
  if (axis !== "x" && axis !== "y" && axis !== "z") {
    return context.log("Could not resolve axis...");
  }

  // Each int16 sample occupies 2 bytes = 4 hex characters in the payload.
  const data_len = data.length / 4;
  // Create a buffer from the hex-encoded acceleration data chunk.
  const data_buffer = Buffer.from(data, "hex");

  // Build ALL records first, then send them with a single sendData call.
  // One API round-trip per sample (the original behavior) is slow and
  // lets overlapping analysis runs interleave their inserts.
  const records = [];
  for (let i = 0; i < data_len; i++) {
    const sample = data_buffer.readInt16LE(2 * i);
    // Global sample index across chunks, used as the serie for plotting.
    const serie = i + chunk * data_len;
    records.push(
      { variable: `accel_${axis}_data`, serie, value: sample, unit: "mg" },
      { variable: `accel_${axis}_range`, serie, value: serie },
    );
  }

  try {
    await device.sendData(records);
  } catch (error) {
    context.log("Error when inserting:", error);
  }
}
// Register the analysis handler with TagoIO (runs server-side).
module.exports = new Analysis(myAnalysis);
// To run analysis on your machine (external)
// module.exports = new Analysis(myAnalysis, { token: "YOUR-TOKEN" });
BTW, I tried to get more than one variable at once, but I was not able to do so. I am pretty sure you can tell me how…
Using the other post you referred to in your last post, I created a new variable for plotting purposes and added the “serie” member to both variables. I was able to properly plot the data, but a problem came when I tried to process large chunks of data (I was troubleshooting with a 14 bytes chunk).
I have an int16 accel_x[1200] array to process with 20 x 120 byte chunks, and the LoRa packets come in at a 3-second interval. The first chunks go well and get plotted in real time, but after a couple of chunks it seems like Tago cannot process them fast enough, and the triggered analyses start overlapping one another (a new type ‘b’ packet arrives before the previous one has finished processing).
I then created a dynamic table to observe the accel_x_range variable, and I saw that the values were not appearing in order (e.g., 50, 65, 51, 66, 52, 67), as if there were two analysis instances feeding the bucket at the same time.
Is there a way to wait for one packet to be fully processed before processing the next (some kind of waiting room / buffer)?
Thanks!
Xavier