diff --git a/well-api2.py b/well-api2.py
index 0d7ea87..d7b38b3 100644
--- a/well-api2.py
+++ b/well-api2.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python3
+# Version 2.0.0
import os
import sys
import ast
@@ -37,6 +38,20 @@ from sklearn.mixture import GaussianMixture
import openai
from openai import OpenAI
from typing import List, Tuple
+import redis
+import base64
+import requests
+
+# Try to import the filter_short_groups helper used for presence filtering; exit if it is missing
+try:
+ from filter_short_groups import filter_short_groups_c
+ print("Successfully imported filter_short_groups_c")
+except ImportError as e:
+ print(f"Error importing module: {e}")
+ sys.exit(1)
+
+device_lookup_cache = {}
+threshold_cache = {}
st = 0
if True:
@@ -67,14 +82,14 @@ location_names = {-1:"All",0:"?",5:"Office",6:"Hallway",7:"Garage",8:"Outside",9
56:"Bedroom",78:"Living Room",102:"Bathroom",103:"Dining Room",104:"Bathroom Main",105:"Bathroom Guest",
106:"Bedroom Master", 107:"Bedroom Guest", 108:"Conference Room", 109:"Basement", 110:"Attic", 200:"Other"}
-Loc2Color = {"?":(0,0,0),"Office":(255,255,0),"Hallway":(128,128,128),"Garage":(128,0,0),"Outside":(0,0,0),"Conference Room":(0,0,128),
- "Room":(64,64,64),"Kitchen":(255,0,0),"Bedroom":(16,255,16),"Living Room":(160,32,240),"Bathroom":(0,0,255),
- "Dining Room":(255,128,0),"Bathroom Main":(16,16,255), "Bedroom Master":(0,255,0),"Bathroom Guest":(32,32,255),
- "Bedroom Guest":(32,255,32), "Basement":(64,64,64), "Attic":(255,165,0), "Other":(192,192,192)}
+#Loc2Color = {"?":(0,0,0),"Office":(255,255,0),"Hallway":(128,128,128),"Garage":(128,0,0),"Outside":(0,0,0),"Conference Room":(0,0,128),
+ #"Room":(64,64,64),"Kitchen":(255,0,0),"Bedroom":(16,255,16),"Living Room":(160,32,240),"Bathroom":(0,0,255),
+ #"Dining Room":(255,128,0),"Bathroom Main":(16,16,255), "Bedroom Master":(0,255,0),"Bathroom Guest":(32,32,255),
+ #"Bedroom Guest":(32,255,32), "Basement":(64,64,64), "Attic":(255,165,0), "Other":(192,192,192)}
Loc2Color = {"Bedroom":((16,255,16),0),"Bedroom Master":((0,255,0),0),"Bedroom Guest":((32,255,32),0),"Bathroom":((0,0,255),1),
- "Bathroom Main":((16,16,255),1),"Bathroom Guest":((32,32,255),1),"Kitchen":((255,0,0),2),"Dining Room":((255,128,0),3),
- "Office":((255,255,0),4),"Conference Room":((0,0,128),5),"Room":((64,64,64),6),"Living Room":((160,32,240),7),"Hallway":((128,128,128),8),
+ "Bathroom Main":((16,16,255),1),"Bathroom Guest":((32,32,255),1),"Kitchen":((255,0,0),2),"Dining Room":((255,128,0),3),"Dining":((255,128,0),3),
+ "Office":((255,255,0),4),"Conference Room":((0,0,128),5),"Conference":((0,0,128),5),"Room":((64,64,64),6),"Living Room":((160,32,240),7),"Living":((160,32,240),7),"Hallway":((128,128,128),8),
"Garage":((128,0,0),9),"Basement":((64,64,64), 10),"Attic":((255,165,0), 11),"Other":((192,192,192),12),"?":((0,0,0),13),"Outside":((0,0,0),14)}
@@ -123,6 +138,28 @@ MASTER_PS = os.getenv('MASTER_PS')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
model_engine = os.getenv('OPENAI_API_MODEL_ENGINE')
+
+# Redis Configuration
+REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
+REDIS_PORT = int(os.getenv('REDIS_PORT', '6379'))  # default to the standard Redis port if unset
+REDIS_DB = int(os.getenv('REDIS_DB', 0))
+REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', None)
+
+ENABLE_AUDIO_PLAYBACK = True
+OPT_IN_KEYWORD = "WELLNUOJOIN"
+DEFAULT_TTS_VOICE = "female"
+DEFAULT_TTS_LANGUAGE = "en-US"
+CLIENT_STATE_PREFIX = "app_state"
+TELNYX_API_KEY = os.getenv('TELNYX_API_KEY')
+TELNYX_API_BASE_URL = os.getenv("TELNYX_API_BASE_URL")
+
+logger.debug(f"REDIS_PORT: {REDIS_PORT}")
+logger.debug(f"TELNYX_API_KEY: {TELNYX_API_KEY}")
+logger.debug(f"TELNYX_API_BASE_URL: {TELNYX_API_BASE_URL}")
+
+redis_host = '192.168.68.70'  # hard-coded override; distinct from the REDIS_HOST setting above
+
use_pdb = True
debug = False
debug_string = ""
@@ -146,6 +183,39 @@ sensor_legal_values = {"radar": (0,1000, 1), "co2": (smell_min, smell_max, 31),
smell_legal_values = {"s0": (smell_min, smell_max, 31), "s1": (smell_min, smell_max, 31), "s2": (smell_min, smell_max, 31), "s3": (smell_min, smell_max, 31), "s4": (smell_min, smell_max, 31),
"s5": (smell_min, smell_max, 31), "s6": (smell_min, smell_max, 31), "s7": (smell_min, smell_max, 31), "s8": (smell_min, smell_max, 31), "s9": (smell_min, smell_max, 31)}
+
+def GetRedisInt(key_name):
+ try:
+ result = int(redis_conn.get(key_name).decode('utf-8'))
+ except:
+ result = None
+ return result
+
+def GetRedisFloat(key_name):
+ try:
+ result = float(redis_conn.get(key_name).decode('utf-8'))
+ except:
+ result = None
+
+ return result
+
+def GetRedisString(key_name):
+ try:
+ result = redis_conn.get(key_name).decode('utf-8')
+ except:
+ result = None
+ return result
+
+def GetRedisMap(key_name):
+ try:
+ result_bytes = redis_conn.hgetall(key_name)
+ result = {k.decode('utf-8'): v.decode('utf-8') for k, v in result_bytes.items()}
+ except:
+ result = {}
+ return result
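+
+# Example usage (illustrative sketch; assumes redis_conn is an already-connected redis.Redis client
+# and the key names shown here are hypothetical):
+# redis_conn.set("device:559:threshold", "12")
+# GetRedisInt("device:559:threshold")      # -> 12
+# GetRedisFloat("device:559:threshold")    # -> 12.0
+# GetRedisString("no_such_key")            # -> None (missing key or decode error)
+# GetRedisMap("app_state:123")             # -> {} when the hash does not exist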
+
+
+
def read_file(file_name, source = "LOCAL", type_ = "TEXT", bucket_name="daily-maps"):
blob_data = ""
@@ -206,6 +276,73 @@ def verify_token(token):
except jwt.InvalidTokenError:
return None
+def SmartSplit(data_string):
+ """
+ Splits a comma-separated string into a list, properly handling nested structures
+ and converting values to appropriate Python types using only the ast library.
+ """
+ if not data_string:
+ return []
+
+ # Remove trailing comma if present
+ data_string = data_string.rstrip(',')
+
+ items = []
+ current_item = ""
+ bracket_count = 0
+ in_quotes = False
+ quote_char = None
+
+ i = 0
+ while i < len(data_string):
+ char = data_string[i]
+
+ # Handle quotes
+ if char in ('"', "'") and (i == 0 or data_string[i-1] != '\\'):
+ if not in_quotes:
+ in_quotes = True
+ quote_char = char
+ elif char == quote_char:
+ in_quotes = False
+ quote_char = None
+
+ # Track brackets only when not in quotes
+ if not in_quotes:
+ if char in '[{(':
+ bracket_count += 1
+ elif char in ']})':  # include ')' so parentheses opened above are balanced
+ bracket_count -= 1
+
+ # Split on comma only when not inside brackets/quotes
+ if char == ',' and bracket_count == 0 and not in_quotes:
+ items.append(current_item.strip())
+ current_item = ""
+ else:
+ current_item += char
+
+ i += 1
+
+ # Add the last item
+ if current_item.strip():
+ items.append(current_item.strip())
+
+ # Convert each item using ast.literal_eval when possible
+ result = []
+ for item in items:
+ if item == '':
+ result.append(None)
+ else:
+ try:
+ # Try to evaluate as Python literal
+ converted = ast.literal_eval(item)
+ result.append(converted)
+ except (ValueError, SyntaxError):
+ # If it fails, keep as string
+ result.append(item)
+
+ return result
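+
+# Example usage (illustrative):
+# SmartSplit('1, [2, 3], "a,b", hello')
+#   -> [1, [2, 3], 'a,b', 'hello']
+# Commas inside brackets or quotes do not split; each piece is passed through
+# ast.literal_eval when possible and kept as a plain string otherwise.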
+
+
def SaveObjectInBlob(file_name, obj):
"""
Saves a Python object to MinIO blob storage using JSON serialization
@@ -232,21 +369,79 @@ def SaveObjectInBlob(file_name, obj):
logger.error(f"Error saving object to blob: {traceback.format_exc()}")
return False
-def ReadObjectMinIO(bucket_name, file_name):
+def SaveGenericObjectInBlob(bucket_name, file_name, obj):
+ """
+ Saves a Python object to MinIO blob storage using JSON serialization
+
+ Args:
+ bucket_name (str): Name of the MinIO bucket to save to
+ file_name (str): Name of the file to save in blob storage
+ obj: Python object to serialize and save
+ """
try:
+ # Convert object to JSON string
+ json_str = json.dumps(obj)
+ # Convert string to bytes
+ json_bytes = json_str.encode('utf-8')
+
+ # Save to MinIO
+ miniIO_blob_client.put_object(
+ bucket_name,
+ file_name,
+ io.BytesIO(json_bytes),
+ len(json_bytes)
+ )
+ return True
+ except Exception as e:
+ logger.error(f"Error saving object to blob: {traceback.format_exc()}")
+ return False
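+
+# Example usage (illustrative; bucket, path and payload are hypothetical):
+# SaveGenericObjectInBlob("filtered-presence", "/559/559_2025-01-15_5_presence.bin", [0, 1, 1, 0])
+#   -> True on success, False if the MinIO put_object call fails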
+
+
+
+def ReadObjectMinIO(bucket_name, file_name, filter_date=None):
+ """
+ Read object from MinIO with optional date filtering.
+
+ Args:
+ bucket_name (str): Name of the MinIO bucket
+ file_name (str): Name of the file/object
+ filter_date (str, optional): Date string in format "YYYY-MM-DD".
+ If provided, returns None if the object
+ was last modified on or before this date.
+
+ Returns:
+ str: Object content as string, or None if filtered out or on error
+ """
+ try:
+ # If date filtering is requested, check object's last modified date first
+ if filter_date:
+ try:
+ # Get object metadata to check last modified date
+ stat = miniIO_blob_client.stat_object(bucket_name, file_name)
+ last_modified = stat.last_modified
+
+ # Parse filter date (assuming format YYYY-MM-DD)
+ target_date = datetime.datetime.strptime(filter_date, "%Y-%m-%d").date()
+
+ # If object was modified on or before the target date, treat it as filtered out and return None
+ if last_modified.date() <= target_date:
+ return None
+
+ except S3Error as e:
+ logger.error(f"Error getting metadata for {file_name}: {e}")
+ return None
+ except ValueError as e:
+ logger.error(f"Invalid date format '{filter_date}': {e}")
+ return None
+
# Retrieve the object data
response = miniIO_blob_client.get_object(bucket_name, file_name)
-
# Read the data from response
data_bytes = response.read()
-
- # Convert bytes to string and then load into a dictionary
+ # Convert bytes to string
data_string = data_bytes.decode('utf-8')
-
# Don't forget to close the response
response.close()
response.release_conn()
-
return data_string
except S3Error as e:
@@ -332,7 +527,7 @@ def GetPriviledges(conn, user_name, password):
with conn.cursor() as cur:
cur.execute(sql)
result = cur.fetchall()#cur.fetchone()
- if result != None:
+ if result != None and result != []:
if result[0][0] == password:
return result[0][1], result[0][2]
else:
@@ -355,6 +550,32 @@ def GetPriviledgesOnly(user):
else:
return "0"
+def GetUserId(user_name):
+ with get_db_connection() as conn:
+ sql = "SELECT user_id FROM public.person_details WHERE user_name = '" + user_name + "'"
+
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ result = cur.fetchall()#cur.fetchone()
+ if result != None and result != []:
+ return result[0][0]
+ else:
+ return "0"
+
+def GetNameFromUserId(user_id):
+ with get_db_connection() as conn:
+ sql = f"SELECT user_name, first_name, last_name FROM public.person_details WHERE user_id = {user_id}"
+
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ result = cur.fetchall()#cur.fetchone()
+ if result != None and result != []:
+ return result[0]
+ else:
+ return None
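+
+# Example usage (illustrative; the user name and id are hypothetical):
+# GetUserId("jsmith")      # -> user_id of jsmith, or "0" if no matching row
+# GetNameFromUserId(42)    # -> (user_name, first_name, last_name) tuple, or None if not found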
+
+
+
def ListDeployments(priviledges, user_id):
global user_id_2_user
@@ -416,17 +637,28 @@ def ListDeployments(priviledges, user_id):
logger.debug(f"Error: {traceback.format_exc()}")
return complete_result
-def ListCaretakers():
+def ListCaretakers(privileges, user_name):
conn = get_db_connection()
- sql = "SELECT * FROM public.person_details WHERE role_ids LIKE '%2%' ORDER BY last_name;" #2 is caretaker
+ if privileges == "-1":
+ sql = "SELECT * FROM public.person_details WHERE role_ids LIKE '%2%' ORDER BY last_name;" #2 is caretaker
- with conn.cursor() as cur:
- cur.execute(sql)
- result = cur.fetchall()#cur.fetchone()
- if result == None:
- result = []
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ result = cur.fetchall()#cur.fetchone()
+ if result == None:
+ result = []
+ else:
+ #non-admin users: return only the requesting user's own record
+ sql = f"SELECT * FROM public.person_details WHERE user_name = '{user_name}';"
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ result = cur.fetchall()#cur.fetchone()
+ if result == None:
+ result = []
+
return result
def ListBeneficiaries(privilidges, user_info):
@@ -477,6 +709,7 @@ def UserDetails(user_id):
cnt += 1
return caretaker_record
+
def DeviceDetails(mac):
conn = get_db_connection()
@@ -500,6 +733,30 @@ def DeviceDetails(mac):
return device_record
+def GetDeviceDetailsSingle(device_id):
+
+ conn = get_db_connection()
+
+ sql = "SELECT column_name FROM information_schema.columns WHERE table_schema = 'public' AND table_name = 'devices';"
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ columns_names = cur.fetchall()
+
+ sql = "SELECT * FROM public.devices WHERE device_id = '" + device_id + "'"
+
+ device_record = {}
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ result = cur.fetchone() #cur.fetchall()
+ if result != None:
+ cnt = 0
+ for field in columns_names:
+ device_record[field[0]] = result[cnt]
+ cnt += 1
+
+ return device_record
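+
+# Example usage (illustrative; the device_id value is hypothetical):
+# GetDeviceDetailsSingle("559")
+#   -> dict keyed by the public.devices column names, or an empty dict if the device is not found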
+
+
def DeploymentDetails(deployment_id):
deployment_record = {}
@@ -562,6 +819,20 @@ def ValidUser(user_name, password):
#AddToLog("Error !1 "+str(err))
+def GetMaxRole(user_name):
+
+ with get_db_connection() as db_conn:
+
+ sql = "SELECT role_ids FROM public.person_details WHERE user_name = '" + user_name + "'"
+
+ with db_conn.cursor() as cur:
+ cur.execute(sql)
+ result = cur.fetchall()#cur.fetchone()
+ if result != None and result != []:
+ return str(result[0])
+ return ""
+
+
def SelectOption(html_code, select_id, selected_item):
"""
Modifies HTML code to set the selected attribute for a specific option in a select element.
@@ -717,14 +988,14 @@ def StoreThresholds2DB(device_id, TR, BR, TLIFE, BLIFE):
return 0
-def StoreBeneficiary2DB(parameters, editing_user_id):
+def StoreBeneficiary2DB(parameters, editing_user_id, user_id):
#print('\nCreating create_caretaker\n')
# Create a caretaker object. This object has nested properties and various types including numbers, DateTimes and strings.
# This can be saved as JSON as is without converting into rows/columns.
conn = get_db_connection()
cur = conn.cursor()
-
+ error_string = ""
if editing_user_id == None or editing_user_id == "":
editing_user_id = "0"
@@ -749,7 +1020,7 @@ def StoreBeneficiary2DB(parameters, editing_user_id):
address_state = '{CleanObject(parameters.get('address_state'))}',
address_country = '{CleanObject(parameters.get('address_country'))}',
time_edit = {current_epoch_time},
- user_edit = {CleanObject(parameters.get('user_id'))},
+ user_edit = {user_id},
role_ids = '{CleanObject(parameters.get('role_ids'))}',
phone_number = '{CleanObject(parameters.get('phone_number'))}',
picture = '{CleanObject(parameters.get('picture'))}',
@@ -765,7 +1036,7 @@ def StoreBeneficiary2DB(parameters, editing_user_id):
('{CleanObject(parameters.get('role_ids'))}', '{CleanObject(parameters.get('email'))}', '{CleanObject(parameters.get('new_user_name'))}',
'{CleanObject(parameters.get('first_name'))}', '{CleanObject(parameters.get('last_name'))}', '{CleanObject(parameters.get('address_street'))}',
'{CleanObject(parameters.get('address_city'))}', '{CleanObject(parameters.get('address_zip'))}', '{CleanObject(parameters.get('address_state'))}',
- '{CleanObject(parameters.get('address_country'))}', {current_epoch_time}, {CleanObject(parameters.get('user_id'))}, '{CleanObject(parameters.get('phone_number'))}',
+ '{CleanObject(parameters.get('address_country'))}', {current_epoch_time}, {user_id}, '{CleanObject(parameters.get('phone_number'))}',
'{CleanObject(parameters.get('picture'))}', '{CleanObject(parameters.get('key'))}');
"""
logger.debug(f"sql= {sql}")
@@ -780,11 +1051,12 @@ def StoreBeneficiary2DB(parameters, editing_user_id):
conn.close()
AddToLog("Written/updated!")
- return 1
+ return 1, error_string
except Exception as err:
- AddToLog(traceback.format_exc())
- return 0
- return ok
+ error_string = traceback.format_exc()
+ AddToLog(error_string)
+ return 0, error_string
+
def DeleteRecordFromDB(form_data):
@@ -991,7 +1263,7 @@ def DeleteRecordFromDB(form_data):
conn.close()
return 0
-def StoreCaretaker2DB(parameters, editing_user_id):
+def StoreCaretaker2DB(parameters, editing_user_id, user_id):
#print('\nCreating create_caretaker\n')
# Create a caretaker object. This object has nested properties and various types including numbers, DateTimes and strings.
@@ -1025,7 +1297,7 @@ def StoreCaretaker2DB(parameters, editing_user_id):
address_state = '{parameters.get('address_state')}',
address_country = '{parameters.get('address_country')}',
time_edit = {current_epoch_time},
- user_edit = {parameters.get('user_id')},
+ user_edit = {user_id},
phone_number = '{parameters.get('phone_number')}',
picture = '{parameters.get('picture')}',
key = '{parameters.get('key')}'
@@ -1037,7 +1309,9 @@ def StoreCaretaker2DB(parameters, editing_user_id):
INSERT INTO public.person_details
(role_ids, access_to_deployments, email, user_name, first_name, last_name, address_street, address_city, address_zip, address_state, address_country, time_edit, user_edit, phone_number, picture, key)
VALUES
- ('{parameters.get('role_ids')}', '{parameters.get('access_to_deployments')}', '{parameters.get('email')}', '{parameters.get('new_user_name')}', '{parameters.get('first_name')}', '{parameters.get('last_name')}', '{parameters.get('address_street')}', '{parameters.get('address_city')}', '{parameters.get('address_zip')}', '{parameters.get('address_state')}', '{parameters.get('address_country')}', {current_epoch_time}, {parameters.get('user_id')}, '{parameters.get('phone_number')}', '{parameters.get('picture')}', '{parameters.get('key')}');
+ ('{parameters.get('role_ids')}', '{parameters.get('access_to_deployments')}', '{parameters.get('email')}', '{parameters.get('new_user_name')}', '{parameters.get('first_name')}',
+ '{parameters.get('last_name')}', '{parameters.get('address_street')}', '{parameters.get('address_city')}', '{parameters.get('address_zip')}', '{parameters.get('address_state')}',
+ '{parameters.get('address_country')}', {current_epoch_time}, {user_id}, '{parameters.get('phone_number')}', '{parameters.get('picture')}', '{parameters.get('key')}');
"""
logger.debug(f"sql= {sql}")
# Execute update query
@@ -1057,6 +1331,69 @@ def StoreCaretaker2DB(parameters, editing_user_id):
return 0
return ok
+def StoreFlow2DB(user_name, time_s, flow_json):
+
+ conn = get_db_connection()
+ cur = conn.cursor()
+
+ query = f"""
+ INSERT INTO public.node_reds (user_name, last_activity, flow)
+ VALUES ('{user_name}', {time_s}, '{flow_json}')
+ ON CONFLICT (user_name)
+ DO UPDATE SET
+ last_activity = EXCLUDED.last_activity,
+ flow = EXCLUDED.flow
+ """
+ logger.debug(f"sql= {query}")
+
+ try:
+ #cur.execute(query, (user_name, time_s, flow_json))
+ cur.execute(query)
+ conn.commit()
+ logger.debug(f"OK!")
+ return True
+ except Exception as e:
+ conn.rollback()
+ print(f"Error storing flow: {e}")
+ logger.debug(f"Error storing flow: {e}")
+ return False
+ finally:
+ cur.close()
+ conn.close()
+ logger.debug(f"Closing!")
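+
+# Example usage (illustrative sketch; the flow payload is hypothetical):
+# StoreFlow2DB("jsmith", int(time.time()), json.dumps({"nodes": []}))
+#   -> True on success; note the JSON is interpolated directly into the SQL string,
+#      so it must not contain unescaped single quotes.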
+
+def StoreAlarms2DB(deployment_id, device_id, deployment_alarms_json, device_alarms_json):
+
+ conn = get_db_connection()
+ cur = conn.cursor()
+
+ # Extract the overlapping list
+
+ try:
+ sql = f"""
+ UPDATE public.deployments SET alarm_details = '{CleanObject(deployment_alarms_json)}' WHERE deployment_id = {deployment_id};
+ """
+
+ logger.debug(f"sql= {sql}")
+ cur.execute(sql)
+ conn.commit()
+
+ sql1 = f"""
+ UPDATE public.devices SET alert_details = '{CleanObject(device_alarms_json)}' WHERE device_id = {device_id};
+ """
+
+ logger.debug(f"sql= {sql1}")
+ cur.execute(sql1)
+ conn.commit()
+
+ cur.close()
+ conn.close()
+
+ AddToLog("Written/updated!")
+ return 1
+ except Exception as err:
+ return 0
+
def CleanObject(object_in, typee = "s"):
if typee == "n":
@@ -1268,11 +1605,23 @@ def UpdateDevicesTable(html_string, devices, users):
#ID, Well id, MAC, Last_Message, Location, Description, Deployment
table_rows_string = ""
for device in devices:
- result = next(item for item in users if item[0] == device[6])
- deployment_name = result[1]
- if result[2] != None:
- deployment_name = deployment_name + " " + result[2]
+ #result = next(item for item in users if item[0] == device[6])
+
+ deployment_name = "?"
+ for item in users:
+ if item[0] == device[6]:
+ result = item
+ deployment_name = result[1]
+ if result[2] != None:
+ deployment_name = deployment_name + " " + result[2]
+ break
+
+
+
+
mac = device[2]
 mac_row_string = f'{mac} | \n'
age = time.time() - device[3]
@@ -1390,12 +1739,15 @@ def GetDeviceDetails(cur, deployment_ids, location_id):
cnt = 0
for device_table_record in devices_ids_records:
if len(devices_times) > 0:
-
+ device_id = device_table_record[0]
if device_id in found_device_details:
last_message_time = found_device_details[device_id]
last_message_epoch = int(last_message_time.timestamp())
else:
- last_message_time = int(device_table_record[14])
+ try:
+ last_message_time = int(device_table_record[14])
+ except:
+ last_message_time = 0
last_message_epoch = last_message_time
else:
last_message_time = 0
@@ -1413,7 +1765,103 @@ def GetDeviceDetails(cur, deployment_ids, location_id):
if device_table_record[5] != "":
description = description + " Close to " + device_table_record[5]
location_id = device_table_record[4]
- row_data = [device_id, well_id, mac, last_message_epoch, location_names[location_id], description, deployment_ids[cnt][0]]
+ if location_id == None:
+ location_id = 0
+ try:
+ row_data = [device_id, well_id, mac, last_message_epoch, location_names[location_id], description, deployment_ids[cnt][0]]
+ except KeyError:
+ # unknown location id: fall back to "?" instead of repeating the same failing lookup
+ row_data = [device_id, well_id, mac, last_message_epoch, location_names.get(location_id, "?"), description, deployment_ids[cnt][0]]
+ cnt += 1
+ all_details.append(row_data)
+
+ return all_details
+
+def GetDeviceDetailsComplete(cur, deployment_ids, location_id):
+
+ #ID, Well id, MAC, Last_Message, Location, Description, Deployment
+ macs = [mac for _, mac in deployment_ids]
+ #macs = list(deployment_ids.keys())
+ macs_string_nq = ",".join(macs)
+ macs_string = "'" + "','".join(macs) + "'"
+
+ if location_id == -1:
+ sql = f"""
+ WITH ordered_macs AS (
+ SELECT unnest(string_to_array('{macs_string_nq}', ',')) as mac,
+ generate_series(1, array_length(string_to_array('{macs_string_nq}', ','), 1)) as position
+ )
+ SELECT d.*
+ FROM public.devices d
+ JOIN ordered_macs om ON d.device_mac = om.mac::text
+ WHERE device_mac IN ({macs_string})
+ ORDER BY om.position;
+ """
+ else:
+ sql = f"""
+ WITH ordered_macs AS (
+ SELECT unnest(string_to_array('{macs_string_nq}', ',')) as mac,
+ generate_series(1, array_length(string_to_array('{macs_string_nq}', ','), 1)) as position
+ )
+ SELECT d.*
+ FROM public.devices d
+ JOIN ordered_macs om ON d.device_mac = om.mac::text
+ WHERE device_mac IN ({macs_string}) AND location = {location_id}
+ ORDER BY om.position;
+ """
+
+ cur.execute(sql)
+ print(sql)
+ devices_ids_records = cur.fetchall()
+ all_details = []
+
+
+ devices_ids_list = [x[0] for x in devices_ids_records]
+ device_ids_string = ",".join(map(str, devices_ids_list))
+ #sql = f"SELECT device_id, MAX(time) as last_reading_time FROM sensor_readings WHERE device_id IN ({device_ids_string}) GROUP BY device_id" #too slow
+ sql = f"SELECT DISTINCT ON (device_id) device_id, time as last_reading_time FROM sensor_readings WHERE device_id IN ({device_ids_string}) AND time > now() - INTERVAL '1 day' ORDER BY device_id, time DESC"
+ cur.execute(sql)
+ print(sql)
+ devices_times = cur.fetchall()#cur.fetchone()
+ found_device_details = {}
+ for device_record in devices_times:
+ device_id, last_message_time = device_record
+ found_device_details[device_id] = last_message_time
+ cnt = 0
+ for device_table_record in devices_ids_records:
+ if len(devices_times) > 0:
+ device_id = device_table_record[0]
+ if device_id in found_device_details:
+ last_message_time = found_device_details[device_id]
+ last_message_epoch = int(last_message_time.timestamp())
+ else:
+ try:
+ last_message_time = int(device_table_record[14])
+ except:
+ last_message_time = 0
+ last_message_epoch = last_message_time
+ else:
+ last_message_time = 0
+ last_message_epoch = 0
+
+ #print(last_message_epoch)
+ #print(type(last_message_epoch))
+ device_id = device_table_record[0]
+ mac = device_table_record[1]
+ well_id = device_table_record[2]
+ description = device_table_record[3]
+ alarm_details = device_table_record[16]
+ if description == None:
+ description = ""
+ if device_table_record[5] != None:
+ if device_table_record[5] != "":
+ description = description + " Close to " + device_table_record[5]
+ location_id = device_table_record[4]
+ if location_id == None:
+ location_id = 0
+ #try:
+ # row_data = [device_id, well_id, mac, last_message_epoch, location_names[location_id], description, deployment_ids[cnt][0], alarm_details]
+ #except:
+ row_data = [device_id, well_id, mac, last_message_epoch, location_names[location_id], description, deployment_ids[cnt][0], alarm_details]
cnt += 1
all_details.append(row_data)
@@ -1424,36 +1872,44 @@ def GetVisibleDevices(deployments):
devices_details = []
stt = time.time()
with get_db_connection() as conn:
- #list all devices that user has access to
- if deployments == "-1":
- sql = "SELECT deployment_id, devices FROM public.deployment_details"
- else:
- sql = f"SELECT deployment_id, devices FROM public.deployment_details WHERE deployment_id IN ({deployments})"
-
with conn.cursor() as cur:
- print(sql)
- cur.execute(sql)
- devices_groups = cur.fetchall()#cur.fetchone()
- deployment_ids = []
- for deployment_id, dev_group in devices_groups:
- if dev_group != None:
- if len(dev_group) > 10:
- if "[" not in dev_group:
- if "," not in dev_group:
- dev_group = '["' + dev_group + '"]'
- else:
- dev_group = dev_group.replace(" ", "")
- dev_group = dev_group.replace(",", '","')
- dev_group = '["' + dev_group + '"]'
+ #list all devices that user has access to
+ if deployments == "-1":
+ sql = "SELECT device_mac FROM public.devices ORDER BY device_id ASC"# SELECT deployment_id, devices FROM public.deployment_details"
+ macs_group = []
+ deployment_ids = []
+ print(sql)
+ cur.execute(sql)
+ macs_records = cur.fetchall()#cur.fetchone()
+ for record in macs_records:
+ deployment_ids.append((0, record[0]))
+ devices_details = GetDeviceDetails(cur, deployment_ids, -1)
+ else:
+ sql = f"SELECT deployment_id, devices FROM public.deployment_details WHERE deployment_id IN ({deployments})"
- macs_group = literal_eval(dev_group)
+ print(sql)
+ cur.execute(sql)
+ devices_groups = cur.fetchall()#cur.fetchone()
+ deployment_ids = []
+ for deployment_id, dev_group in devices_groups:
+ if dev_group != None:
+ if len(dev_group) > 10:
+ if "[" not in dev_group:
+ if "," not in dev_group:
+ dev_group = '["' + dev_group + '"]'
+ else:
+ dev_group = dev_group.replace(" ", "")
+ dev_group = dev_group.replace(",", '","')
+ dev_group = '["' + dev_group + '"]'
- for mac in macs_group:
- deployment_ids.append((deployment_id, mac))
- else:
- print(f"Deployment {deployment_id} has dev_group empty")
- devices_details = GetDeviceDetails(cur, deployment_ids, -1)
- #devices_details.append(devices_detail)
+ macs_group = literal_eval(dev_group)
+
+ for mac in macs_group:
+ deployment_ids.append((deployment_id, mac))
+ else:
+ print(f"Deployment {deployment_id} has dev_group empty")
+ devices_details = GetDeviceDetails(cur, deployment_ids, -1)
+ #devices_details.append(devices_detail)
return devices_details
@@ -1487,6 +1943,36 @@ def GetVisibleDevicesPerLocation(deployments, location):
return devices_details
+def GetVisibleDevicesPerLocationComplete(deployments, location):
+
+ devices_details = []
+
+ with get_db_connection() as conn:
+ #list all devices that user has access to
+ if deployments == "-1" or deployments == "0":
+ sql = "SELECT deployment_id, devices FROM public.deployment_details"
+ else:
+ sql = f"SELECT deployment_id, devices FROM public.deployment_details WHERE deployment_id IN ({deployments})"
+
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ devices_groups = cur.fetchall()#cur.fetchone()
+ deployment_ids = []
+ for deployment_id, dev_group in devices_groups:
+ if dev_group != None:
+ if len(dev_group) > 10:
+ if dev_group[0] == "[":
+ macs_group = literal_eval(dev_group)
+ else:
+ macs_group = dev_group.split(',')
+ for mac in macs_group:
+ deployment_ids.append((deployment_id, mac))
+
+ devices_details = GetDeviceDetailsComplete(cur, deployment_ids, location_indexes[location])
+ #devices_details.append(devices_detail)
+
+ return devices_details
+
def GetUsersFromDeployments(deployments):
#list all devices that user has access to
deployments_dets = []
@@ -1533,7 +2019,641 @@ def CovertToIsoTime(date_s, n_minute):
iso_timestamp = final_datetime.isoformat()
return iso_timestamp
-def GetSensorsDetailsFromDeployment(deployment_id, ddate, filter_minutes):
+
+def sleep_length(presence_list, short_absence_threshold=15):
+ """
+ Calculate the total sleep duration and wake time based on presence data.
+
+ This function correctly interprets the presence_list to determine sleep duration by:
+ 1. Properly aggregating the total sleep time from all significant in-bed periods
+ 2. Considering short absences as part of the same sleep session
+ 3. Determining the wake time when the main sleep session ended
+
+ Args:
+ presence_list (list): List of tuples indicating bed presence/absence
+ short_absence_threshold (int, optional): Maximum duration in decas to consider
+ an absence "short" and still count as sleep.
+ Default is 15 (2.5 minutes)
+
+ Returns:
+ tuple: (sleep_duration_minutes, wake_time_minutes)
+ sleep_duration_minutes: Total sleep duration in minutes
+ wake_time_minutes: Minute in the day when person was determined to be
+ done sleeping (minutes since midnight)
+ """
+ # Extract in-bed periods and out-of-bed periods
+ in_bed_periods = []
+ out_bed_periods = []
+
+ # First process the raw data into periods
+ for i in range(len(presence_list)):
+ deca_index, deca_count = presence_list[i]
+
+ # Skip separator tuples where deca_count is 0
+ if deca_count == 0:
+ continue
+
+ if deca_count > 0: # In bed
+ # Special case for the midnight (first) tuple
+ if i == 0 and deca_index == 0:
+ # This is time in bed before midnight
+ start_deca = -deca_count # Negative because it's before midnight
+ end_deca = 0 # Midnight
+ else:
+ start_deca = deca_index
+ end_deca = deca_index + deca_count
+
+ in_bed_periods.append({
+ 'start': start_deca,
+ 'end': end_deca,
+ 'duration': deca_count
+ })
+ else: # Out of bed
+ out_bed_periods.append({
+ 'start': deca_index,
+ 'end': deca_index + abs(deca_count),
+ 'duration': abs(deca_count)
+ })
+
+ # Sort periods to ensure chronological order
+ in_bed_periods.sort(key=lambda p: p['start'])
+ out_bed_periods.sort(key=lambda p: p['start'])
+
+ # Now determine which periods are part of the main night's sleep
+ # For this, we need to identify consecutive in-bed periods separated by short absences
+
+ # Start by finding the key sleep segments - significant periods in bed during night time
+ night_time_end_deca = 4320 # 12 hours after midnight
+ sleep_segments = []
+
+ # Merge in-bed periods that are separated by short absences
+ merged_periods = []
+ current_period = None
+
+ for period in in_bed_periods:
+ # If we're at the start or after a long break, begin a new period
+ if current_period is None:
+ current_period = period.copy()
+ else:
+ # Check if this period starts shortly after the previous one ends
+ gap = period['start'] - current_period['end']
+
+ # If the gap is negative, the periods overlap (data error), treat as continuous
+ if gap < 0:
+ gap = 0
+
+ # If the gap is short enough, merge the periods
+ if gap <= short_absence_threshold:
+ # Extend the current period
+ current_period['end'] = period['end']
+ current_period['duration'] += period['duration'] - gap
+ else:
+ # Gap too long, add the completed period and start a new one
+ merged_periods.append(current_period)
+ current_period = period.copy()
+
+ # Add the last period if there is one
+ if current_period is not None:
+ merged_periods.append(current_period)
+
+ # Find the main sleep period - prioritize periods that span midnight or early morning
+ # and have significant duration
+ significant_sleep_threshold = 180 # 30 minutes (180 decas)
+ night_periods = [p for p in merged_periods if
+ (p['start'] <= 0 or p['start'] <= night_time_end_deca) and
+ p['duration'] >= significant_sleep_threshold]
+
+ if night_periods:
+ # Find the period with the largest duration
+ main_sleep_period = max(night_periods, key=lambda p: p['duration'])
+
+ # Calculate total sleep duration
+ sleep_duration_minutes = round(main_sleep_period['duration'] / 6) # Convert to minutes
+
+ # Wake time is when this period ended
+ wake_time_minutes = max(0, round(main_sleep_period['end'] / 6)) # Ensure it's not negative
+
+ return (sleep_duration_minutes, wake_time_minutes)
+
+ # No significant sleep periods found
+ return (0, 0)
+
+
+# Example usage (sleep_length returns a (sleep_minutes, wake_minute) tuple):
+# sleep_minutes, wake_minute = sleep_length(presence_list)  # use the default threshold
+# sleep_minutes, wake_minute = sleep_length(presence_list, short_absence_threshold=30)  # allow absences up to 5 minutes
+
+
+
+# Example usage:
+# presence_list = [
+# [0, 554], [3303, 3857], [3303, 0], [3387, -84], [3387, 0], [3388, 1], [3388, 0],
+# [3668, -280], [3668, 0], [3669, 1], [3669, 0], [3699, -30], [3699, 0], [3700, 1],
+# [3700, 0], [3863, -163], [3863, 0], [3864, 1], [3864, 0], [4418, -554], [4418, 0],
+# [4419, 1], [4419, 0], [4547, -128], [4547, 0], [4548, 1], [4548, 0], [4603, -55],
+# [4603, 0], [4604, 1], [4604, 0], [4965, -361], [4965, 0], [4966, 1], [4966, 0],
+# [4984, -18], [4984, 0], [4985, 1], [4985, 0], [8639, -3654]
+# ]
+# print(f"Sleep duration: {sleep_length(presence_list)[0]} minutes")
+
+def filter_short_groups_c_wc_old(presence_list, filter_size, device_id_str, from_date, to_date, time_zone_s, refresh = False):
+
+ #days = presence_list
+ #for from_date, to_date
+
+ tz = pytz.timezone(time_zone_s)
+ # Get current time in that timezone
+ current_time = datetime.datetime.now(tz)
+ # Return just the date part as string
+ now_date_str = current_time.strftime("%Y-%m-%d")
+
+ start_date = datetime.datetime.strptime(from_date, "%Y-%m-%d")
+ end_date = datetime.datetime.strptime(to_date, "%Y-%m-%d")
+
+ last_offset = 0
+ #if to_date == now_date_str:
+ # last_offset = 1
+
+ # Loop through each date (including end_date)
+ current_date = start_date
+ dates_list = []
+ days_difference = 1 + (end_date - start_date).days
+ whole_result = [0] * 6 * 1440 * (days_difference)
+
+ is_long = False
+ if len(presence_list)/(6 * 1440) > (days_difference): #long version
+ is_long = True
+
+ while current_date <= end_date:
+ current_date_str = current_date.strftime("%Y-%m-%d")
+ print(current_date_str)
+ dates_list.append(current_date_str)
+ current_date += timedelta(days=1)
+
+ for day in range(1, days_difference-last_offset+1):
+ print(day)
+ end_index = (1 + day) * 6 * 1440
+ if end_index > len(presence_list):
+ end_index = len(presence_list)
+
+ if is_long:
+ start_index = end_index - 2 * 6 * 1440
+ else:
+ start_index = end_index - 6 * 1440
+
+ current_date_str = dates_list[day-1]
+ filename_day_presence = f"/{device_id_str}/{device_id_str}_{current_date_str}_{filter_size}_presence.bin"
+ filtered_day_str = None
+ if refresh == False:
+ filtered_day_str = ReadObjectMinIO("filtered-presence", filename_day_presence)
+
+ if filtered_day_str == None:
+ filtered_day = filter_short_groups_c(presence_list[start_index:end_index], filter_size, device_id_str, from_date)
+ SaveGenericObjectInBlob("filtered-presence", filename_day_presence, filtered_day)
+ else:
+ filtered_day = json.loads(filtered_day_str)
+ whole_result[start_index:end_index] = filtered_day
+
+ if current_date_str != to_date:
+ end_index = len(presence_list)
+ start_index = end_index - 2 * 6 * 1440
+ filtered_day = filter_short_groups_c(presence_list[start_index:end_index], filter_size, device_id_str, from_date)
+ whole_result[start_index:end_index] = filtered_day
+
+ return whole_result
+
+def filter_short_groups_c_wc(presence_list, filter_size, device_id_str, from_date, to_date, time_zone_s, refresh=False):
+ """
+ Filter out short groups across multiple days.
+ Preserves original logic of using previous day's data for midnight boundary handling.
+ """
+ # Setup timezone and current time
+ tz = pytz.timezone(time_zone_s)
+ current_time = datetime.datetime.now(tz)
+ now_date_str = current_time.strftime("%Y-%m-%d")
+
+ start_date = datetime.datetime.strptime(from_date, "%Y-%m-%d")
+ end_date = datetime.datetime.strptime(to_date, "%Y-%m-%d")
+
+ # Build dates list
+ current_date = start_date
+ dates_list = []
+ while current_date <= end_date:
+ current_date_str = current_date.strftime("%Y-%m-%d")
+ print(current_date_str)
+ dates_list.append(current_date_str)
+ current_date += timedelta(days=1)
+
+ days_difference = len(dates_list)
+
+ # Handle current day limitation
+ samples_per_day = 6 * 1440
+ total_samples = samples_per_day * days_difference
+
+ # If today is the last day, limit the data
+ effective_total_samples = total_samples
+ if to_date == now_date_str:
+ current_minute_of_day = current_time.hour * 60 + current_time.minute
+ current_sample_of_day = min(current_minute_of_day * 6, samples_per_day)
+ effective_total_samples = (days_difference - 1) * samples_per_day + current_sample_of_day
+ print(f"Today detected: limiting to {current_sample_of_day} samples for last day")
+
+ # Initialize result - use effective total samples
+ whole_result = [0] * effective_total_samples
+
+ # Determine if we have "long" data (more than expected days)
+ is_long = len(presence_list) > (days_difference * samples_per_day)
+
+ # Process each day (0-indexed to avoid confusion)
+ for day_idx in range(days_difference):
+ current_date_str = dates_list[day_idx]
+ print(f"Processing day {day_idx + 1}: {current_date_str}")
+
+ # Calculate result array indices for this day
+ result_start_idx = day_idx * samples_per_day
+ result_end_idx = (day_idx + 1) * samples_per_day
+
+ # For the last day, if it's today, limit the end index
+ if day_idx == days_difference - 1 and to_date == now_date_str:
+ result_end_idx = result_start_idx + current_sample_of_day
+
+ # Skip if this day's range is beyond our result array
+ if result_start_idx >= len(whole_result):
+ break
+
+ # Ensure we don't exceed result array bounds
+ result_end_idx = min(result_end_idx, len(whole_result))
+
+ # Calculate input data range
+ if is_long:
+ # Use 2 days of context (previous day + current day)
+ input_end_idx = min(len(presence_list), result_end_idx)
+ input_start_idx = max(0, input_end_idx - 2 * samples_per_day)
+ else:
+ # Use 1 day of data
+ input_end_idx = min(len(presence_list), result_end_idx)
+ input_start_idx = max(0, input_end_idx - samples_per_day)
+
+ # Skip if no input data available
+ if input_start_idx >= input_end_idx:
+ print(f"No input data available for {current_date_str}")
+ continue
+
+ # Try to load cached data
+ filename_day_presence = f"/{device_id_str}/{device_id_str}_{current_date_str}_{filter_size}_presence.bin"
+ filtered_day_str = None
+
+ if not refresh:
+ filtered_day_str = ReadObjectMinIO("filtered-presence", filename_day_presence, current_date_str)
+
+ if filtered_day_str is not None and filtered_day_str != "":
+ # discard cached results that contain decimal values >= 2.0 (unexpected in filtered presence data)
+ has_larger = bool(re.search(r'\b(?:[2-9]|\d{2,})\.\d+\b', filtered_day_str))
+ if has_larger:
+ filtered_day_str = None
+
+ if filtered_day_str is None or filtered_day_str == "":
+ # Filter the input data
+ input_data = presence_list[input_start_idx:input_end_idx]
+ print(input_start_idx, input_end_idx, filter_size, device_id_str, from_date, len(input_data))
+ filtered_data = filter_short_groups_c(input_data, filter_size, device_id_str, from_date)
+
+ # Extract the portion corresponding to this day
+ if is_long:
+ # We have 2 days of data, take the second day
+ day_data_start = samples_per_day
+ else:
+ # We have 1 day of data, take it all
+ day_data_start = 0
+
+ # Calculate how much data we need for this day
+ needed_samples = result_end_idx - result_start_idx
+ day_data_end = day_data_start + needed_samples
+
+ # Extract the day's portion, ensuring we don't exceed bounds
+ if day_data_start < len(filtered_data):
+ filtered_day = filtered_data[day_data_start:min(day_data_end, len(filtered_data))]
+ else:
+ filtered_day = []
+
+ # Cache the result
+ SaveGenericObjectInBlob("filtered-presence", filename_day_presence, filtered_day)
+ else:
+ filtered_day = json.loads(filtered_day_str)
+
+ # Copy to result array
+ copy_length = min(len(filtered_day), result_end_idx - result_start_idx)
+ if copy_length > 0:
+ whole_result[result_start_idx:result_start_idx + copy_length] = filtered_day[:copy_length]
+
+ print(f"Completed {current_date_str}: copied {copy_length} samples")
+
+ return whole_result
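+
+# Example usage (illustrative; device id, dates and timezone are hypothetical):
+# filtered = filter_short_groups_c_wc(presence_samples, 5, "559",
+#                                     "2025-01-14", "2025-01-15", "America/Los_Angeles")
+# When both days are complete, len(filtered) == 2 * 6 * 1440
+# (1440 minutes per day at six 10-second samples per minute).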
+
+def GetLastDurationMinutes(deployment_id, selected_devices, filter, ddate):
+
+ global threshold_cache, device_lookup_cache
+
+ max_sleep = 0
+ max_device_id = 0
+ max_woke_up = 0
+ presence_list = []
+ to_date = ddate
+
+
+ date_obj = datetime.datetime.strptime(ddate, "%Y-%m-%d")
+ # Subtract one day
+ previous_day = date_obj - timedelta(days=1)
+ # Convert back to string
+ prev_date = previous_day.strftime("%Y-%m-%d")
+
+ data_type = "z-graph"
+
+ time_zone_s = GetTimeZoneOfDeployment(deployment_id)
+ timee = LocalDateToUTCEpoch(ddate, time_zone_s)+5 #add so date boundary is avoided
+
+
+ devices_list, device_ids = GetProximityList(deployment_id, timee)
+
+ #Let's keep only the selected (bedroom) devices
+ just_selected_devices = []
+ for device_details in devices_list:
+ if device_details[1] in selected_devices:
+ just_selected_devices.append(device_details)
+
+ devices_list = just_selected_devices
+
+ time_from_str, _ = GetLocalTimeForDate(ddate, time_zone_s)
+ _, time_to_str = GetLocalTimeForDate(to_date, time_zone_s)
+
+ time_from = datetime.datetime.strptime(time_from_str, '%Y-%m-%d %H:%M:%S%z')
+ time_to = datetime.datetime.strptime(time_to_str, '%Y-%m-%d %H:%M:%S%z')
+ epoch_time = calendar.timegm(time_from.utctimetuple())
+
+ presence_map = {}
+ presence_map["time_start"] = epoch_time
+ presence_map["time_zone"] = time_zone_s
+
+ # Calculate the difference in days
+ days_difference = (time_to - time_from).days
+
+ if data_type == "all" or data_type == "z-graph" or data_type == "multiple":
+
+ # Convert string to datetime object
+ date_obj = datetime.datetime.strptime(time_from_str, "%Y-%m-%d %H:%M:%S%z")
+ # Subtract one day
+ previous_day = date_obj - timedelta(days=1)
+
+ # Format back to string in the same format
+ time_from_z_str = previous_day.strftime("%Y-%m-%d %H:%M:%S%z")
+
+
+ device_id_2_threshold = {}
+ device_id_2_location = {0: "Outside"}
+
+ for details in devices_list:
+
+ well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = details #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]')
+ if radar_threshold_group_st == None:
+ radar_threshold_group_st = '["s3_max",12]' #the last value is the threshold applied to the s28 composite
+
+ if len(radar_threshold_group_st) > 8:
+ radar_threshold_group = json.loads(radar_threshold_group_st)
+ else:
+ radar_threshold_group = ["s3_max",12]
+
+ print(well_id, radar_threshold_group)
+
+ device_id_2_location[device_id] = location_name
+ device_id_2_threshold[device_id] = radar_threshold_group
+
+ ids_list = []
+ well_ids = []
+ id2well_id = {}
+ radar_fields_of_interest = []
+ device_field_indexes = {}
+ for details in devices_list:
+ threshold_str = details[5]
+ try:
+ threshold_lst = json.loads(threshold_str)
+ except:
+ threshold_lst = ["s3",12]
+ #threshold_lst = ["s3_max",12]
+
+ radar_field = threshold_lst[0]
+ #since we are getting 10 sec data, there is no more need for min or max
+ radar_field = radar_field.split("_")[0]
+ if radar_field not in radar_fields_of_interest:
+ device_field_indexes[radar_field] = len(radar_fields_of_interest)
+ radar_fields_of_interest.append(radar_field)
+
+ ids_list.append(details[1])
+ id2well_id[details[1]] = details[0]
+ well_ids.append(details[0])
+ presence_map["well_ids"] = well_ids
+
+ if len(devices_list) > 0:
+
+ devices_list_str = ','.join(str(device[1]) for device in devices_list)
+ #sql = get_deployment_radar_only_colapsed_query(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest)
+ sql = get_deployment_radar_10sec_snapped_query(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest)
+ print(sql)
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ #zsql = get_deployment_radar_only_colapsed_query(devices_list_str, time_from_z_str, time_to_str, ids_list, radar_fields_of_interest)
+ zsql = get_deployment_radar_10sec_snapped_query(devices_list_str, time_from_z_str, time_to_str, ids_list, radar_fields_of_interest)
+ print(zsql)
+
+ with get_db_connection() as conn:
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ my_data = None
+ myz_data = None
+
+ my_data = cur.fetchall()
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ cur.execute(zsql)
+ myz_data = cur.fetchall()
+
+ if my_data != None:
+
+ device_id_2_threshold = {}
+ device_id_2_location = {0: "Outside"}
+ row_nr_2_device_id = {}
+ cnt = 0
+ row_nr_2_device_id[0] = 0
+
+ #presence_map['longpresence'] and temporary_map_day_plus are similar, except that one is used for the Z-graph and the other for multiple-person detection
+
+ if data_type == "presence" or data_type == "all" or data_type == "z-graph" or data_type == "multiple":
+ presence_map['presence'] = {}
+ presence_map['longpresence'] = {}
+
+ if data_type == "raw" or data_type == "all":
+ presence_map['raw'] = {}
+
+ for details in devices_list:
+ #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]','')
+ well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = details
+
+ if data_type == "raw" or data_type == "all":
+ zeros_list = [0] * 6 * 1440 * days_difference
+ presence_map['raw'][well_id] = zeros_list
+
+ if data_type == "presence" or data_type == "all" or data_type == "z-graph" or data_type == "multiple":
+ zeros_list = [0] * 6 * 1440 * days_difference
+ presence_map['presence'][well_id] = zeros_list
+
+
+ #presence_map[][well_id] = zeros_list
+ cnt += 1
+ row_nr_2_device_id[cnt] = well_id
+
+ if radar_threshold_group_st == None:
+ radar_threshold_group_st = '["s3",12]' #the last value is the threshold applied to the s28 composite
+
+ if len(radar_threshold_group_st) > 8:
+ radar_threshold_group = json.loads(radar_threshold_group_st)
+ else:
+ radar_threshold_group = ["s3",12]
+
+ device_id_2_location[well_id] = location_name
+ device_id_2_threshold[well_id] = radar_threshold_group
+ if len(my_data) > 1:
+
+ start_time_ = my_data[0][0]
+ parsed_time = datetime.datetime.strptime(time_from_str, '%Y-%m-%d %H:%M:%S%z')
+
+ start_time = datetime.datetime(
+ parsed_time.year,
+ parsed_time.month,
+ parsed_time.day,
+ parsed_time.hour, # Adjust for UTC-7
+ parsed_time.minute,
+ parsed_time.second,
+ tzinfo=datetime.timezone(datetime.timedelta(hours=-7))
+ )
+
+ presence_map = optimized_radar_processing(my_data, start_time_, id2well_id, device_id_2_threshold, device_field_indexes, presence_map, data_type)
+
+
+ if myz_data != None:
+ temporary_map_day_plus = {}
+ presence_map['z_graph'] = {}
+ for details in devices_list:
+ #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]','')
+ well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = details
+
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ zeros_list = [0] * 6 * 1440 * (days_difference + 1) #+1 is for previous day
+ presence_map['z_graph'][well_id] = [] #just place holder
+ temporary_map_day_plus[well_id] = zeros_list
+ presence_map['longpresence'][well_id] = zeros_list #just place holder
+
+ print(deployment_id)
+ print(time_from_z_str)
+ print(devices_list)
+ parsed_time = datetime.datetime.strptime(time_from_z_str, '%Y-%m-%d %H:%M:%S%z')
+
+ start_time = datetime.datetime(
+ parsed_time.year,
+ parsed_time.month,
+ parsed_time.day,
+ parsed_time.hour, # Adjust for UTC-7
+ parsed_time.minute,
+ parsed_time.second,
+ tzinfo=datetime.timezone(datetime.timedelta(hours=-7))
+ )
+
+ #start_time_ = myz_data[0][0]
+ st = time.time()
+ device_lookup_cache = {}
+ threshold_cache = {}
+ temporary_map_day_plus = optimized_processing(myz_data, start_time, id2well_id, device_id_2_threshold, device_field_indexes, temporary_map_day_plus, data_type)
+
+
+ if data_type == "all" or data_type == "z-graph" or data_type == "presence" or data_type == "multiple":
+ for device_id in ids_list:
+ device_id_str = str(device_id)
+ if data_type == "presence" or data_type == "all" or data_type == "z-graph":
+ if filter > 1:
+ #presence_list = filter_short_groups_numpy(presence_map["presence"][id2well_id[device_id]], filter, device_id, ddate+"-"+to_date)
+ presence_list = filter_short_groups_c_wc(presence_map["presence"][id2well_id[device_id]], filter, device_id_str, ddate, to_date, time_zone_s)
+ presence_map["presence"][id2well_id[device_id]] = presence_list
+ #longpresence_list = filter_short_groups_numpy(presence_map["longpresence"][id2well_id[device_id]], filter, device_id, ddate+"-"+to_date)
+ longpresence_list = filter_short_groups_c_wc(presence_map["longpresence"][id2well_id[device_id]], filter, device_id_str, prev_date, to_date, time_zone_s)
+ presence_map["longpresence"][id2well_id[device_id]] = longpresence_list
+
+ else: #straight decas
+ presence_list = presence_map["presence"][id2well_id[device_id]]
+ longpresence_list = presence_map["longpresence"][id2well_id[device_id]]
+
+
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ max_sleep = 0
+ max_device_id = 0
+ max_woke_up = 0
+ for device_id in ids_list:
+ #print(device_id_2_threshold[id2well_id[device_id]])
+ presence_list = CreateZGraph(id2well_id[device_id], presence_map["longpresence"][id2well_id[device_id]]) #temporary_map_day_plus[id2well_id[device_id]])
+ sleep_minutes, woke_up = sleep_length(presence_list)
+ if sleep_minutes > max_sleep:
+ max_sleep = sleep_minutes
+ max_device_id = device_id
+ max_woke_up = woke_up
+ presence_map = {}
+ return max_sleep, max_device_id, max_woke_up, presence_list
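+
+# Note on the return value above: max_sleep / max_device_id / max_woke_up describe the device with
+# the longest detected sleep, while presence_list is the Z-graph list of the last device processed
+# in the loop, which is not necessarily the same device.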
+
+def GetTemperature(bedroom_device_id, ddate):
+ result = 0
+ sql = f"""
+ SELECT *
+ FROM public.sensor_readings
+ WHERE device_id = {bedroom_device_id}
+ ORDER BY "time" DESC
+ LIMIT 1;
+ """
+ with get_db_connection() as conn:
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ result = cur.fetchone()
+ return result
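+
+# Note: despite its name, GetTemperature returns the full latest sensor_readings row for the device
+# (callers index into it, e.g. device_detail[2], for the temperature field); ddate is currently unused.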
+
+def GetSensorsDetailsFromDeployment(deployment_id, ddate, filter_minutes, fast=True):
#list all devices that user has access to
deployments_dets = []
with get_db_connection() as conn:
@@ -1670,11 +2790,13 @@ def GetSensorsDetailsFromDeployment(deployment_id, ddate, filter_minutes):
file_modified_local = file_modified_utc1.astimezone(pytz.timezone(time_zone_s))
file_modified_date_local = file_modified_local.date()
file_date_utc = MapFileToDate(locations_file)
+ logger.debug(f"file_modified_utc1={str(file_modified_utc1.date())} file_date_utc={str(file_date_utc)}")
if file_modified_utc1.date() < file_date_utc:
force_recreate = True
else: #same date
current_time = datetime.datetime.now(pytz.timezone(time_zone_s))
time_passed = current_time - file_modified_local
+ logger.debug(f"current_time={current_time} file_modified_local={file_modified_local} time_passed={time_passed}")
 if time_passed.seconds > 30: #recreate if older than 30 seconds
force_recreate = True
else:
@@ -1683,10 +2805,11 @@ def GetSensorsDetailsFromDeployment(deployment_id, ddate, filter_minutes):
if force_recreate:
CreateLocationsStripe(locations_file, time_zone_s)
- locations_list_s = ReadObjectMinIO("daily-maps", locations_file+".bin")
+ locations_list_s = ReadObjectMinIO("daily-maps", locations_file+".bin")
+ logger.debug(f"locations_list_s={locations_list_s}")
if (locations_list_s is not None):
@@ -1777,14 +2900,45 @@ def GetSensorsDetailsFromDeployment(deployment_id, ddate, filter_minutes):
# wellness_score_percent
wellness_score_percent = 90
- # bedroom_temperature
- bedroom_temperature = 0
+ sleep_filter_minutes = 5
+
+ time_from_str, _ = GetLocalTimeForDate(ddate, time_zone_s)
- # sleep_bathroom_visit_count
sleep_bathroom_visit_count = 0
-
# bedroom_co2
- bedroom_co2 = 400
+ bedroom_co2 = 500
+ device_detail = None
+ bedroom_temperature = 0
+ sleep_hours = 0
+ if fast == False:
+
+ if len(bedrooms) > 0:
+ sleep_minutes, bedroom_device_id, woke_up, presence_list = GetLastDurationMinutes(deployment_id, bedrooms, sleep_filter_minutes, ddate)
+ sleep_hours = sleep_minutes/ 60
+ # bedroom_temperature
+ temp_offset = -16.0
+ device_detail = GetTemperature(bedroom_device_id, ddate)
+
+ # sleep_bathroom_visit_count
+ date_obj = datetime.datetime.strptime(time_from_str, "%Y-%m-%d %H:%M:%S%z")
+ if sleep_minutes < woke_up: # went to sleep after midnight
+ date_sleep = ddate
+ to_sleep = woke_up - sleep_minutes
+ else:# went to sleep before midnight
+ to_sleep = 1440 + woke_up - sleep_minutes
+
+ # Convert string to datetime object
+ previous_day = date_obj - timedelta(days=1)
+ date_sleep = previous_day.strftime("%Y-%m-%d %H:%M:%S%z")
+
+
+ if device_detail != None:
+ bedroom_temperature = device_detail[2] + temp_offset
+
+
+
+
+
# shower_detected_time
shower_detected_time = last_bathroom_time
@@ -1801,37 +2955,41 @@ def GetSensorsDetailsFromDeployment(deployment_id, ddate, filter_minutes):
#lets find last time seen at Bathroom, Kitchen, Bedroom pd.first_name, pd.last_name, pd.address_street, pd.picture
picture_url = deployments_dets[4]
- report = {"user_id":deployments_dets[0],
- "name":deployments_dets[1] + " " + deployments_dets[2],
- "address":deployments_dets[3],
- "time_zone":time_zone_s,
- "picture":picture_url,
- "bathroom_at": last_bathroom_time,
- "kitchen_at": last_kitchen_time,
- "bedroom_at": last_bedroom_time,
- "temperature": (sensor_dets[2] - 16) if sensor_dets != None else 0,
- "smell": "clean",
- "bathroom_delayed": [6, 12],
- "kitchen_delayed": [6, 12],
- "bedroom_delayed": [13, 16],
- "last_location": dev_id_to_location[last_present_device],
- "last_detected_time": last_present_time,
- "wellness_score_percent": wellness_score_percent,
- "wellness_descriptor_color": "bg-green-100 text-green-700",
- "bedroom_temperature": bedroom_temperature,
- "sleep_bathroom_visit_count": sleep_bathroom_visit_count,
- "bedroom_co2": bedroom_co2,
- "shower_detected_time": shower_detected_time,
- "breakfast_detected_time": breakfast_detected_time,
- "living_room_time_spent": living_room_time_spent,
- "outside_hours": outside_hours,
- "wellness_descriptor": "Great!",
- "last_seen_alert": "Alert = None",
- "last_seen_alert_colors": "bg-green-100 text-green-700", #https://tailwindcss.com/docs/colors
- "most_time_spent_in": "Bedroom",
- "sleep_hours": "7.9"
- }
+ report = {}
+ try:
+ report = {"user_id":deployments_dets[0],
+ "name":deployments_dets[1] + " " + deployments_dets[2],
+ "address":deployments_dets[3],
+ "time_zone":time_zone_s,
+ "picture":picture_url,
+ "bathroom_at": last_bathroom_time,
+ "kitchen_at": last_kitchen_time,
+ "bedroom_at": last_bedroom_time,
+ "temperature": (sensor_dets[2] - 16) if sensor_dets != None else 0,
+ "smell": "clean",
+ "bathroom_delayed": [6, 12],
+ "kitchen_delayed": [6, 12],
+ "bedroom_delayed": [13, 16],
+ "last_location": dev_id_to_location[last_present_device],
+ "last_detected_time": last_present_time,
+ "wellness_score_percent": wellness_score_percent,
+ "wellness_descriptor_color": "bg-green-100 text-green-700",
+ "bedroom_temperature": round(bedroom_temperature, 2),
+ "sleep_bathroom_visit_count": sleep_bathroom_visit_count,
+ "bedroom_co2": bedroom_co2,
+ "shower_detected_time": shower_detected_time,
+ "breakfast_detected_time": breakfast_detected_time,
+ "living_room_time_spent": round(living_room_time_spent, 2),
+ "outside_hours": round(outside_hours, 2),
+ "wellness_descriptor": "Great!",
+ "last_seen_alert": "Alert = None",
+ "last_seen_alert_colors": "bg-green-100 text-green-700", #https://tailwindcss.com/docs/colors
+ "most_time_spent_in": "Bedroom",
+ "sleep_hours": round(sleep_hours, 2)
+ }
+ except Exception as e:
+ print(traceback.format_exc())
return report
def ToList(input_data):
@@ -1936,7 +3094,7 @@ def GetProximityList(deployment_id, epoch_from_file_s):
if devices_string == "":
- return []
+ return [], []
macs_list = ToList(devices_string)
device_ids, device_list = MACsToWellIds(cur, macs_list)
@@ -1953,6 +3111,7 @@ def FilterList(to_filter: str, allowed: str) -> str:
# Join back to comma-separated string
return ','.join(filtered)
+
def GetMatchingDevices(privileges, group, deployment, location):
global LocationsMap
@@ -1976,6 +3135,29 @@ def GetMatchingDevices(privileges, group, deployment, location):
devices = GetVisibleDevicesPerLocation(deployment, location)
return devices
+def GetMatchingDevicesComplete(privileges, group, deployment, location):
+
+ global LocationsMap
+
+ results=[]
+ if privileges != "-1":
+ if deployment == "" or deployment == "0":
+ deployment = privileges
+
+ privileges_list = privileges.split(',')
+ if deployment != "0":
+ if "," in deployment:
+ deployment = FilterList(deployment, privileges)
+ else:
+ if deployment not in privileges_list:
+ return results
+ else:
+ if deployment == "0":
+ deployment = "-1"
+
+ devices = GetVisibleDevicesPerLocationComplete(deployment, location)
+ return devices
+
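+# Illustrative sketch of the privilege filtering above (hypothetical values, not executed):
+# privileges = "3,7", deployment = "7,9" -> FilterList keeps only "7";
+# privileges = "3,7", deployment = "" -> deployment falls back to the full privilege list "3,7";
+# privileges = "-1" (admin), deployment = "0" -> deployment becomes "-1" (all deployments).
+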
def getOldestDeploymentHistoryFromBeneficiary(deployment_id):
#this will return oldest entry as well as last proximity (devices)
st = time.time()
@@ -2195,7 +3377,7 @@ def GetDeploymentDatesBoth(deployment_in):
# Generate a list of date strings from oldest_date to today in inverted order
date_list = [(today_date - timedelta(days=x)).strftime('%Y-%m-%d') for x in range((today_date - oldest_date_dt_local).days + 1)]
print(f"&3 ----{time.time() - st}")
- return date_list, devices_all
+ return date_list, devices_all, time_zone_st
def check_file_exists(file_name, bucket_name="daily-maps"):
try:
@@ -4764,7 +5946,7 @@ def CreatePresenceMap(location_image_file, devices_list, selected_date,
fields = ['m0_max', 'm1_max', 'm2_max', 'm3_max', 'm4_max', 'm5_max',
'm6_max', 'm7_max', 'm8_max', 'm08_max', 's2_max', 's3_max',
- 's4_max', 's5_max', 's6_max', 's7_max', 's8_max', 's28_max', 's28_min']
+ 's4_max', 's5_max', 's6_max', 's7_max', 's8_max', 's28_max', 's28_min'] # Note: 'm8_max' and 'm08_max' are distinct; m08 is the composite of m0 through m8
fields_n = len(fields)
@@ -4964,6 +6146,124 @@ def ConvertToBase(time_from_str, time_zone_s):
dt = datetime.datetime.strptime(time_from_str, "%Y-%m-%d %H:%M:%S%z")
return dt
+def GetTimeAndEvents(data):
+ """
+ Calculates non-zero elements and consecutive non-zero groups using itertools.
+ This is often the most readable and efficient pure Python approach.
+ """
+ # int(sum(data)) would be a faster count, but only if every non-zero sample is exactly 1.0
+ #non_zeros = int(sum(data))
+ non_zeros = sum(1 for x in data if x != 0)
+ # Count groups of non-zero elements
+ events = sum(1 for key, group in itertools.groupby(data) if key != 0.0)
+ return non_zeros, events
+
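+# Illustrative example (not executed): GetTimeAndEvents([0, 1.0, 1.0, 0, 1.0]) returns (3, 2):
+# three non-zero samples that itertools.groupby splits into two separate presence events.
+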
+def current_date_at_tz(timezone_str):
+ """
+ Returns the current date in the specified timezone in yyyy-mm-dd format.
+
+ Args:
+ timezone_str (str): Timezone string like "America/Los_Angeles"
+
+ Returns:
+ str: Current date in yyyy-mm-dd format
+ """
+ # Get the timezone object
+ tz = pytz.timezone(timezone_str)
+
+ # Get current datetime in the specified timezone
+ current_dt = datetime.datetime.now(tz)
+
+ # Format as yyyy-mm-dd
+ return current_dt.strftime('%Y-%m-%d')
+
+
+def GetActivities(device_id, well_id, date_str, filter_size, refresh, timezone_str, radar_threshold_group_st):
+ # filtered_day holds the non-zero points whose radar reads exceeded the threshold
+ device_id_str = str(device_id)
+ # Defined before the try block so the except handler below can always reference them
+ filename_day_presence = f"/{device_id_str}/{device_id_str}_{date_str}_{filter_size}_presence.bin"
+ filtered_day_str = None
+
+ try:
+
+ time_from_str, time_to_str = GetLocalTimeForDate(date_str, timezone_str)
+ if refresh == False and date_str != current_date_at_tz(timezone_str):
+ has_larger = False
+ filtered_day_str = ReadObjectMinIO("filtered-presence", filename_day_presence, date_str)
+ if filtered_day_str is not None and filtered_day_str != "":
+ # Discard cached data that still contains values >= 2 (raw levels rather than 0/1 presence)
+ has_larger = bool(re.search(r'\b(?:[2-9]|\d{2,})\.\d+\b', filtered_day_str))
+ if has_larger:
+ filtered_day_str = None
+ if filtered_day_str is None:
+
+ radar_fields_of_interest = []
+
+ try:
+ threshold_lst = json.loads(radar_threshold_group_st)
+ except:
+ threshold_lst = ["s3_max",12]
+ radar_fields_of_interest = [threshold_lst[0]]
+ ids_list = [int(device_id)]
+ devices_list_str = device_id_str
+ #sql = get_deployment_radar_only_colapsed_query(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest)
+ sql = get_deployment_radar_10sec_snapped_query_min_max(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest)
+ print(sql)
+
+ with get_db_connection() as conn:
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ my_data = None
+ my_data = cur.fetchall()
+
+ days_difference = 1
+ zeros_list = [0] * 6 * 1440 * days_difference
+ presence_map = {'presence': {}}
+ presence_map['presence'][well_id] = zeros_list
+
+ if radar_threshold_group_st is None:
+ radar_threshold_group_st = '["s3",12]' # default [field, detection threshold]
+
+ if len(radar_threshold_group_st) > 8:
+ radar_threshold_group = json.loads(radar_threshold_group_st)
+ else:
+ radar_threshold_group = ["s3",12]
+
+ device_id_2_location = {well_id: ""}
+ device_id_2_threshold = {well_id: radar_threshold_group}
+ device_field_indexes = {radar_threshold_group[0].split("_")[0]: 1} #len(radar_fields_of_interest)
+ id2well_id = {device_id: well_id}
+
+ if len(my_data) > 1:
+
+ start_time_ = my_data[0][0]
+ parsed_time_ = datetime.datetime.strptime(time_from_str, '%Y-%m-%d %H:%M:%S%z')
+
+ #start_time = datetime.datetime(
+ #parsed_time.year,
+ #parsed_time.month,
+ #parsed_time.day,
+ #parsed_time.hour, # Adjust for UTC-7
+ #parsed_time.minute,
+ #parsed_time.second,
+ #tzinfo=datetime.timezone(datetime.timedelta(hours=-7))
+ #)
+
+ presence_map = optimized_radar_processing(my_data, start_time_, id2well_id, device_id_2_threshold, device_field_indexes, presence_map, "presence")
+
+ presence_list = filter_short_groups_c_wc(presence_map["presence"][id2well_id[device_id]], filter_size, device_id_str, date_str, date_str, timezone_str)
+ filtered_day_str = ReadObjectMinIO("filtered-presence", filename_day_presence)
+ filtered_day = json.loads(filtered_day_str)
+ else:
+ filtered_day = json.loads(filtered_day_str)
+
+ non_zeros, events = GetTimeAndEvents(filtered_day)
+
+ return (non_zeros / 360, events) # 360 deca (10-second) samples per hour
+ except Exception as e:
+ print(filename_day_presence)
+ print(filtered_day_str)
+ print(traceback.format_exc())
+ return(0, 0)
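+
+# Hypothetical usage sketch for GetActivities (all argument values are examples only):
+# GetActivities(1234, 290, "2025-03-01", 3, False, "America/Los_Angeles", '["s3_max",12]')
+# would return something like (6.5, 4), i.e. 6.5 hours of detected presence spread over
+# 4 separate visits for that device and day.
+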
def CreateFullLocationMap(location_image_file, devices_list, selected_date,
map_type, force_recreate, chart_type, bw, motion, scale_global, fast, filter_minutes, time_zone_s):
#global Id2MACDict
@@ -5316,7 +6616,7 @@ def CreateFullLocationMap(location_image_file, devices_list, selected_date,
try:
threshold_lst = json.loads(threshold_str)
except:
- threshold_lst = ["s3_max",50]
+ threshold_lst = ["s3_max",12]
radar_field = threshold_lst[0]
if radar_field not in radar_fields_of_interest:
@@ -5349,12 +6649,12 @@ def CreateFullLocationMap(location_image_file, devices_list, selected_date,
row_nr_2_device_id[cnt] = device_id
if radar_threshold_group_st == None:
- radar_threshold_group_st = '["s3_max",50]' #last value is threshold to s28 composite
+ radar_threshold_group_st = '["s3_max",12]' #last value is threshold to s28 composite
if len(radar_threshold_group_st) > 8:
radar_threshold_group = json.loads(radar_threshold_group_st)
else:
- radar_threshold_group = ["s3_max",50]
+ radar_threshold_group = ["s3_max",12]
device_id_2_location[device_id] = location_name
@@ -6093,7 +7393,7 @@ def CreateDailyLocationMap(location_image_file, devices_list, selected_date, fil
try:
threshold_lst = json.loads(threshold_str)
except:
- threshold_lst = ["s3_max",50]
+ threshold_lst = ["s3_max",12]
radar_field = threshold_lst[0]
if radar_field not in radar_fields_of_interest:
@@ -6126,12 +7426,12 @@ def CreateDailyLocationMap(location_image_file, devices_list, selected_date, fil
row_nr_2_device_id[cnt] = device_id
if radar_threshold_group_st == None:
- radar_threshold_group_st = '["s3_max",50]' #last value is threshold to s28 composite
+ radar_threshold_group_st = '["s3_max",12]' #last value is threshold to s28 composite
if len(radar_threshold_group_st) > 8:
radar_threshold_group = json.loads(radar_threshold_group_st)
else:
- radar_threshold_group = ["s3_max",50]
+ radar_threshold_group = ["s3_max",12]
device_id_2_location[device_id] = location_name
@@ -6680,6 +7980,109 @@ def get_deployment_radar_only_colapsed_query(devices_list_str, time_from_str, ti
"""
Generate a TimeScaleDB query for sensor and radar readings based on device IDs.
+ Parameters:
+ devices_list_str (str): Comma-separated string of device IDs
+ time_from_str (str): Start time for the query
+ time_to_str (str): End time for the query
+ ids_list (list): List of device IDs in priority order for sorting
+ radar_fields_of_interest (list): List of unique fields required across all devices
+
+ Returns:
+ str: Generated SQL query
+ """
+
+ # Generate the CASE statement for ordering based on the provided ids_list
+ case_statements = []
+ for index, device_id in enumerate(ids_list, start=1):
+ case_statements.append(f"WHEN {device_id} THEN {index}")
+
+ case_order = "\n ".join(case_statements)
+ radar_fields_to_get = ""
+ q_parts = ""
+ for field in radar_fields_of_interest:
+ if field == "s28_min":
+ q_part = "MIN((s2+s3+s4+s5+s6+s7+s8)/7) AS s28_min"
+ elif field == "s28_max":
+ q_part = "MAX((s2+s3+s4+s5+s6+s7+s8)/7) AS s28_max"
+ elif field == "m08_max":
+ q_part = "MAX((m0+m1+m2+m3+m4+m5+m6+m7+m8)/9) AS m08_max"
+ elif field == "s2_max":
+ q_part = "MAX(s2) AS s2_max"
+ elif field == "s3_max":
+ q_part = "MAX(s3) AS s3_max"
+ elif field == "s4_max":
+ q_part = "MAX(s4) AS s4_max"
+ elif field == "s5_max":
+ q_part = "MAX(s5) AS s5_max"
+ elif field == "s6_max":
+ q_part = "MAX(s6) AS s6_max"
+ elif field == "s7_max":
+ q_part = "MAX(s7) AS s7_max"
+ elif field == "s8_max":
+ q_part = "MAX(s8) AS s8_max"
+ elif field == "m0_max":
+ q_part = "MAX(m0) AS m0_max"
+ elif field == "m1_max":
+ q_part = "MAX(m1) AS m1_max"
+ elif field == "m2_max":
+ q_part = "MAX(m2) AS m2_max"
+ elif field == "m3_max":
+ q_part = "MAX(m3) AS m3_max"
+ elif field == "m4_max":
+ q_part = "MAX(m4) AS m4_max"
+ elif field == "m5_max":
+ q_part = "MAX(m5) AS m5_max"
+ elif field == "m6_max":
+ q_part = "MAX(m6) AS m6_max"
+ elif field == "m7_max":
+ q_part = "MAX(m7) AS m7_max"
+ elif field == "m8_max":
+ q_part = "MAX(m8) AS m8_max"
+ else:
+ q_part = field
+
+ if q_parts == "":
+ q_parts = q_part
+ else:
+ q_parts = q_parts + ", " + q_part
+ if radar_fields_to_get == "":
+ radar_fields_to_get = field
+ else:
+ radar_fields_to_get = radar_fields_to_get + ", " + field
+ sql = f"""
+ SELECT
+ minute,
+ device_id,
+ {radar_fields_to_get}
+ FROM (
+ SELECT
+ time_bucket('1 minute', time) AS minute,
+ device_id,
+ {q_parts}
+ FROM
+ radar_readings
+ WHERE
+ device_id IN ({devices_list_str})
+ AND time >= '{time_from_str}'
+ AND time < '{time_to_str}'
+ GROUP BY
+ minute,
+ device_id
+ ) rr
+
+ ORDER BY
+ CASE device_id
+ {case_order}
+ END,
+ minute
+ """
+ return sql
+
+def get_deployment_radar_only_colapsed_query_wid(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest):
+ #radar detailed
+ """
+ Generate a TimeScaleDB query for sensor and radar readings based on device IDs.
+
Parameters:
devices_list_str (str): Comma-separated string of device IDs
time_from_str (str): Start time for the query
@@ -7319,6 +8722,120 @@ def get_deployment_deca_query(devices_list_str, time_from_str, time_to_str, ids_
"""
return sql
+def get_deployment_radar_10sec_snapped_query(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest):
+ """
+ Generate a TimeScaleDB query for radar readings based on device IDs with time snapped to 10-second intervals.
+
+ Parameters:
+ devices_list_str (str): Comma-separated string of device IDs
+ time_from_str (str): Start time for the query
+ time_to_str (str): End time for the query
+ ids_list (list): List of device IDs in priority order for sorting
+ radar_fields_of_interest (list): List of field names required across all devices
+
+ Returns:
+ str: Generated SQL query
+ """
+
+ # Generate the CASE statement for ordering based on the provided ids_list
+ case_statements = []
+ for index, device_id in enumerate(ids_list, start=1):
+ case_statements.append(f"WHEN {device_id} THEN {index}")
+
+ case_order = "\n ".join(case_statements)
+
+ # Handle fields processing
+ select_fields = []
+ for field in radar_fields_of_interest:
+ if field == "s28":
+ select_fields.append("(s2+s3+s4+s5+s6+s7+s8)/7 AS s28")
+ else:
+ select_fields.append(field)
+
+ fields_str = ", ".join(select_fields)
+
+ sql = f"""
+ SELECT
+ time_bucket('10 seconds', time) AS ten_seconds,
+ device_id,
+ {fields_str}
+ FROM
+ radar_readings
+ WHERE
+ device_id IN ({devices_list_str})
+ AND time >= '{time_from_str}'
+ AND time < '{time_to_str}'
+ ORDER BY
+ CASE device_id
+ {case_order}
+ END,
+ ten_seconds
+ """
+ return sql
+
+def get_deployment_radar_10sec_snapped_query_min_max(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest):
+ """
+ Generate a TimeScaleDB query for radar readings based on device IDs, aggregated with MIN/MAX per 10-second bucket.
+
+ Parameters:
+ devices_list_str (str): Comma-separated string of device IDs
+ time_from_str (str): Start time for the query
+ time_to_str (str): End time for the query
+ ids_list (list): List of device IDs in priority order for sorting
+ radar_fields_of_interest (list): List of field names required across all devices
+
+ Returns:
+ str: Generated SQL query
+ """
+
+ # Generate the CASE statement for ordering based on the provided ids_list
+ case_statements = []
+ for index, device_id in enumerate(ids_list, start=1):
+ case_statements.append(f"WHEN {device_id} THEN {index}")
+
+ case_order = "\n ".join(case_statements)
+
+ # Handle fields processing
+ select_fields = []
+ for field in radar_fields_of_interest:
+
+ radar_fields = field.split("_")
+ field_t = radar_fields[0]
+ if field_t == "s28":
+ if radar_fields[1] == "max":
+ select_fields.append("MAX((s2+s3+s4+s5+s6+s7+s8)/7) AS s28")
+ else:
+ select_fields.append("MIN((s2+s3+s4+s5+s6+s7+s8)/7) AS s28")
+ else:
+ if radar_fields[1] == "max":
+ select_fields.append(f"MAX({field_t}) as {field}")
+ else:
+ select_fields.append(f"MIN({field_t}) as {field}")
+
+ fields_str = ", ".join(select_fields)
+
+ sql = f"""
+ SELECT
+ time_bucket('10 seconds', time) AS ten_seconds,
+ device_id,
+ {fields_str}
+ FROM
+ radar_readings
+ WHERE
+ device_id IN ({devices_list_str})
+ AND time >= '{time_from_str}'
+ AND time < '{time_to_str}'
+ GROUP BY
+ ten_seconds,
+ device_id
+ ORDER BY
+ CASE device_id
+ {case_order}
+ END,
+ ten_seconds
+ """
+ return sql
+
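+# Illustrative example (assumed inputs, not executed): with devices_list_str = "481",
+# ids_list = [481] and radar_fields_of_interest = ["s3_max"], the generated SQL selects
+# time_bucket('10 seconds', time) AS ten_seconds, device_id, MAX(s3) as s3_max from
+# radar_readings, grouped per ten_seconds/device_id and ordered by the CASE list, then time.
+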
def export_query_to_minio_chunked(connection_params, query, minio_client, bucket_name, blob_name=None, chunksize=10000):
"""
Export query results to MinIO as CSV in chunks to handle large datasets
@@ -7940,7 +9457,8 @@ def check_and_parse(data_str):
# Parse the string regardless of type
parsed = json.loads(cleaned)
else:
- parsed = cleaned.split(",")
+ #parsed = cleaned.split(",")
+ parsed = SmartSplit(cleaned)
return is_list_of_lists, parsed
def clean_data_with_rolling_spline(line_part_t, window=5, threshold=2.0):
@@ -8790,7 +10308,10 @@ def GenerateLocationsMap(date_st, devices_list, devices_map, locations_list, tim
for mac in devices_list:
well_id, device_id, room = devices_map[mac]
#room = devices[well_id][0]
- color = Loc2Color[room][0]
+ if room in Loc2Color:
+ color = Loc2Color[room][0]
+ else:
+ # Fall back to the base room name (e.g. "Bedroom 2" -> "Bedroom"); use "Other" if still unknown
+ color = Loc2Color.get(room.split()[0], Loc2Color["Other"])[0]
presence_data = filter_device(locations_list, device_id)
room_details = (room, {"color": color, "presence": presence_data})
devices_list_t.append(room_details)
@@ -8992,7 +10513,11 @@ def GeneratePresenceHistoryChart(filename, recreate_in, deployment_id, filter_mi
devices_map = {}
devices_list = []
for device_entry in devices_list_a:
- devices_map[device_entry[4]] = [device_entry[0], device_entry[1], device_entry[2]]
+ if device_entry[3] is None or device_entry[3].strip() == "":
+ devices_map[device_entry[4]] = [device_entry[0], device_entry[1], device_entry[2]]
+ else:
+ devices_map[device_entry[4]] = [device_entry[0], device_entry[1], device_entry[2] + " " + device_entry[3]]
devices_list.append(device_entry[4])
locations = GenerateLocationsMap(ddate, devices_list, devices_map, locations_list, time_zone_s)
@@ -9407,9 +10932,13 @@ def add_boundary_points(line_part_t, time_zone):
first_dt = datetime.datetime.fromtimestamp(line_part_t[0][0], tz)
date = first_dt.date()
+ last_dt = datetime.datetime.fromtimestamp(line_part_t[-1][0], tz)
+ last_date = last_dt.date()
+
+
# Create datetime objects for start and end of the day
start_dt = tz.localize(datetime.datetime.combine(date, datetime.datetime.min.time()))
- end_dt = tz.localize(datetime.datetime.combine(date, datetime.datetime.max.time()))
+ end_dt = tz.localize(datetime.datetime.combine(last_date, datetime.datetime.max.time()))
# Convert to timestamps
start_ts = start_dt.timestamp()
@@ -9717,9 +11246,10 @@ def RunCommand(commmand, args_dictionary, deployment_id):
for x in range((end_date - start_date).days + 1)
]
- loclist = []
day_counter = 0
minutes_spent_there_list = []
+ minutes_locations_list = []
+ filename_4w = f"/{deployment_id}/{deployment_id}_{maps_dates[0]}_{maps_dates[-1]}_{filter_minutes}_{stretch_by}_4w_locations.png.bin"
for ddate in maps_dates:
timee = LocalDateToUTCEpoch(ddate, time_zone_s)+5 #add so date boundary is avoided
@@ -9733,6 +11263,7 @@ def RunCommand(commmand, args_dictionary, deployment_id):
filename_day = f"/{deployment_id}/{deployment_id}_{ddate}_{filter_minutes}_{stretch_by}_daily_locations.png.bin"
locations_list_s = ReadObjectMinIO("daily-maps", filename_day)
locations_list = ast.literal_eval(locations_list_s)
+ minutes_locations_list.append((ddate, locations_list))
#print(locations_list_s)
minutes_spent_there = {}
@@ -9747,33 +11278,12 @@ def RunCommand(commmand, args_dictionary, deployment_id):
for loc in minutes_spent_there:
minutes_spent_there[loc] = int(1000 * minutes_spent_there[loc] / 1440) / 10
minutes_spent_there_list.append((ddate, minutes_spent_there))
-
- dailyloclist = []
- for loc in locations_list:
- dailyloclist.append((Id2Location[loc[0]],loc[2])) # provide only loc[2] which is len_minutes or how long subject was there, ignore loc[1] which is minutes_from
- loclist.append((ddate, dailyloclist))
-
data_part = str(minutes_spent_there_list)
+ minutes_locations_list_str = str(minutes_locations_list)
+ obj_to_save = {"Location_indexes": str(Id2Location), "Locations": minutes_locations_list_str}
+ print(obj_to_save)
+ SaveObjectInBlob(filename_4w, obj_to_save)
print(data_part)
-
- prompt2 = "Consider:\n"
- prompt2 += "- older person living alone in home where each room has multi-sensor IoT device \n"
- prompt2 += "- from the data we can produce a list for each day of locations and minutes spent there\n"
- prompt2 += "- unknown location is listed as \"Outside/?\"\n"
- prompt2 += "- office and living room are equivalent for this individual. Entertainment is consumed on computer (office) and in living room TV.\n"
- prompt2 += "- person is also napping in living room\n"
- prompt2 += "\n"
- prompt2 += "Questions:\n"
- prompt2 += "- list all potential health related information can be recognized from this data (examples based on patterns of bathroom usage for urinating vs pooing, showering, sleep, and list all other)\n"
- prompt2 += "- for each pattern consider; how long data time span is required, reliability range, how to improve (what additional information could be useful from additional sensors or devices)\n"
- prompt2 += "- analyze example data \n"
- prompt2 += "\n"
- prompt2 += "Data example to analyze:\n"
- for day in loclist:
- prompt2 += f"Date: {day[0]}\n "
- prompt2 += ", ".join(f"{location} {minutes}min" for location, minutes in day[1])
- prompt2 += "\n"
- print(prompt2)
prompt = "Attached is 4 weeks of data representing % of time where person living alone is spending each day"
prompt = prompt + " Assess his last week compared to previous 3 weeks. Comment only on significant changes."
@@ -9980,6 +11490,18 @@ def GetPriviledgesOnly(user_name):
else:
return "0"
+def GetPriviledgesAndUserId(user_name):
+ with get_db_connection() as conn:
+ # Parameterized query avoids quoting problems and SQL injection via user_name
+ sql = "SELECT access_to_deployments, user_id FROM public.person_details WHERE user_name = %s"
+
+ with conn.cursor() as cur:
+ cur.execute(sql, (user_name,))
+ result = cur.fetchall() #cur.fetchone()
+ if result: # fetchall() returns [] (never None) when no row matches
+ return result[0]
+ else:
+ return "[0,0]"
+
def AddToLog(message):
"""Add message to log"""
logger.info(message)
@@ -10096,6 +11618,64 @@ def MQSendL(topic, content, qos=1):
except Exception as e:
print ("Err3b:", e)
+def StoreFloorPlan(deployment_id, layout):
+
+ conn = get_db_connection()
+ cur = conn.cursor()
+ print(layout)
+ data = json.loads(layout)
+
+ # Extract the overlapping list
+ overlapping_list = str(data["overlapping"])
+
+ try:
+ sql = f"""
+ UPDATE public.deployment_details SET floor_plan = '{CleanObject(layout)}' WHERE deployment_id = {deployment_id};
+ """
+
+ logger.debug(f"sql= {sql}")
+ cur.execute(sql)
+ conn.commit()
+ sql1 = f"""
+ INSERT INTO public.deployment_details (deployment_id, "overlapps")
+ VALUES ({deployment_id}, '{CleanObject(overlapping_list)}')
+ ON CONFLICT (deployment_id)
+ DO UPDATE SET "overlapps" = '{CleanObject(overlapping_list)}';
+ """
+ logger.debug(f"sql= {sql1}")
+ cur.execute(sql1)
+ conn.commit()
+
+ AddToLog("Written/updated!")
+ return 1
+ except Exception as err:
+ print(traceback.format_exc())
+ return 0
+ finally:
+ cur.close()
+ conn.close()
+
+def GetFloorPlan(deployment_id):
+
+ conn = get_db_connection()
+
+ try:
+ sql = f"""
+ SELECT floor_plan FROM public.deployment_details WHERE deployment_id = {deployment_id};
+ """
+ logger.debug(f"sql= {sql}")
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ result = cur.fetchall() #cur.fetchone()
+ if result:
+ return result[0][0]
+ else:
+ return ""
+ except Exception as err:
+ print(traceback.format_exc())
+ return 0
+ finally:
+ conn.close()
+
# CORS Middleware
class CORSMiddleware:
def process_request(self, req, resp):
@@ -10199,6 +11779,941 @@ class RequestParser:
logger.debug("RequestParser: No body data read")
+def FindDeviceByRole(deployment_id, location_list):
+
+ #For purposes of activity report, Bedroom and Bathroom are determined in order of priority:
+ #Bedroom: "Bedroom Master", "Bedroom", "Bedroom Guest" (106, 56, 107)
+ #Bathroom: "Bathroom Main", "Bathroom", "Bathroom Guest" (104, 102, 105)
+
+ #location_names_inverted = {"All":-1 ,"?": 0,"Office": 5,"Hallway": 6,"Garage": 7,"Outside": 8,"Conference Room": 9,"Room": 10,"Kitchen": 34,
+ # "Bedroom": 56,"Living Room": 78,"Bathroom": 102,"Dining Room": 103,"Bathroom Main": 104,"Bathroom Guest": 105,
+ # "Bedroom Master": 106, "Bedroom Guest": 107, "Conference Room": 108, "Basement": 109, "Attic": 110, "Other": 200}
+
+
+ ttime = datetime.datetime.now(datetime.timezone.utc).timestamp()
+
+ devices_list, device_ids = GetProximityList(deployment_id, ttime)
+
+ if location_list != []:
+ for location in location_list:
+ for device in devices_list:
+ well_id = device[0]
+ device_id = device[1]
+ location_t = device[2]
+ if location_t == location:
+ return (device_id, location, well_id)
+
+ else:
+ conn = get_db_connection()
+ with conn.cursor() as cur:
+
+ #we need to find beneficiaries from list of deployments
+ #sql = f'SELECT device_id FROM public.devices where device_id in {device_ids} and other="other"'
+ sql = "SELECT device_id, location, well_id FROM public.devices WHERE device_id = ANY(%s) AND other = %s"
+ #print(sql)
+ cur.execute(sql, (device_ids, "other"))
+ result = cur.fetchall()#cur.fetchone()
+ if len(result) > 0:
+ return result[0]
+ else:
+
+ devices_list, device_ids = GetProximityList(deployment_id, ttime)
+ for device in devices_list:
+ well_id = device[0]
+ device_id = device[1]
+ location_t = device[2]
+ if "Bathroom" in location_t or "Bedroom" in location_t or "Kitchen" in location_t:
+ pass
+ else:
+ return (device_id, location_t, well_id)
+
+ return (0, 0, 0)
+
+
+def ensure_date_order(from_date, to_date):
+ """
+ Ensures that from_date is earlier than to_date.
+ If not, swaps the dates.
+
+ Args:
+ from_date: Date string in format 'YYYY-MM-DD'
+ to_date: Date string in format 'YYYY-MM-DD'
+
+ Returns:
+ Tuple of (from_date, to_date) in correct order
+ """
+ # Compare the date strings
+ # This works because the 'YYYY-MM-DD' format allows for string comparison
+ if from_date > to_date:
+ # Swap the dates
+ return to_date, from_date
+ else:
+ # Dates are already in correct order
+ return from_date, to_date
+
+def signum(x):
+ return (x > 0) - (x < 0)
+
+
+def get_week_days_and_dates(days_back, timezone_str="America/Los_Angeles"):
+ """
+ Generate the dates from days_back - 1 days ago through today for a given timezone.
+
+ Args:
+ days_back (int): Number of days to include, ending with today
+ timezone_str (str): Timezone string like "America/Los_Angeles"
+
+ Returns:
+ list: List of tuples containing (date_string, weekday_name, day_of_month)
+ """
+ # Get the timezone object
+ tz = pytz.timezone(timezone_str)
+
+ # Get current date in the specified timezone
+ today = datetime.datetime.now(tz).date()
+
+ # Generate dates from days_back days ago to today
+ result = []
+ for i in range(days_back-1, -1, -1): # days_back days ago to today (inclusive)
+ date = today - timedelta(days=i)
+ weekday_name = date.strftime("%A") # Full weekday name
+ date_string = date.strftime("%Y-%m-%d") # ISO format date
+ day_of_month = date.day
+ result.append((date_string, weekday_name, day_of_month))
+
+ return result
+
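+# Illustrative output sketch (actual values depend on the current date): get_week_days_and_dates(3)
+# might return [("2025-05-01", "Thursday", 1), ("2025-05-02", "Friday", 2), ("2025-05-03", "Saturday", 3)],
+# i.e. the oldest day first, ending with today in the requested timezone.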
+
+def filter_short_groups_numpy_orig(presence_list, filter_size, device_id, dates_str):
+ """
+ Optimized version using NumPy to remove groups of consecutive zeros
+ or consecutive non-zeros (based on sign) shorter than filter_size.
+ Mimics the iterative, shortest-first logic of filter_short_groupss.
+
+ Args:
+ presence_list: List of numbers (can include floats, ints, 0s).
+ filter_size: Minimum size of consecutive groups (by sign) to keep.
+
+ Returns:
+ Filtered list with short groups removed. Output contains 0s and 1s.
+ (Note: Differs slightly from filter_short_groupss if negative numbers
+ were present, as this version converts them to 0, not 2).
+ """
+ st = time.time()
+
+ if not presence_list or filter_size <= 1:
+ # print(f"NumPy: Early exit/no processing time: {time.time() - st:.6f}s")
+ # Return a copy to avoid modifying the original list
+ return presence_list[:] if isinstance(presence_list, list) else list(presence_list)
+
+ # Work with a NumPy array for efficiency, ensure float type for consistency
+ result = np.array(presence_list, dtype=float)
+ n = len(result)
+
+ # Use a set to store hashable representations (tuples) of previous states for cycle detection
+ previous_states = set()
+
+ while True:
+ current_state_tuple = tuple(result)
+ if current_state_tuple in previous_states:
+ # print("NumPy: Cycle detected, breaking.")
+ break
+ previous_states.add(current_state_tuple)
+
+ # 1. Calculate the sign of each element (-1, 0, 1)
+ signs = np.sign(result)
+
+ # 2. Find indices where the sign changes
+ # np.diff calculates the difference between adjacent elements.
+ # A non-zero difference means the sign changed.
+ # np.where returns the indices *before* the change. Add 1 to get the start of the new run.
+ change_indices = np.where(np.diff(signs) != 0)[0] + 1
+
+ # 3. Define the boundaries of all consecutive runs (start and end indices)
+ # Include the start (0) and end (n) of the array.
+ boundaries = np.concatenate(([0], change_indices, [n]))
+
+ # 4. Identify short runs
+ short_runs_to_process = []
+ for i in range(len(boundaries) - 1):
+ start = boundaries[i]
+ end = boundaries[i+1] # Slicing is exclusive of the end index
+ length = end - start
+
+ if length > 0: # Ensure the run is not empty
+ # Determine the characteristic sign of the run (use the first element)
+ run_sign = signs[start]
+
+ if length < filter_size:
+ # --- Verification Step (Crucial) ---
+ # Check if the segment *still* consists of elements with the same sign.
+ # This handles cases where a previous modification might have altered
+ # part of what *was* a longer run.
+ current_segment_signs = np.sign(result[start:end])
+ if np.all(current_segment_signs == run_sign):
+ # If the run is short and its sign consistency is verified,
+ # add it to the list of candidates for modification.
+ short_runs_to_process.append({
+ 'start': start,
+ 'end': end,
+ 'sign': run_sign,
+ 'length': length
+ })
+ # --- End Verification ---
+
+ # 5. Check if any short runs were found
+ if not short_runs_to_process:
+ # No modifiable short runs found in this pass, the list is stable.
+ break
+
+ # 6. Sort the short runs: shortest first, then by start index for determinism
+ # This ensures we process the same run as the original iterative function would.
+ short_runs_to_process.sort(key=lambda r: (r['length'], r['start']))
+
+ # 7. Process ONLY the *first* (shortest) identified run in this pass
+ run_to_process = short_runs_to_process[0]
+ start = run_to_process['start']
+ end = run_to_process['end']
+ run_sign = run_to_process['sign']
+
+ # Determine the replacement value based on the sign of the run being removed
+ # Short runs of 0 become 1
+ # Short runs of non-zero (positive or negative) become 0
+ replacement_value = 1.0 if run_sign == 0 else 0.0
+
+ # 8. Apply the replacement to the segment using NumPy slicing
+ result[start:end] = replacement_value
+ # Loop continues because a change was made
+
+ print(f"filter_short_groups_numpy time: {time.time() - st:.6f}s")
+ if (time.time() - st) > 40:
+ print(presence_list)
+ # Convert back to a standard Python list for the return value
+ return result.tolist()
+
+
+
+def filter_short_groups_numpy(presence_list, filter_size, device_id, dates_str):
+ """
+ Optimized version using NumPy to remove groups of consecutive zeros
+ or consecutive non-zeros (based on sign) shorter than filter_size.
+ Mimics the iterative, shortest-first logic.
+
+ Optimization:
+ - Vectorized extraction of segment properties.
+ - Removed redundant sign verification within the segment analysis loop.
+ """
+ # Start timer (optional, for benchmarking)
+ st = time.time()
+
+ if not presence_list or filter_size <= 1:
+ # print(f"NumPy Optimized: Early exit/no processing time: {time.time() - st:.6f}s")
+ return presence_list[:] if isinstance(presence_list, list) else list(presence_list)
+
+ result = np.array(presence_list, dtype=float)
+ n = len(result)
+
+ previous_states = set()
+
+ while True:
+ # Cycle detection
+ current_state_tuple = tuple(result)
+ if current_state_tuple in previous_states:
+ # print("NumPy Optimized: Cycle detected, breaking.")
+ break
+ previous_states.add(current_state_tuple)
+
+ # 1. Calculate the sign of each element (-1, 0, 1)
+ signs = np.sign(result)
+
+ # 2. Find indices where the sign changes
+ change_indices = np.where(np.diff(signs) != 0)[0] + 1
+
+ # 3. Define the boundaries of all consecutive runs
+ boundaries = np.concatenate(([0], change_indices, [n]))
+
+ # If there's only one segment (e.g., all zeros, all ones, or array is too short to have changes),
+ # or if the array was empty (n=0 leading to boundaries=[0,0]), no further processing is needed.
+ if len(boundaries) <= 2: # e.g., boundaries is [0, n] or [0,0]
+ break
+
+ # 4. Vectorized extraction of run properties
+ run_starts = boundaries[:-1]
+ run_ends = boundaries[1:]
+ run_lengths = run_ends - run_starts
+ # The sign of the first element of a run (from the 'signs' array computed at the
+ # start of this 'while' iteration) is representative of the entire run's sign,
+ # by definition of how 'boundaries' were created.
+ run_signs = signs[run_starts]
+
+ # 5. Identify short runs and collect their properties
+ short_runs_to_process = []
+ for i in range(len(run_starts)): # Iterate over all identified runs
+ # Ensure run_length is positive (should be, due to boundary logic, but good check)
+ if run_lengths[i] > 0 and run_lengths[i] < filter_size:
+ short_runs_to_process.append({
+ 'start': run_starts[i],
+ 'end': run_ends[i],
+ 'sign': run_signs[i],
+ 'length': run_lengths[i]
+ })
+
+ # 6. Check if any modifiable short runs were found
+ if not short_runs_to_process:
+ # No short runs found in this pass, the list is stable.
+ break
+
+ # 7. Sort the short runs: shortest first, then by start index for determinism
+ short_runs_to_process.sort(key=lambda r: (r['length'], r['start']))
+
+ # 8. Process ONLY the *first* (shortest) identified run in this pass
+ run_to_process = short_runs_to_process[0]
+ start = run_to_process['start']
+ end = run_to_process['end']
+ run_sign = run_to_process['sign']
+
+ # Determine the replacement value
+ replacement_value = 1.0 if run_sign == 0 else 0.0
+
+ # 9. Apply the replacement
+ result[start:end] = replacement_value
+ # A change was made, so the 'while True' loop continues (unless a cycle is detected next)
+
+ # End timer and print (optional)
+ # Your original print statements for timing:
+ print(f"filter_short_groups_numpy time: {time.time() - st:.6f}s")
+ # if (time.time() - st) > 40:
+ # print(presence_list) # This would print the original input on long runs
+
+ return result.tolist()
+
+def filter_short_groups(presence_list, filter_size):
+ """
+ Corrected version to perform the same task as filter_short_groupss,
+ including handling of non-zero/non-one values based on signum.
+ Iteratively removes the shortest group < filter_size by flipping its
+ signum representation (0->1, pos->0, neg->2).
+
+ Args:
+ presence_list: List of numbers (0s, 1s, or any other number).
+ filter_size: Minimum size of groups (based on signum) to keep.
+ Returns:
+ Filtered list with short groups removed, potentially containing 0, 1, 2.
+ """
+ st = time.time()
+
+ if not presence_list or filter_size <= 1:
+ # print(f"filter_short_groups: Early exit/no processing time: {time.time() - st:.6f}s")
+ return presence_list.copy()
+
+ result = presence_list.copy()
+ n = len(result)
+
+ # Using a set for faster cycle detection lookups
+ previous_states = set()
+
+ while True:
+ current_state_tuple = tuple(result)
+ if current_state_tuple in previous_states:
+ # print("Cycle detected in filter_short_groups, breaking.")
+ break
+ previous_states.add(current_state_tuple)
+
+ # --- Start of logic mimicking filter_short_groupss ---
+ changes_made_outer = False
+
+ # 1. Find all segments based on signum
+ segments = []
+ i = 0
+ while i < n:
+ start = i
+ # Use signum to define the characteristic value of the run
+ current_signum = signum(result[i])
+
+ # Find the end of the group based on *consistent signum*
+ while i < n and signum(result[i]) == current_signum:
+ i += 1
+
+ group_length = i - start
+ # Store the signum value associated with the run
+ segments.append((start, i - 1, current_signum, group_length))
+
+ # 2. Sort segments by length (ascending) to process shortest first
+ segments.sort(key=lambda x: x[3])
+
+ # 3. Process the segments (find the first short one to modify)
+ for start, end, run_signum, length in segments:
+ if length < filter_size:
+ # Verify the segment hasn't been fundamentally altered (signum-wise)
+ # This check mirrors filter_short_groupss's intent, using signum consistently.
+ is_still_original_signum_segment = True
+ for k_idx in range(start, end + 1):
+ if signum(result[k_idx]) != run_signum:
+ is_still_original_signum_segment = False
+ break
+
+ if is_still_original_signum_segment:
+ # Calculate replacement value based on signum (0->1, pos->0, neg->2)
+ replacement_value = 1 - run_signum
+
+ # Apply replacement
+ segment_modified = False
+ for j in range(start, end + 1):
+ # Use direct comparison as replacement values are integers (0, 1, 2)
+ if result[j] != replacement_value:
+ result[j] = replacement_value
+ segment_modified = True
+
+ if segment_modified:
+ changes_made_outer = True
+ # Break after making *one* change and restart the whole process
+ # (finding segments, sorting, finding shortest modifiable)
+ break # Break from the 'for segment in segments' loop
+
+ # --- End of logic mimicking filter_short_groupss ---
+
+ if not changes_made_outer:
+ # If we went through all segments and made no changes, we're done.
+ break
+
+ print(f"filter_short_groups time: {time.time() - st:.6f}s")
+ return result
+
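+# Worked example (illustrative, not executed): filter_short_groups([1, 1, 0, 1, 1, 1, 0, 0, 0], 2)
+# first flips the lone 0 at index 2 (the shortest run) to 1, then finds no remaining run shorter
+# than 2 and returns [1, 1, 1, 1, 1, 1, 0, 0, 0].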
+
+def filter_short_groupss(presence_list, filter_size):
+ """
+ Iteratively remove groups of consecutive 0s or 1s that are shorter than filter_size.
+ Continues until no more changes are made.
+
+ Args:
+ presence_list: List of 0s and 1s
+ filter_size: Minimum size of groups to keep
+
+ Returns:
+ Filtered list with short groups removed
+ """
+ st = time.time()
+ if not presence_list or filter_size <= 1:
+ return presence_list.copy()
+
+ result = presence_list.copy()
+ changes_made = True
+
+ while changes_made:
+ changes_made = False
+
+ # First identify all segments
+ segments = []
+ i = 0
+ n = len(result)
+
+ while i < n:
+ # Find the start of a group
+ start = i
+ current_value = signum(result[i])
+
+ # Find the end of the group
+ while i < n and signum(result[i]) == current_value:
+ i += 1
+
+ # Calculate group length
+ group_length = i - start
+ segments.append((start, i-1, current_value, group_length))
+
+ # Sort segments by length (ascending) to process shortest first
+ segments.sort(key=lambda x: x[3])
+
+ # Process the segments
+ for start, end, value, length in segments:
+ # If segment is too short, replace with opposite value
+ if length < filter_size:
+ # Verify the segment hasn't been modified by previous replacements
+ if all(result[j] == value for j in range(start, end+1)):
+ replacement = 1 - value # Toggle between 0 and 1
+ for j in range(start, end+1):
+ result[j] = replacement
+ changes_made = True
+ #print(start, end)
+ break # Break after making a change and restart
+
+ print("s", time.time()-st)
+ return result
+
+def filter_short_segments(segments, filter_size):
+ """
+ Iteratively remove segments that are shorter than filter_size,
+ replacing them with data from the previous segment.
+
+ Args:
+ segments: List of tuples (start_time, end_time, num_persons, duration)
+ filter_size: Minimum duration to keep a segment
+
+ Returns:
+ Filtered list of segments covering the entire time range
+ """
+ if not segments or filter_size <= 0:
+ return segments.copy()
+
+ result = segments.copy()
+ changes_made = True
+
+ while changes_made:
+ changes_made = False
+
+ i = 1 # Start from the second segment
+ while i < len(result):
+ _, _, _, duration = result[i]
+
+ if duration < filter_size:
+ # Get the previous segment's person count
+ if i > 0:
+ _, _, prev_persons, _ = result[i-1]
+ start, end, _, dur = result[i]
+
+ # Replace with previous person count
+ result[i] = (start, end, prev_persons, dur)
+ changes_made = True
+
+ # Check if we can merge with previous segment
+ if i > 0:
+ prev_start, prev_end, prev_persons, prev_dur = result[i-1]
+ curr_start, curr_end, curr_persons, curr_dur = result[i]
+
+ if prev_persons == curr_persons and prev_end + 1 == curr_start:
+ # Merge segments
+ merged = (prev_start, curr_end, prev_persons, prev_dur + curr_dur)
+ result[i-1] = merged
+ result.pop(i)
+ i -= 1 # Adjust index after removing an element
+ changes_made = True
+
+ i += 1
+
+ # Sort segments by start time to ensure proper order
+ result.sort(key=lambda x: x[0])
+
+ return result
+
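+# Illustrative example: filter_short_segments([(0, 99, 1, 100), (100, 102, 2, 3), (103, 199, 1, 97)], 5)
+# reassigns the short 2-person segment to the previous person count and merges it with its
+# contiguous neighbour, giving [(0, 102, 1, 103), (103, 199, 1, 97)].
+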
+def filter_out_short_high_segments(segments, filter_size):
+ """
+ Iteratively remove segments that are shorter than filter_size,
+ replacing them with data from the previous segment.
+
+ Args:
+ segments: List of tuples (start_time, end_time, num_persons, duration)
+ filter_size: Minimum duration to keep a segment
+
+ Returns:
+ Filtered list of segments covering the entire time range
+ """
+ if not segments:
+ return segments.copy()
+
+ result = segments.copy()
+ changes_made = True
+
+ while changes_made:
+ changes_made = False
+
+ i = 1 # Start from the second segment
+ while i < len(result):
+ _, _, _, duration = result[i]
+
+ if duration < filter_size:
+ # Get the previous segment's person count
+ if i > 0:
+ _, _, prev_persons, _ = result[i-1]
+ start, end, _, dur = result[i]
+
+ # Replace with previous person count
+ result[i] = (start, end, prev_persons, dur)
+ changes_made = True
+
+ # Check if we can merge with previous segment
+ if i > 0:
+ prev_start, prev_end, prev_persons, prev_dur = result[i-1]
+ curr_start, curr_end, curr_persons, curr_dur = result[i]
+
+ if prev_persons == curr_persons and prev_end + 1 == curr_start:
+ # Merge segments
+ merged = (prev_start, curr_end, prev_persons, prev_dur + curr_dur)
+ result[i-1] = merged
+ result.pop(i)
+ i -= 1 # Adjust index after removing an element
+ changes_made = True
+
+ i += 1
+
+ # Sort segments by start time to ensure proper order
+ result.sort(key=lambda x: x[0])
+
+ return result
+
+def filter_out_short_same_groups_iterative(presence_list, filter_size):
+ """
+ Iteratively remove groups of consecutive sames that are shorter than filter_size.
+ Continues until no more changes are made.
+
+ Args:
+ presence_list: List of values
+ filter_size: Minimum size of groups to keep
+
+ Returns:
+ Filtered list of (start, end, value, duration) segments with short runs merged into their neighbours
+ """
+ if not presence_list:
+ return presence_list.copy()
+
+ result = presence_list.copy()
+
+
+ # First identify all segments
+ segments = []
+ i = 0
+ n = len(result)
+
+ while i < n:
+ # Find the start of a group
+ start = i
+ current_value = result[i]
+
+ # Find the end of the group
+ while i < n and result[i] == current_value:
+ i += 1
+
+ # Calculate group length
+ group_length = i - start
+ segments.append((start, i-1, current_value, group_length))
+
+
+ result = filter_out_short_high_segments(segments, filter_size)
+
+ return result
+
+def filter_out_short_highs_iterative(presence_list, filter_size):
+ """
+ Iteratively remove groups of consecutive sames that are shorter than filter_size.
+ Continues until no more changes are made.
+
+ Args:
+ presence_list: List of values
+ filter_size: Minimum size of groups to keep
+
+ Returns:
+ Filtered list with short groups removed
+ """
+ if not presence_list:
+ return presence_list.copy()
+
+ result = presence_list.copy()
+
+
+ # First identify all segments
+ segments = []
+ i = 0
+ n = len(result)
+
+ while i < n:
+ # Find the start of a group
+ start = i
+ current_value = result[i]
+
+ # Find the end of the group
+ while i < n and result[i] == current_value:
+ i += 1
+
+ # Calculate group length
+ group_length = i - start
+ segments.append((start, i-1, current_value, group_length))
+
+
+ result = filter_out_short_high_segments(segments, filter_size)
+
+ return result
+
+def filter_short_groups_iterative_analog(presence_list, filter_size):
+ """
+ Iteratively remove groups of consecutive similar values that are shorter than filter_size.
+ For non-zero values, replaces with 0. For zero values, needs context to determine replacement.
+ """
+ if not presence_list or filter_size <= 1:
+ return presence_list.copy()
+
+ result = presence_list.copy()
+ changes_made = True
+
+ while changes_made:
+ changes_made = False
+
+ # Identify all segments of consecutive similar values
+ segments = []
+ i = 0
+ n = len(result)
+
+ while i < n:
+ start = i
+ is_zero = (result[i] == 0)
+
+ # Find the end of the group with same characteristic (zero or non-zero)
+ while i < n and ((result[i] == 0) == is_zero):
+ i += 1
+
+ group_length = i - start
+ segments.append((start, i-1, is_zero, group_length))
+
+ # Process segments from shortest to longest
+ segments.sort(key=lambda x: x[3])
+
+ for start, end, is_zero, length in segments:
+ if length < filter_size:
+ # For short non-zero groups, replace with zeros
+ if not is_zero:
+ for j in range(start, end+1):
+ result[j] = 0
+ changes_made = True
+ break
+ else:
+ # For short zero groups, replace with average of surrounding non-zero values
+ # First, find surrounding values
+ left_value = 0
+ right_value = 0
+
+ # Look for non-zero value on the left
+ for j in range(start-1, -1, -1):
+ if result[j] != 0:
+ left_value = result[j]
+ break
+
+ # Look for non-zero value on the right
+ for j in range(end+1, n):
+ if result[j] != 0:
+ right_value = result[j]
+ break
+
+ # Calculate replacement value
+ if left_value > 0 and right_value > 0:
+ replacement = (left_value + right_value) / 2
+ elif left_value > 0:
+ replacement = left_value
+ elif right_value > 0:
+ replacement = right_value
+ else:
+ replacement = 0 # No surrounding non-zero values
+
+ # Apply replacement
+ for j in range(start, end+1):
+ result[j] = replacement
+
+ if replacement != 0: # Only mark as changed if we actually changed something
+ changes_made = True
+ break
+ return result
+
+
+def filter_short_high_groups_iterative_analog(presence_list, filter_size):
+ """
+ More efficient implementation that still handles cascading effects:
+ zeroes out every non-zero run shorter than filter_size, repeating until no run changes.
+ """
+ st = time.time()
+ if not presence_list or filter_size <= 1:
+ return presence_list.copy()
+
+ result = presence_list.copy()
+ changes_made = True
+
+ while changes_made:
+ changes_made = False
+ i = 0
+ n = len(result)
+
+ # Use a single pass to find all non-zero segments
+ segments = []
+ while i < n:
+ # Skip zeros
+ if result[i] == 0:
+ i += 1
+ continue
+
+ # Found non-zero, find the end of this segment
+ start = i
+ while i < n and result[i] != 0:
+ i += 1
+
+ # Add segment to our list
+ segments.append((start, i))
+
+ # Process all short segments in one iteration
+ for start, end in segments:
+ length = end - start
+ if length < filter_size:
+ # Set all elements in this segment to zero
+ for j in range(start, end):
+ result[j] = 0
+ changes_made = True
+ # Don't break - process all short segments in this pass
+
+ # If we've made changes, we need to check again for newly formed short segments
+ print(f"filter_short_high_groups_iterative_analog time: {time.time() - st:.6f}s")
+ return result
+
+def filter_short_high_groups_iterative_analog_orig(presence_list, filter_size):
+ """
+ Iteratively remove groups of consecutive similar values that are shorter than filter_size.
+ For non-zero values, replaces with 0. For zero values, needs context to determine replacement.
+ """
+ if not presence_list or filter_size <= 1:
+ return presence_list.copy()
+ st = time.time()
+ result = presence_list.copy()
+ changes_made = True
+
+ while changes_made:
+ changes_made = False
+
+ # Identify all segments of consecutive similar values
+ segments = []
+ i = 0
+ n = len(result)
+
+ while i < n:
+ start = i
+ is_zero = (result[i] == 0)
+
+ # Find the end of the group with same characteristic (zero or non-zero)
+ while i < n and ((result[i] == 0) == is_zero):
+ i += 1
+
+ group_length = i - start
+ segments.append((start, i-1, is_zero, group_length))
+
+ # Process segments from shortest to longest
+ segments.sort(key=lambda x: x[3])
+
+ for start, end, is_zero, length in segments:
+ if length < filter_size:
+ # For short non-zero groups, replace with zeros
+ if not is_zero:
+ for j in range(start, end+1):
+ result[j] = 0
+ changes_made = True
+ break
+
+ print(f"filter_short_high_groups_iterative_analog time: {time.time() - st:.6f}s")
+ #if (time.time() - st) > 40:
+ # print(presence_list)
+ return result
+
+
+
+def filter_short_groupsWhat(presence_list, filter_size):
+ """
+ Remove groups of consecutive 0s or 1s that are shorter than filter_size.
+ For short groups of 0s, replace with 1s.
+ For short groups of 1s, replace with 0s.
+
+ Args:
+ presence_list: List of 0s and 1s
+ filter_size: Minimum size of groups to keep
+
+ Returns:
+ Filtered list with short groups removed
+ """
+ if not presence_list or filter_size <= 1:
+ return presence_list.copy()
+
+ result = presence_list.copy()
+ n = len(result)
+
+ # Find groups and process them
+ i = 0
+ while i < n:
+ # Find the start of a group
+ start = i
+ current_value = result[i]
+
+ # Find the end of the group
+ while i < n and result[i] == current_value:
+ i += 1
+
+ # Calculate group length
+ group_length = i - start
+
+ # If group is too short, replace with opposite value
+ if group_length < filter_size:
+ replacement = 1 - current_value # Toggle between 0 and 1
+ for j in range(start, i):
+ result[j] = replacement
+
+ return result
+
+
+def GetOverlapps(deployment_id):
+
+ with get_db_connection() as db_conn:
+ with db_conn.cursor() as cur:
+ sql = f"SELECT overlapps FROM public.deployment_details WHERE deployment_id = '{deployment_id}'"
+ cur.execute(sql)
+ result = cur.fetchone() #cur.fetchall()
+ if result != None:
+ return result[0]
+
+
+def GetAmpitude(point_val, segment_lenght):
+ if point_val == 0:
+ return -segment_lenght
+ else:
+ return segment_lenght
+
+def CreateZGraph(well_id, presence_list):
+ """
+ Return size and position of consecutive groups of 0s and 1s as z-graph points.
+
+ Args:
+ presence_list: Day-plus list of 0s and 1s, one value per deca (10 s)
+
+ Returns:
+ List of (deca, signed_length) points: presence runs plot upward, absence runs downward
+ """
+ if not presence_list:
+ return presence_list.copy()
+
+ #if well_id == 290:
+ # print("Stop")
+ dekas_in_day = 6 * 1440
+ result = []
+ print(well_id)
+ #result will look like this: [(0,34),(34,-56),(92,6),...] where (A,B)
+ #A: deca (10-second slot) at which the section ends, B: signed height of section (+ presence, - absence)
+ #lets find point 0 first moving backward in time
+ segment_lenght = 0
+ point_zero_val = signum(presence_list[dekas_in_day])
+ for i in range(dekas_in_day-1, 0, -1):
+ if point_zero_val != signum(presence_list[i]):
+ segment_lenght = dekas_in_day -1 - i
+ break
+ x = 0
+ y = GetAmpitude(point_zero_val, segment_lenght)
+ result.append((x, y))
+ #x = x + segment_lenght
+ last_y = y
+ last_val = point_zero_val
+ last_source_minute = dekas_in_day + 1
+ for i in range(last_source_minute, len(presence_list)):
+ if last_val != signum(presence_list[i]):
+ segment_lenght = i - dekas_in_day - x
+ x = x + segment_lenght
+ y = last_y + GetAmpitude(last_val, segment_lenght)
+ result.append((x, y))
+ result.append((x, 0))
+ last_y = 0
+ last_val = signum(presence_list[i])
+
+ # The final point uses the last loop index i itself (the last deca of the data), not i + 1
+ segment_lenght = i - dekas_in_day - x
+ x = i - dekas_in_day #last point
+ y = GetAmpitude(last_val, segment_lenght)
+ result.append((x, y))
+
+ return result
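+
+# Z-graph sketch (illustrative): each presence/absence run in the second day of the day-plus list
+# contributes a point at the deca where the run ends, with height equal to the signed run length
+# (+ presence, - absence; the first run also counts its portion before midnight), followed for
+# interior runs by a point at height 0 so the plotted bars return to the baseline.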
# Add this function to your code
@@ -10223,6 +12738,240 @@ def get_form_data(req):
logger.debug("No form data available, returning empty dict")
return {}
+def DetectMultiple(temporary_map_day_plus, overlaps_str_lst):
+ """
+ Written by Robert Zmrzli
+ Detects, per time slot, whether multiple people are present, i.e. two or more devices whose
+ coverage areas do not overlap see presence at the same time.
+
+ Args:
+ temporary_map_day_plus: Per-device map of radar reads that were detected to be above threshold
+ overlaps_str_lst: List of "id1:id2" strings naming device pairs whose coverage areas overlap
+ Returns:
+ seen_list: Per-slot value identifying which combination of devices saw simultaneous presence
+ (left at 0 when at most one device, or only mutually overlapping devices, saw presence)
+ seen_where_list_uf: Per-slot unfiltered list of the device ids that saw presence
+ """
+ for location_id, data_list in temporary_map_day_plus.items():
+ minutes_in_data = len(data_list)
+ break
+ events = []
+ min_time = 0
+ max_time = 0
+
+
+ #['267:273', '273:291']
+ seen_list = [0] * minutes_in_data
+ seen_where_list = [[] for _ in range(minutes_in_data)]
+
+ for location_id, data_list in temporary_map_day_plus.items():
+ for i in range(minutes_in_data):
+ if data_list[i] > 0: # Presence interval
+ seen_where_list[i].append(location_id)
+
+ seen_where_list_uf = seen_where_list.copy()
+ overlap_pairs = set()
+ for overlap_str in overlaps_str_lst:
+ nums = [int(x) for x in overlap_str.split(':')]
+ # Add both orderings of the pair for easier checking
+ overlap_pairs.add((nums[0], nums[1]))
+ overlap_pairs.add((nums[1], nums[0]))
+
+ # Process each sub-list in seen_where_list
+ for i in range(len(seen_where_list)):
+ locations = seen_where_list[i]
+
+ # Skip empty lists and lists with only 0 or 1 item
+ if len(locations) <= 1:
+ continue
+
+ has_non_overlapping_pair = False
+
+ for j in range(len(locations)):
+ for k in range(j+1, len(locations)):
+ loc1, loc2 = locations[j], locations[k]
+
+ # If this pair is not in our overlap_pairs, then they don't overlap
+ if (loc1, loc2) not in overlap_pairs:
+ has_non_overlapping_pair = True
+ break
+
+ if has_non_overlapping_pair:
+ break
+
+ # If all pairs overlap (no non-overlapping pairs found), clear the list
+ if not has_non_overlapping_pair:
+ seen_where_list[i] = []
+
+
+ variations = []
+ variation_index = {}
+
+ for i in range(minutes_in_data):
+ if len(seen_where_list[i]) > 1: # Presence interval
+ if seen_where_list[i] not in variations:
+ variations.append(seen_where_list[i])
+ variation_index[str(seen_where_list[i])] = len(variations) - 1
+
+ seen_list[i] = variation_index[str(seen_where_list[i])]
+
+
+ return seen_list, seen_where_list_uf
+
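+# Illustrative example (hypothetical device ids): with overlaps_str_lst = ["267:273", "273:291"],
+# a slot where only 267 and 273 see presence is cleared (their areas overlap, so it counts as one
+# person), while a slot where 267 and 291 both see presence keeps both ids and is counted as a
+# multiple-presence slot in seen_list.
+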
+def minutes_to_time(minutes):
+ """
+ Convert minutes in a day (0-1439) to HH:MM format
+
+ Args:
+ minutes (int): Minutes since midnight (0-1439)
+
+ Returns:
+ str: Time in HH:MM format
+ """
+ # Ensure the input is within valid range
+ #if not 0 <= minutes <= 1439:
+ # raise ValueError("Minutes must be between 0 and 1439")
+
+ # Calculate hours and remaining minutes
+ minutes = minutes % 1440
+ hours = minutes // 60
+ mins = minutes % 60
+
+ # Format as HH:MM with leading zeros
+ return f"{hours:02d}:{mins:02d}"
+
+def decas_to_time(decas):
+ """
+ Convert decas (10-second samples) in a day (0-8639) to HH:MM:SS format
+
+ Args:
+ decas (int): decas since midnight (0-8639)
+
+ Returns:
+ str: Time in HH:MM:SS format
+ """
+ # Ensure the input is within valid range
+ #if not 0 <= decas <= 8639:
+ # raise ValueError("Decas must be between 0 and 8639")
+
+ # Calculate hours, minutes and seconds (6 decas per minute, 10 seconds per deca)
+ decas = decas % 8640
+ hours = decas // (6 * 60)
+ mins = (decas // 6) % 60
+ secs = 10 * (decas % 6)
+
+ # Format as HH:MM:SS with leading zeros
+ return f"{hours:02d}:{mins:02d}:{secs:02d}"
+
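+# Illustrative conversions: decas_to_time(81) == "00:13:30" (81 decas = 13 min 30 s past midnight)
+# and decas_to_time(8641) == "00:00:10", since inputs wrap modulo 8640 (one day of 10-second slots).
+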
+def ClearOverlaps(temporary_map_day_plus, overlaps_str_lst):
+ """
+ Detects reads of the same person picked up by multiple overlapping devices and keeps only the strongest read.
+ Args:
+ temporary_map_day_plus: Per-device map of radar reads that were detected to be above threshold
+ overlaps_str_lst: List of "id1:id2" strings naming device pairs whose coverage areas overlap
+ Returns:
+ A copy of temporary_map_day_plus with the weaker overlapping reads removed
+ """
+ ## Get the number of minutes
+ #for location_id, data_list in temporary_map_day_plus.items():
+ #decas_in_data = len(data_list)
+ #break
+
+ if temporary_map_day_plus:
+ decas_in_data = len(next(iter(temporary_map_day_plus.values())))
+ else:
+ decas_in_data = 0
+
+ # Create seen_where_list with device-signal pairs
+ seen_where_list = [[] for _ in range(decas_in_data)]
+ for location_id, data_list in temporary_map_day_plus.items():
+ for i in range(decas_in_data):
+ if data_list[i] > 0: # Presence interval
+ #if i == (8721):
+ # print("stop")
+ seen_where_list[i].append((location_id, data_list[i]))
+
+ # Parse overlap pairs
+ overlap_pairs = set()
+ for overlap_str in overlaps_str_lst:
+ nums = [int(x) for x in overlap_str.split(':')]
+ overlap_pairs.add((nums[0], nums[1]))
+ overlap_pairs.add((nums[1], nums[0]))
+
+ # Process each time slot
+ for i in range(len(seen_where_list)):
+ locations = seen_where_list[i]
+
+ if len(locations) <= 1:
+ continue
+
+ #if i == (5713 + 8640):
+ # print("stop")
+ #if i == (8721):
+ # print("stop")
+ # Create a new list to store the filtered results
+ filtered_list = []
+
+ # Make a copy of locations to process
+ to_process = locations.copy()
+
+ # Process each device and decide whether to keep it
+ while to_process:
+ current = to_process.pop(0)
+ device_id, signal_strength = current
+
+ should_keep = True
+ devices_to_remove = []
+
+ # Compare with all other devices (including those already in filtered_list)
+ for other in locations:
+ other_device_id, other_signal_strength = other
+
+ # Skip if comparing with itself
+ if device_id == other_device_id:
+ continue
+
+ # Check if these devices overlap
+ if (device_id, other_device_id) in overlap_pairs:
+ # They overlap, keep only the stronger signal
+ if signal_strength < other_signal_strength:
+ # Other device is stronger, don't keep current
+ should_keep = False
+ break
+ elif signal_strength == other_signal_strength and device_id > other_device_id:
+ # For equal signals, use device_id as tiebreaker
+ should_keep = False
+ break
+
+ # If we should keep this device, add it to filtered list
+ if should_keep:
+ filtered_list.append(current)
+
+ # Update the original list with filtered results
+
+ #if i == (8721):
+ # print("stop")
+ seen_where_list[i] = filtered_list
+
+ # Create a new temporary_map_day_plus with the filtered data
+ result = {}
+ for location_id, data_list in temporary_map_day_plus.items():
+ result[location_id] = [0] * decas_in_data
+
+ # Fill in the filtered data
+ for i in range(decas_in_data):
+
+ for device_id, signal_strength in seen_where_list[i]:
+ result[device_id][i] = signal_strength
+
+ return result
+
# Path handling middleware
class StripPathMiddleware:
def process_request(self, req, resp):
@@ -10248,6 +12997,810 @@ class StripPathMiddleware:
logger.info(f"Modified request path: {path}")
break
+
+def optimized_processing(myz_data, start_time, id2well_id, device_id_2_threshold, device_field_indexes, temporary_map_day_plus, data_type):
+ last_device_id = None
+
+ # Seconds per deca (each deca is a 10-second interval)
+ seconds_per_deka = 10
+
+ # Check if we need to process all data or just specific types
+ process_all = data_type in ("all", "z-graph", "multiple")
+
+ for radar_read in myz_data:
+ local_time = radar_read[0]
+ device_id = radar_read[1]
+
+ # Calculate deca once
+ deca = int((local_time - start_time).total_seconds() / seconds_per_deka)
+
+ # Use cached lookups when possible
+ if device_id != last_device_id:
+ last_device_id = device_id
+
+ # Check if we've cached this device info
+ if device_id not in device_lookup_cache:
+ well_id = id2well_id[device_id]
+ radar_threshold_group_st = device_id_2_threshold[well_id]
+ threshold_sig, threshold = radar_threshold_group_st
+ threshold_sig = threshold_sig.split("_")[0]
+
+ # Cache the values
+ device_lookup_cache[device_id] = {
+ 'well_id': well_id,
+ 'threshold_sig': threshold_sig,
+ 'threshold': threshold
+ }
+ else:
+ # Use cached values
+ cached = device_lookup_cache[device_id]
+ well_id = cached['well_id']
+ threshold_sig = cached['threshold_sig']
+ threshold = cached['threshold']
+
+ days_decas = len(temporary_map_day_plus[well_id])
+ else:
+ # Use already loaded values from last iteration
+ cached = device_lookup_cache[device_id]
+ well_id = cached['well_id']
+ threshold_sig = cached['threshold_sig']
+ threshold = cached['threshold']
+ days_decas = len(temporary_map_day_plus[well_id])
+
+ # Get radar value using cached index
+ radar_val = radar_read[2 + device_field_indexes[threshold_sig]]
+
+ # Process data if needed
+ if process_all and radar_val > threshold and deca < days_decas:
+ temporary_map_day_plus[well_id][deca] = radar_val
+
+ return temporary_map_day_plus
+
+def optimized_radar_processing(my_data, start_time, id2well_id, device_id_2_threshold,
+ device_field_indexes, presence_map, data_type):
+ last_device_id = 0
+
+ # Cache for threshold_sig calculation which is expensive due to dictionary lookups and string splitting
+ threshold_sig_cache = {}
+ field_index_cache = {}
+
+ for radar_read in my_data:
+ local_time = radar_read[0]
+ device_id = radar_read[1]
+
+ # Calculate deca once
+ deca = int((local_time - start_time).total_seconds() / 10)
+
+ # Device changed - update values that depend on device
+ if device_id != last_device_id:
+ last_device_id = device_id
+ well_id = id2well_id[device_id]
+
+ # Calculate days_decas exactly like original
+ if data_type == "raw" or data_type == "all":
+ days_decas = len(presence_map['raw'][well_id])
+ else:
+ days_decas = len(presence_map['presence'][well_id])
+
+ # Calculate threshold_sig with caching
+ if device_id not in threshold_sig_cache:
+ radar_threshold_group_st = device_id_2_threshold[well_id]
+ threshold_sig, threshold = radar_threshold_group_st
+ threshold_sig = threshold_sig.split("_")[0]
+ threshold_sig_cache[device_id] = (threshold_sig, threshold)
+ else:
+ threshold_sig, threshold = threshold_sig_cache[device_id]
+
+ # Calculate field index with caching
+ if threshold_sig not in field_index_cache:
+ field_index = 2 + device_field_indexes[threshold_sig]
+ field_index_cache[threshold_sig] = field_index
+ else:
+ field_index = field_index_cache[threshold_sig]
+ else:
+ # Use values from previous iteration for same device
+ well_id = id2well_id[device_id]
+
+ # Calculate days_decas exactly like original
+ if data_type == "raw" or data_type == "all":
+ days_decas = len(presence_map['raw'][well_id])
+ else:
+ days_decas = len(presence_map['presence'][well_id])
+
+ # Use cached values
+ threshold_sig, threshold = threshold_sig_cache[device_id]
+ field_index = field_index_cache[threshold_sig]
+
+ # Get radar value using cached field index
+ if field_index >= len(radar_read):
+ radar_val = radar_read[-1]
+ else:
+ radar_val = radar_read[field_index]
+
+ # Process presence data
+ if data_type == "presence" or data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ if radar_val > threshold:
+ if deca < days_decas:
+ presence_map['presence'][well_id][deca] = 1
+
+ # Process raw data if needed
+ if data_type == "raw" or data_type == "all":
+ if deca < days_decas:
+ presence_map['raw'][well_id][deca] = radar_val
+
+ return presence_map
+
+def CompressList(presence_devices_map):
+
+ for key in presence_devices_map:
+ presence_map_list = presence_devices_map[key]
+ presence_map_list_compressed = Compress(presence_map_list)
+ presence_devices_map[key] = presence_map_list_compressed
+
+ return presence_devices_map
+
+def Compress(presence_map_list):
+
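+ # Run-length style compression into [index, value] pairs marking the start and end of each run.
+ # Illustrative example (assumed usage): Compress([5, 5, 5, 2, 2]) -> [[0, 5], [2, 5], [3, 2], [4, 2]]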
+ presence_map_list_compressed = []
+ l = len(presence_map_list)
+ if l > 1:
+ last_data_point = presence_map_list[0]
+ presence_map_list_compressed.append([0, last_data_point])
+ for i in range(1, l):
+ data_point = presence_map_list[i]
+ if data_point != last_data_point:
+ presence_map_list_compressed.append([i - 1, last_data_point])
+ presence_map_list_compressed.append([i, data_point])
+ last_data_point = data_point
+ presence_map_list_compressed.append([i, data_point])
+ return presence_map_list_compressed
+
+def Decompress(pers_in_deka):
+
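+ # Expects a list of [start_deca, end_deca, value] triples (note: this is a different
+ # layout from the [index, value] pairs produced by Compress above)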
+ last = pers_in_deka[-1]
+ last_index = 1 + last[1]
+ result = [0] * last_index
+ for points in pers_in_deka:
+ start_deca = points[0]
+ end_deca = points[1]
+ value_deca = points[2]
+ for i in range(start_deca, 1+end_deca):
+ result[i] = value_deca
+ return result
+
+def store_to_file(my_list, filename):
+
+ try:
+ with open(filename, 'w') as f:
+ json.dump(my_list, f, indent=4) # indent for pretty printing
+ print(f"List saved to {filename} using JSON")
+ except IOError:
+ print(f"Error: Could not write to file {filename}")
+ except TypeError as e:
+ print(f"Error: Could not serialize list to JSON. {e}") # e.g. if list contains unsupported types like sets
+
+def find_custom_header(headers, name):
+ """Helper to find a custom header value (case-insensitive name)."""
+ if not headers: return None
+ for header in headers:
+ if header.get('name', '').lower() == name.lower(): return header.get('value')
+ return None
+
+def encode_state(parts):
+ """Joins parts with a pipe and base64 encodes the result."""
+ plain_state = "|".join(map(str, parts))
+ base64_state = base64.b64encode(plain_state.encode('utf-8')).decode('ascii')
+ # Assuming 'logger' is your app's logger instance
+ logger.debug(f"Encoded state: '{plain_state}' -> '{base64_state}'")
+ return base64_state
+
+def decode_state(b64_state):
+ """Decodes a base64 state and splits it by pipe."""
+ if not b64_state: return []
+ try:
+ decoded_plain = base64.b64decode(b64_state).decode('utf-8')
+ parts = decoded_plain.split('|')
+ logger.debug(f"Decoded state: '{b64_state}' -> '{decoded_plain}' -> {parts}")
+ return parts
+ except Exception as e:
+ logger.error(f"Failed to decode client_state '{b64_state}': {e}")
+ return []
+
+def send_telnyx_command(action_path, params, api_key):
+ """
+ Sends a command to the Telnyx Call Control API actions endpoint.
+ This function should REPLACE your existing send_telnyx_command.
+ """
+ if not api_key:
+ logger.error(f"CMDFAIL ('{action_path}'): API_KEY not available.")
+ return None
+
+ ccid = params.get("call_control_id")
+ if not ccid:
+ logger.error(f"CMDFAIL ('{action_path}'): call_control_id missing in params.")
+ return None
+
+ # Correct endpoint construction for V2 actions
+ endpoint = f"{TELNYX_API_BASE_URL}/calls/{ccid}/{action_path}"
+
+ # Body should not contain call_control_id for actions API
+ body = {k: v for k, v in params.items() if k != 'call_control_id'}
+
+ headers = {
+ "Authorization": f"Bearer {api_key}",
+ "Content-Type": "application/json",
+ "Accept": "application/json"
+ }
+
+ logger.info(f"SENDCMD ('{action_path}')")
+ logger.debug(f" Endpoint: POST {endpoint}")
+ logger.debug(f" JSON Payload: {json.dumps(body, indent=2)}")
+
+ try:
+ response = requests.post(endpoint, json=body, headers=headers, timeout=10)
+ response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
+ logger.info(f"CMDOK ('{action_path}'): Telnyx accepted. Status: {response.status_code}")
+ return response.json()
+ except requests.exceptions.HTTPError as e:
+ logger.error(f"CMDFAIL ('{action_path}'): Telnyx rejected. Status: {e.response.status_code}")
+ try:
+ logger.error(f" Telnyx Err Detail: {json.dumps(e.response.json(), indent=2)}")
+ except json.JSONDecodeError:
+ logger.error(f" Raw Err Body: {e.response.text[:500]}")
+ except requests.exceptions.RequestException as e:
+ logger.exception(f"CMDFAIL ('{action_path}'): Network error")
+
+ return None
+
+def StoreToDB(data):
+ event_type = data.get('event_type')
+ sql = ""
+
+ payload_json = json.dumps(data["payload"])
+ call_session_id = data["payload"]['call_session_id']
+ if event_type == "call.initiated":
+ timee = data.get('occurred_at')
+ sql = f"""
+ INSERT INTO public.alarms_voice (
+ index,
+ "time",
+ call_session_id,
+ initiated
+ )
+ VALUES (
+ (SELECT COALESCE(MAX(index), 0) + 1 FROM public.alarms_voice), -- Auto-increment index
+ '{timee}'::timestamptz, -- occurred_at value
+ '{call_session_id}', -- call_session_id value
+ '{payload_json}'
+ ); """
+
+ elif event_type == "call.answered":
+ sql = f"""
+ UPDATE public.alarms_voice
+ SET answered = '{payload_json}'
+ WHERE call_session_id = '{call_session_id}';"""
+ elif event_type == "call.playback.started":
+ sql = f"""
+ UPDATE public.alarms_voice
+ SET playback_started = '{payload_json}'
+ WHERE call_session_id = '{call_session_id}';"""
+ elif event_type == "call.playback.ended":
+ sql = f"""
+ UPDATE public.alarms_voice
+ SET playback_ended = '{payload_json}'
+ WHERE call_session_id = '{call_session_id}';"""
+ elif event_type == "call.hangup":
+ sql = f"""
+ UPDATE public.alarms_voice
+ SET hangup = '{payload_json}'
+ WHERE call_session_id = '{call_session_id}';"""
+
+ if sql != "":
+ with get_db_connection() as conn:
+ with conn.cursor() as cur:
+ print(sql)
+ cur.execute(sql)
+
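+# NOTE (illustrative sketch only, not wired in): StoreToDB above interpolates payload_json and
+# call_session_id directly into the SQL text. Assuming get_db_connection() returns a DB-API
+# connection that supports %s placeholders (e.g. psycopg2), a parameterized variant of the
+# UPDATE branches could look like this:
+#
+# EVENT_COLUMNS = {"call.answered": "answered", "call.playback.started": "playback_started",
+#                  "call.playback.ended": "playback_ended", "call.hangup": "hangup"}
+#
+# def store_event_update(event_type, payload_json, call_session_id):
+#     column = EVENT_COLUMNS.get(event_type)
+#     if column is None:
+#         return
+#     # Column name comes from the fixed whitelist above; values are bound as parameters
+#     sql = f"UPDATE public.alarms_voice SET {column} = %s WHERE call_session_id = %s"
+#     with get_db_connection() as conn:
+#         with conn.cursor() as cur:
+#             cur.execute(sql, (payload_json, call_session_id))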
+
+
+def handle_telnyx_webhook(webhook_data, remote_addr, request_id):
+ """Process Telnyx webhook events"""
+ logger.info(f"Processing Telnyx webhook from {remote_addr}, Request-ID: {request_id}")
+
+ try:
+ data = webhook_data.get('data', {})
+ event_type = data.get('event_type')
+ record_type = data.get('record_type')
+ payload = data.get('payload', {})
+
+ logger.info(f"Event: {event_type}, Record Type: {record_type}")
+
+ if not event_type or not record_type:
+ logger.error("Missing event_type or record_type in webhook data")
+ return False
+
+ call_control_id = payload.get('call_control_id')
+ call_session_id = payload.get('call_session_id')
+
+ # Voice Event Handling
+ if record_type == 'event':
+ logger.info(f"Processing voice event: {event_type}")
+
+ StoreToDB(data)
+
+ if event_type == 'call.initiated':
+ logger.info(f"Call initiated: From: {payload.get('from')}, To: {payload.get('to')}")
+ elif event_type == 'call.answered':
+ logger.info(f"Call answered: From: {payload.get('from')}, To: {payload.get('to')}")
+
+ # Get custom headers and log them
+ custom_headers = payload.get('custom_headers', [])
+ logger.debug(f"Custom headers: {json.dumps(custom_headers)}")
+
+ # Check for audio URL
+ audio_url = find_custom_header(custom_headers, 'X-Audio-Url')
+ tts_payload = find_custom_header(custom_headers, 'X-TTS-Payload')
+
+ logger.info(f"Audio URL: {audio_url}, TTS Payload: {tts_payload}")
+
+ # Play audio if URL is provided
+ if ENABLE_AUDIO_PLAYBACK and audio_url:
+ logger.info(f"Playing audio: {audio_url}")
+ client_state = create_client_state("answered", call_control_id, CLIENT_STATE_PREFIX)
+
+ play_params = {
+ "call_control_id": call_control_id,
+ "client_state": client_state,
+ "audio_url": audio_url
+ }
+
+ result = send_telnyx_command("actions/playback_start", play_params, TELNYX_API_KEY)
+ logger.info(f"Play command result: {result}")
+ return True
+
+ elif tts_payload:
+ logger.info(f"Speaking text: {tts_payload}")
+ client_state = create_client_state("answered", call_control_id, CLIENT_STATE_PREFIX)
+
+ speak_params = {
+ "payload": tts_payload,
+ "voice": DEFAULT_TTS_VOICE,
+ "language": DEFAULT_TTS_LANGUAGE,
+ "call_control_id": call_control_id,
+ "client_state": client_state
+ }
+
+ result = send_telnyx_command("actions/speak", speak_params, TELNYX_API_KEY)
+ logger.info(f"Speak command result: {result}")
+ return True
+
+ else:
+ logger.warning("No audio URL or TTS payload found in call. Hanging up.")
+ hangup_params = {
+ "call_control_id": call_control_id,
+ "client_state": create_client_state("nohdr_hup", call_control_id, CLIENT_STATE_PREFIX)
+ }
+ send_telnyx_command("actions/hangup", hangup_params, TELNYX_API_KEY)
+ return True
+
+ # Handle other voice events
+ elif event_type in ['call.speak.ended', 'call.playback.ended']:
+ status = payload.get('status')
+ ended_event_type = event_type.split('.')[-2]
+ logger.info(f"Call {ended_event_type} ended: Status={status}")
+
+ # Hang up after media finished playing
+ hangup_params = {
+ "call_control_id": call_control_id,
+ "client_state": create_client_state(f"{ended_event_type}_hup", call_control_id, CLIENT_STATE_PREFIX)
+ }
+ send_telnyx_command("actions/hangup", hangup_params, TELNYX_API_KEY)
+ return True
+
+ elif event_type == 'call.hangup':
+ logger.info(f"Call hung up: Cause={payload.get('cause')}")
+ return True
+
+ else:
+ logger.info(f"Other voice event: {event_type}")
+ return True
+
+ # SMS Event Handling
+ elif record_type == 'message':
+ logger.info(f"Processing SMS event: {event_type}")
+ # SMS handling code...
+ return True
+
+ else:
+ logger.warning(f"Unknown record type: {record_type}")
+ return False
+
+ except Exception as e:
+ logger.exception(f"Error in handle_telnyx_webhook: {e}")
+ return False
+
+# Assume these are defined globally or accessible (e.g., from app_args or .env)
+# logger = logging.getLogger(...)
+# ENABLE_AUDIO_PLAYBACK = True / False
+# CLIENT_STATE_PREFIX = "app_state"
+# DEFAULT_TTS_VOICE = "female"
+# DEFAULT_TTS_LANGUAGE = "en-US"
+# TELNYX_API_KEY = "YOUR_API_KEY"
+# DTMF_GATHER_TIMEOUT_SECONDS = 15 # Wait 15 seconds for DTMF input
+
+# Placeholder for your DB function
+# def StoreToDB(data):
+# app_logger.debug(f"Placeholder: Storing to DB: {json.dumps(data)[:100]}") # Use app_logger
+
+# (Your existing find_custom_header, create_client_state, send_telnyx_command should be here)
+# Make sure send_telnyx_command uses app_logger
+
+def handle_telnyx_webhook2(webhook_data, remote_addr, request_id):
+ """Process Telnyx webhook events with IVR logic."""
+ logger.info(f"Processing Telnyx webhook from {remote_addr}, Request-ID: {request_id}")
+
+ try:
+ data = webhook_data.get('data', {})
+ event_type = data.get('event_type')
+ record_type = data.get('record_type')
+ payload = data.get('payload', {})
+
+ logger.info(f"Event: {event_type}, Record Type: {record_type}")
+
+ if not event_type or not record_type:
+ logger.error("Missing event_type or record_type in webhook data")
+ return False # Indicate failure to process
+
+ call_control_id = payload.get('call_control_id')
+ call_session_id = payload.get('call_session_id')
+ # Attempt to decode client_state if present
+ b64_client_state_rcvd = data.get("payload",{}).get("client_state")
+ plain_client_state_rcvd = ""
+ if b64_client_state_rcvd:
+ try:
+ plain_client_state_rcvd = base64.b64decode(b64_client_state_rcvd).decode('utf-8')
+ logger.info(f" Decoded Client State Received: '{plain_client_state_rcvd}'")
+ except Exception as e:
+ logger.warning(f" Could not decode client_state: {b64_client_state_rcvd}, Error: {e}")
+ plain_client_state_rcvd = "undecodable_state"
+
+
+ # Store all events to DB if needed
+ StoreToDB(webhook_data) # Pass the full webhook_data
+
+ # Voice Event Handling
+ if record_type == 'event':
+ logger.info(f"Processing voice event: {event_type}, CCID: {call_control_id}")
+
+ # --- Initial Call Setup ---
+ if event_type == 'call.initiated':
+ logger.info(f" Call initiated: From: {payload.get('from')}, To: {payload.get('to')}")
+ # No action needed here, wait for call.answered
+
+ elif event_type == 'call.answered':
+ logger.info(f" Call answered: From: {payload.get('from')}, To: {payload.get('to')}")
+ custom_headers = payload.get('custom_headers', [])
+ logger.debug(f" Custom headers: {json.dumps(custom_headers)}")
+
+ audio_url = find_custom_header(custom_headers, 'X-Audio-Url')
+ tts_payload = find_custom_header(custom_headers, 'X-TTS-Payload')
+ logger.info(f" X-Audio-Url: {audio_url}, X-TTS-Payload: {tts_payload}")
+
+ # This state means the main message is about to be played.
+ # After it ends, we'll play the options prompt.
+ next_client_state = create_client_state("main_media_played", call_control_id, app_args.client_state_prefix) # Use app_args
+
+ action_taken = False
+ if app_args.enable_audio_playback and audio_url: # Use app_args
+ logger.info(f" -> Playing main audio: {audio_url}")
+ play_params = {"call_control_id": call_control_id, "client_state": next_client_state, "audio_url": audio_url}
+ send_telnyx_command("actions/playback_start", play_params, app_args.api_key) # Use app_args
+ action_taken = True
+ elif tts_payload:
+ logger.info(f" -> Speaking main TTS: {tts_payload}")
+ speak_params = {"payload": tts_payload, "voice": app_args.default_tts_voice, "language": app_args.default_tts_language, "call_control_id": call_control_id, "client_state": next_client_state} # Use app_args
+ send_telnyx_command("actions/speak", speak_params, app_args.api_key) # Use app_args
+ action_taken = True
+
+ if not action_taken:
+ logger.warning(" -> No audio URL or TTS payload for main message. Hanging up.")
+ hangup_params = {"call_control_id": call_control_id, "client_state": create_client_state("no_main_media_hup", call_control_id, app_args.client_state_prefix)}
+ send_telnyx_command("actions/hangup", hangup_params, app_args.api_key)
+
+ # --- Handling End of Main Media Playback ---
+ elif event_type in ['call.speak.ended', 'call.playback.ended']:
+ status = payload.get('status')
+ ended_event_type_root = event_type.split('.')[1] # speak or playback
+
+ logger.info(f" Call {ended_event_type_root} ended: Status={status}, Current Decoded State='{plain_client_state_rcvd}'")
+
+ # Check if the main media just finished playing
+ if plain_client_state_rcvd.startswith(f"{app_args.client_state_prefix}_main_media_played"):
+ logger.info(" -> Main media finished. Playing DTMF options prompt.")
+ options_prompt_tts = "press 0 to repeat the message or press pound to hang up."
+ # This state means the options prompt is playing, and we're waiting for DTMF.
+ # gather_using_speak will trigger call.gather.ended
+ next_client_state = create_client_state("waiting_dtmf", call_control_id, app_args.client_state_prefix)
+
+ gather_params = {
+ "call_control_id": call_control_id,
+ "client_state": next_client_state,
+ "payload": options_prompt_tts,
+ "voice": app_args.default_tts_voice,
+ "language": app_args.default_tts_language,
+ "valid_digits": "0#", # Only accept 0 or #
+ "max_digits": 1, # Expect only one digit
+ "timeout_millis": app_args.dtmf_timeout_seconds * 1000, # N seconds timeout
+ "terminating_digits": "#" # # will also terminate gather immediately
+ }
+ send_telnyx_command("actions/gather_using_speak", gather_params, app_args.api_key)
+
+ elif plain_client_state_rcvd.startswith(f"{app_args.client_state_prefix}_replaying_main_media"):
+ logger.info(" -> Replayed main media finished. Playing DTMF options prompt again.")
+ # Same logic as above for playing options prompt
+ options_prompt_tts = "press 0 to repeat the message or press pound to hang up."
+ next_client_state = create_client_state("waiting_dtmf", call_control_id, app_args.client_state_prefix)
+ gather_params = {
+ "call_control_id": call_control_id, "client_state": next_client_state,
+ "payload": options_prompt_tts, "voice": app_args.default_tts_voice, "language": app_args.default_tts_language,
+ "valid_digits": "0#", "max_digits": 1, "timeout_millis": app_args.dtmf_timeout_seconds * 1000, "terminating_digits": "#"
+ }
+ send_telnyx_command("actions/gather_using_speak", gather_params, app_args.api_key)
+
+ else:
+ logger.warning(f" -> {ended_event_type_root} ended, but client_state ('{plain_client_state_rcvd}') doesn't match expected flow for options. Hanging up.")
+ hangup_params = {"call_control_id": call_control_id, "client_state": create_client_state(f"{ended_event_type_root}_unexpected_hup", call_control_id, app_args.client_state_prefix)}
+ send_telnyx_command("actions/hangup", hangup_params, app_args.api_key)
+
+ # --- Handling DTMF Input Result ---
+ elif event_type == 'call.gather.ended':
+ digits_received = payload.get('digits')
+ gather_status = payload.get('status') # e.g., 'completed_by_terminating_digit', 'timeout', 'call_hangup'
+ logger.info(f" Call Gather Ended: Digits='{digits_received}', Status='{gather_status}', Current Decoded State='{plain_client_state_rcvd}'")
+
+ if plain_client_state_rcvd.startswith(f"{app_args.client_state_prefix}_waiting_dtmf"):
+ if digits_received == "0":
+ logger.info(" -> DTMF '0' received. Replaying main message.")
+ # Replaying requires the original X-Audio-Url / X-TTS-Payload. The call.gather.ended
+ # payload is NOT guaranteed to still carry the original custom_headers, so a more robust
+ # approach is to store the media type/content in the client_state (or a DB record) when
+ # leaving the main_media_played state. For this example we simply try whatever
+ # custom_headers are present on the current event.
+ custom_headers = payload.get('custom_headers', []) # May not contain the original headers
+
+ logger.warning(" -> Replay logic needs access to original X-TTS-Payload/X-Audio-Url. This example will try to use current custom_headers if any, or a default.")
+
+ original_audio_url = find_custom_header(custom_headers, 'X-Audio-Url') # May not be original
+ original_tts_payload = find_custom_header(custom_headers, 'X-TTS-Payload') # May not be original
+
+ next_client_state = create_client_state("replaying_main_media", call_control_id, app_args.client_state_prefix)
+ action_taken = False
+ if app_args.enable_audio_playback and original_audio_url:
+ logger.info(f" -> Replaying audio: {original_audio_url}")
+ play_params = {"call_control_id": call_control_id, "client_state": next_client_state, "audio_url": original_audio_url}
+ send_telnyx_command("actions/playback_start", play_params, app_args.api_key)
+ action_taken = True
+ elif original_tts_payload:
+ logger.info(f" -> Replaying TTS: {original_tts_payload}")
+ speak_params = {"payload": original_tts_payload, "voice": app_args.default_tts_voice, "language": app_args.default_tts_language, "call_control_id": call_control_id, "client_state": next_client_state}
+ send_telnyx_command("actions/speak", speak_params, app_args.api_key)
+ action_taken = True
+
+ if not action_taken:
+ logger.error(" -> Could not find original media to replay. Hanging up.")
+ hangup_params = {"call_control_id": call_control_id, "client_state": create_client_state("replay_fail_hup", call_control_id, app_args.client_state_prefix)}
+ send_telnyx_command("actions/hangup", hangup_params, app_args.api_key)
+
+ elif digits_received == "#" or (gather_status == 'completed_by_terminating_digit' and payload.get('terminating_digit') == '#'):
+ logger.info(" -> DTMF '#' received or terminating digit. Hanging up.")
+ hangup_params = {"call_control_id": call_control_id, "client_state": create_client_state("dtmf_pound_hup", call_control_id, app_args.client_state_prefix)}
+ send_telnyx_command("actions/hangup", hangup_params, app_args.api_key)
+ elif gather_status == 'timeout':
+ logger.info(" -> DTMF gather timed out. Hanging up.")
+ hangup_params = {"call_control_id": call_control_id, "client_state": create_client_state("dtmf_timeout_hup", call_control_id, app_args.client_state_prefix)}
+ send_telnyx_command("actions/hangup", hangup_params, app_args.api_key)
+ else:
+ logger.warning(f" -> Gather ended with unhandled digits '{digits_received}' or status '{gather_status}'. Hanging up.")
+ hangup_params = {"call_control_id": call_control_id, "client_state": create_client_state("dtmf_unhandled_hup", call_control_id, app_args.client_state_prefix)}
+ send_telnyx_command("actions/hangup", hangup_params, app_args.api_key)
+ else:
+ logger.warning(f" -> Gather ended, but client_state ('{plain_client_state_rcvd}') doesn't match waiting_dtmf. Ignoring.")
+
+
+ elif event_type == 'call.hangup':
+ app_logger.info(f" Call Hangup Event: Cause='{payload.get('cause')}', SIPCause='{payload.get('sip_hangup_cause')}', Source='{payload.get('hangup_source')}'")
+ # Call is already over, no command to send.
+
+ # Log other voice events not explicitly handled above for visibility
+ elif event_type not in ['call.initiated', 'call.answered', 'call.speak.ended', 'call.playback.ended', 'call.gather.ended', 'call.hangup', 'call.speak.started', 'call.playback.started']:
+ logger.info(f" Other Voice Event: Type='{event_type}'. Payload: {json.dumps(payload, indent=2)}")
+
+
+ # --- SMS Event Handling (Placeholder from your snippet) ---
+ elif record_type == 'message':
+ logger.info(f"Processing SMS event: {event_type}")
+ # Your existing SMS handling code would go here...
+ # For now, just acknowledge
+ logger.info(" -> SMS ACK (204)")
+ return Response(status=204) # Ensure SMS events are also ACKed
+
+ else:
+ logger.warning(f"Unknown record type: {record_type}")
+ # Acknowledge to prevent retries from Telnyx
+ logger.info(" -> Unknown Record Type ACK (204)")
+ return Response(status=204)
+
+ # If we reached here for a voice event and didn't send a command through send_telnyx_command,
+ # it means we are just acknowledging the event.
+ logger.info(" -> Voice Event Processed (no immediate command sent or command sent async). ACK (204) to Telnyx.")
+ return Response(status=204) # ALWAYS ACK THE WEBHOOK
+
+ except Exception as e:
+ logger.exception(f"Error in handle_telnyx_webhook: {e}")
+ # Still try to ACK Telnyx if possible, but log the error.
+ # Depending on the error, Telnyx might retry if it doesn't get a 2xx.
+ return "Internal Server Error", 500
+
+def handle_telnyx_webhook3(webhook_data, remote_addr, request_id):
+ """
+ Processes Telnyx webhook events with full IVR logic for repeating messages.
+ This function should be added to your well-api.py.
+ """
+ logger.info(f"Processing webhook in handle_telnyx_webhook3 from {remote_addr}, Request-ID: {request_id}")
+
+ # --- ADAPT THIS SECTION to your app's config management ---
+ # This example assumes config values are accessible as global constants or from a dict.
+ # Replace these with your actual config access method (e.g., self.config['...'])
+ config = {
+ 'api_key': TELNYX_API_KEY,
+ 'dtmf_timeout_seconds': 10,
+ 'initial_silence_ms': 500,
+ 'replay_silence_ms': 100,
+ 'default_tts_voice': 'female',
+ 'default_tts_language': 'en-US',
+ 'client_state_prefix': 'well_api_state',
+ 'inbound_greeting': 'Thank you for calling. We will be with you shortly.'
+ }
+ # --- END ADAPTATION SECTION ---
+
+ try:
+ #StoreToDB(webhook_data) # Call your DB storage function first
+
+ data, payload = webhook_data.get('data', {}), webhook_data.get('data', {}).get('payload', {})
+ event_type, record_type, ccid = data.get('event_type'), data.get('record_type'), payload.get('call_control_id')
+ logger.info(f"EVENT '{event_type}' ({record_type})" + (f", CCID: {ccid}" if ccid else ""))
+
+ if record_type != 'event':
+ logger.info(f" -> Non-voice event ('{record_type}') received. Ignoring in this handler.")
+ return True
+
+ b64_client_state = payload.get("client_state")
+ decoded_parts = decode_state(b64_client_state)
+ state_name = decoded_parts[0] if decoded_parts else None
+ if state_name: logger.info(f" State Name Received: '{state_name}'")
+
+ current_api_key = config['api_key']
+
+ # --- State Machine Logic ---
+ if event_type == 'call.answered':
+ if payload.get('direction') == 'incoming':
+ logger.info(" -> Inbound call detected. Playing generic greeting and hanging up.")
+ next_state = encode_state(['INBOUND_GREETING_HUP'])
+ speak_params = {"payload": config['inbound_greeting'], "voice": config['default_tts_voice'], "language": config['default_tts_language'], "call_control_id": ccid, "client_state": next_state}
+ send_telnyx_command("actions/speak", speak_params, current_api_key)
+ else: # Outgoing call
+ audio_url = find_custom_header(payload.get('custom_headers'), 'X-Audio-Url')
+ tts_payload = find_custom_header(payload.get('custom_headers'), 'X-TTS-Payload')
+ media_type = "audio" if audio_url else "tts" if tts_payload else "none"
+ media_value = audio_url or tts_payload
+ if media_value:
+ logger.info(f" -> Outbound call. Playing {config['initial_silence_ms']}ms silence buffer.")
+ next_state = encode_state(['INIT_PLAY_MAIN', media_type, media_value])
+ #send_telnyx_command("actions/play_silence", {"milliseconds": str(config['initial_silence_ms']), "call_control_id": ccid, "client_state": next_state}, current_api_key)
+ else:
+ logger.warning(" -> Outbound call, but no audio/tts payload. Hanging up.")
+ send_telnyx_command("actions/hangup", {"call_control_id": ccid}, current_api_key)
+
+ elif event_type == 'call.playback.ended':
+ if state_name == 'INIT_PLAY_MAIN': # Silence ended
+ logger.info(" -> Silence buffer ended. Playing main message.")
+ _, media_type, media_value = decoded_parts
+ next_state = encode_state(['MAIN_MEDIA_PLAYED', media_type, media_value])
+ if media_type == "audio":
+ send_telnyx_command("actions/playback_start", {"audio_url": media_value, "call_control_id": ccid, "client_state": next_state}, current_api_key)
+ elif media_type == "tts":
+ params = {"payload": media_value, "voice": config['default_tts_voice'], "language": config['default_tts_language'], "call_control_id": ccid, "client_state": next_state}
+ send_telnyx_command("actions/speak", params, current_api_key)
+ elif state_name == 'REPLAY_SILENCE': # Replay silence ended
+ logger.info(" -> Replay silence ended. Replaying main message.")
+ _, media_type, media_value = decoded_parts
+ next_state = encode_state(['REPLAYING_MEDIA', media_type, media_value])
+ if media_type == "audio":
+ send_telnyx_command("actions/playback_start", {"audio_url": media_value, "call_control_id": ccid, "client_state": next_state}, current_api_key)
+ elif media_type == "tts":
+ params = {"payload": media_value, "voice": config['default_tts_voice'], "language": config['default_tts_language'], "call_control_id": ccid, "client_state": next_state}
+ send_telnyx_command("actions/speak", params, current_api_key)
+ elif state_name in ['MAIN_MEDIA_PLAYED', 'REPLAYING_MEDIA']: # Actual audio file ended
+ logger.info(f" -> Main audio playback finished. Playing options menu.")
+ _, media_type, media_value = decoded_parts
+ next_state = encode_state(['WAITING_DTMF', media_type, media_value])
+ options_prompt = "press 0 to repeat the message or press pound to hang up."
+ gather_params = {
+ "payload": options_prompt, "voice": config['default_tts_voice'], "language": config['default_tts_language'],
+ "valid_digits": "0#", "max_digits": 1, "timeout_millis": config['dtmf_timeout_seconds'] * 1000, "terminating_digit": "#",
+ "call_control_id": ccid, "client_state": next_state
+ }
+ send_telnyx_command("actions/gather_using_speak", gather_params, current_api_key)
+ else:
+ logger.warning(f" -> Playback ended with unhandled state '{state_name}'. Hanging up.")
+ send_telnyx_command("actions/hangup", {"call_control_id": ccid}, current_api_key)
+
+ elif event_type == 'call.speak.ended':
+ if state_name in ['MAIN_MEDIA_PLAYED', 'REPLAYING_MEDIA']:
+ logger.info(f" -> Main message TTS finished. Playing options menu.")
+ _, media_type, media_value = decoded_parts
+ next_state = encode_state(['WAITING_DTMF', media_type, media_value])
+ options_prompt = "press 0 to repeat the message or press pound to hang up."
+ gather_params = {
+ "payload": options_prompt, "voice": config['default_tts_voice'], "language": config['default_tts_language'],
+ "valid_digits": "0#", "max_digits": 1, "timeout_millis": config['dtmf_timeout_seconds'] * 1000, "terminating_digit": "#",
+ "call_control_id": ccid, "client_state": next_state
+ }
+ send_telnyx_command("actions/gather_using_speak", gather_params, current_api_key)
+ elif state_name == 'INBOUND_GREETING_HUP':
+ logger.info(" -> Inbound greeting finished. Hanging up.")
+ send_telnyx_command("actions/hangup", {"call_control_id": ccid}, current_api_key)
+ else:
+ logger.warning(f" -> Speak ended with unhandled state '{state_name}'. Hanging up.")
+ send_telnyx_command("actions/hangup", {"call_control_id": ccid}, current_api_key)
+
+ elif event_type == 'call.dtmf.received':
+ digit = payload.get('digit')
+ logger.info(f" DTMF Received: Digit='{digit}'")
+ if digit == '#':
+ logger.info(" -> '#' received. Terminating call immediately.")
+ send_telnyx_command("actions/hangup", {"call_control_id": ccid}, current_api_key)
+
+ elif event_type == 'call.gather.ended':
+ logger.info(f" -> Gather ended. Digits received: '{payload.get('digits')}', Status: '{payload.get('status')}'")
+ if state_name == 'WAITING_DTMF':
+ digits = payload.get('digits')
+ _, media_type, media_value = decoded_parts
+ if digits == "0":
+ logger.info(f" -> '0' pressed. Playing {config['replay_silence_ms']}ms silence before replay.")
+ next_state = encode_state(['REPLAY_SILENCE', media_type, media_value])
+ send_telnyx_command("actions/play_silence", {"milliseconds": str(config['replay_silence_ms']), "call_control_id": ccid, "client_state": next_state}, current_api_key)
+ else:
+ logger.info(" -> Gather ended with non-repeat condition. Hanging up.")
+ send_telnyx_command("actions/hangup", {"call_control_id": ccid}, current_api_key)
+ else:
+ logger.warning(f" -> Gather ended with unhandled state '{state_name}'.")
+
+ elif event_type == 'call.hangup':
+ logger.info(f" Call Hangup Event: Cause='{payload.get('cause')}'")
+ else:
+ logger.info(f" -> Unhandled Voice Event: '{event_type}' with state '{state_name}'.")
+
+ return True # Return app-specific success
+ except Exception as e:
+ logger.exception(f"Error in handle_telnyx_webhook3: {e}")
+ return False
+
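+# Rough state flow implemented by handle_telnyx_webhook3 above (summary of the client_state names;
+# note the initial play_silence command on call.answered is currently commented out, so the outbound
+# branch depends on that step being re-enabled or the main media being started another way):
+#   call.answered (incoming) -> INBOUND_GREETING_HUP -> hangup after greeting
+#   call.answered (outgoing) -> INIT_PLAY_MAIN (silence buffer)
+#   playback.ended [INIT_PLAY_MAIN] -> MAIN_MEDIA_PLAYED (play audio / speak TTS)
+#   playback/speak.ended [MAIN_MEDIA_PLAYED or REPLAYING_MEDIA] -> WAITING_DTMF (gather "0" / "#")
+#   gather.ended [WAITING_DTMF] with "0" -> REPLAY_SILENCE -> REPLAYING_MEDIA -> WAITING_DTMF again
+#   any other digit, timeout, or DTMF "#" -> hangup
+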
+#==================================== ADD FUNCTIONS BEFORE ============================================
+
# Main API class
class WellApi:
def on_get_healthz(self, req, resp):
@@ -10289,6 +13842,24 @@ class WellApi:
# Authentication and authorization
token = req.params.get('token')
user_name = req.params.get('user_name')
+ ps = req.params.get('ps')
+
+ if ps != "" and ps != None:
+ #was the token sent in the ps field? This allows either field to carry the token or the password
+ user_info = verify_token(ps)
+ if user_info != None and user_info["username"] == user_name:
+ token = ps
+ else:
+ #is this valid password?
+ privileges, user_id = ValidUser(user_name, ps)
+ if privileges == "0":
+ resp.media = package_response("Log-Out", HTTP_401)
+ return
+ else:
+ token = generate_token(user_name)
+
+
+
user_info = verify_token(token)
if user_info == None or user_info["username"] != user_name:
@@ -10299,7 +13870,7 @@ class WellApi:
logger.debug(f"[{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] - {__name__}.GET_API->{get_function_name}")
privileges = GetPriviledgesOnly(user_name)
- if token and user_name:
+ if token and user_name:
user_info = verify_token(token)
if user_info is None or user_info["username"] != user_name:
resp.media = package_response("Log-Out", HTTP_401)
@@ -10319,8 +13890,8 @@ class WellApi:
elif get_function_name == "devices_list":
st = time.time()
- user_id = req.params.get('user_id')
- privileges = GetPriviledgesOnly(user_id)
+ user_name = req.params.get('user_name')
+ privileges = GetPriviledgesOnly(user_name)
first_s = req.params.get('first')
last_s = req.params.get('last')
@@ -10377,7 +13948,7 @@ class WellApi:
elif get_function_name == "device_add":
blob_data = read_file("edit_device.html")
- device = {'device_id': 0, 'device_mac': '', 'well_id': '', 'description': '', 'location': '', 'close_to': '', 'radar_threshold': '["s3_max",50]', 'temperature_calib': '0.0,1.0,0.0', 'humidity_calib': '0.0,1.0,0.0'}
+ device = {'device_id': 0, 'device_mac': '', 'well_id': '', 'description': '', 'location': '', 'close_to': '', 'radar_threshold': '["s3_max",12]', 'temperature_calib': '0.0,1.0,0.0', 'humidity_calib': '0.0,1.0,0.0'}
blob_data = FillFields(blob_data, device, 1)
resp.content_type = "text/html"
resp.text = blob_data
@@ -10784,6 +14355,9 @@ class WellApi:
#resp.media = package_response(f"Path: /{path}", HTTP_200)
def on_post(self, req, resp, path=""):
+ #ToDo make sure that any read/write data functions are authorized for this user_name
+ global threshold_cache, device_lookup_cache
+
"""Handle POST requests"""
logger.debug(f"on_post called with path: {path}")
logger.debug(f"Request method: {req.method}")
@@ -10792,39 +14366,97 @@ class WellApi:
logger.debug(f"Request headers: {req.headers}")
logger.debug(f"Request content type: {req.content_type}")
+
+ # First, check if this is a Telnyx webhook request
+ is_telnyx_webhook = (
+ req.content_type and 'application/json' in req.content_type and
+ req.headers.get('USER-AGENT') == 'telnyx-webhooks'
+ )
+
+ if is_telnyx_webhook:
+ logger.info("Processing Telnyx webhook request")
+ try:
+ # Read the raw request body
+ raw_body = req.stream.read().decode('utf-8')
+ logger.debug(f"Raw webhook request body: {raw_body}")
+
+ if not raw_body:
+ logger.error("Empty request body received from Telnyx")
+ resp.status = falcon.HTTP_400
+ resp.content_type = falcon.MEDIA_JSON
+ resp.text = json.dumps({"error": "Empty request body"})
+ return
+
+ # Parse JSON
+ webhook_data = json.loads(raw_body)
+ logger.debug(f"Parsed webhook data: {json.dumps(webhook_data)}")
+
+ # Get remote address and request ID
+ remote_addr = req.headers.get('X-REAL-IP') or req.headers.get('X-FORWARDED-FOR') or 'unknown'
+ request_id = req.headers.get("X-Request-Id") or req.headers.get("Telnyx-Request-Id") or req.headers.get("X-CALL-ID") or "N/A"
+
+ # Process the webhook
+ handle_telnyx_webhook3(webhook_data, remote_addr, request_id)
+
+ # Set response status - always acknowledge webhooks with 204 No Content
+ resp.status = falcon.HTTP_204
+ return
+
+ except json.JSONDecodeError as e:
+ logger.error(f"Failed to decode JSON from webhook request body: {e}")
+ resp.status = falcon.HTTP_400
+ resp.content_type = falcon.MEDIA_JSON
+ resp.text = json.dumps({"error": "Invalid JSON payload"})
+ return
+ except Exception as e:
+ logger.exception(f"Error processing webhook: {e}")
+ resp.status = falcon.HTTP_500
+ resp.content_type = falcon.MEDIA_JSON
+ resp.text = json.dumps({"error": "Internal Server Error"})
+ return
+
+ # If we get here, it's not a Telnyx webhook, so process as normal
+ try:
+ # For non-webhook requests, get form data
+ form_data = get_form_data(req)
+ logger.debug(f"Form data: {form_data}")
+
+ except Exception as e:
+ logger.exception(f"Error in on_post: {e}")
+ resp.status = falcon.HTTP_500
+ resp.content_type = falcon.MEDIA_JSON
+ resp.text = json.dumps({"error": "Internal Server Error"})
+
# Get form data using our helper function - but don't read stream again
- form_data = get_form_data(req)
+ #form_data = get_form_data(req)
logger.debug(f"Form data: {form_data}")
- ## Special cases for specific endpoints
- #if path == "users":
- #logger.info("POST request to users endpoint")
- #resp.status = HTTP_201
- #resp.content_type = falcon.MEDIA_JSON
- #resp.text = json.dumps({"id": "new-user-id", "message": "User created"})
- #return
- #elif path == "items":
- #logger.info("POST request to items endpoint")
- #resp.status = HTTP_201
- #resp.content_type = falcon.MEDIA_JSON
- #resp.text = json.dumps({"id": "new-item-id", "message": "Item created"})
- #return
try:
+
# Get basic parameters
function = form_data.get('function')
user_name = form_data.get('user_name')
logger.debug(f"Function: {function}, User: {user_name}")
-
if function != "credentials":
token = form_data.get('token')
+ ps = form_data.get('ps')
- user_info = verify_token(token)
-
- if user_info == None:
- resp.media = package_response("Log-Out", HTTP_401)
- return
+ if ps != "" and ps != None:
+ #was the token sent in the ps field? This allows either field to carry the token or the password
+ user_info = verify_token(ps)
+ if user_info != None:
+ if user_info["username"] == user_name:
+ token = ps
+ else:
+ #is this valid password?
+ privileges, user_id = ValidUser(user_name, ps)
+ if privileges == "0":
+ resp.media = package_response("Log-Out", HTTP_401)
+ return
+ else:
+ token = generate_token(user_name)
user_info = verify_token(token)
@@ -10861,10 +14493,8 @@ class WellApi:
- if user_name == MASTER_ADMIN and ps == MASTER_PS:
- access_token = generate_token(user_name)
- privileges, user_id = ValidUser(user_name, ps)
- privileges = "-1"
+ if False:
+ pass
else:
#lets check for real
privileges, user_id = ValidUser(user_name, ps)
@@ -10874,7 +14504,16 @@ class WellApi:
else:
access_token = generate_token(user_name)
- token_payload = {'access_token': access_token, 'privileges': privileges, 'user_id': user_id}
+ if privileges == "-1":
+ max_role = -1
+ else:
+ max_role = GetMaxRole(user_name)
+ if "2" in max_role:
+ max_role = 2
+ else:
+ max_role = 1
+
+ token_payload = {'access_token': access_token, 'privileges': privileges, 'user_id': user_id, 'max_role': max_role}
resp.media = package_response(token_payload)
resp.status = falcon.HTTP_200
return
@@ -11197,12 +14836,182 @@ class WellApi:
return
+ elif function == "get_time_deltas":
+ deployment_id = form_data.get('deployment_id')
+ time_zone_s = GetTimeZoneOfDeployment(deployment_id)
+ sensor = form_data.get('sensor')
+ selected_date = form_data.get('date')
+ date_to = form_data.get('to_date')
+ radar_part = ""
+ sensor_data = {}
+ if date_to == None:
+ date_to = selected_date
+
+ start_date = datetime.datetime.strptime(selected_date, '%Y-%m-%d')
+ end_date = datetime.datetime.strptime(date_to, '%Y-%m-%d')
+
+ # Determine direction and swap dates if necessary
+ if start_date > end_date:
+ selected_date, date_to = date_to, selected_date
+
+ device_id = form_data.get('device_id')
+
+ data_type = form_data.get('data_type')
+ epoch_from_utc, _ = GetLocalTimeEpochsForDate(selected_date, time_zone_s) #>= #<
+ _, epoch_to_utc = GetLocalTimeEpochsForDate(date_to, time_zone_s) #>= #<
+
+
+ all_slices = {}
+
+ cleaned_values = {}
+ line_part = ReadSensor(device_id, sensor, epoch_from_utc, epoch_to_utc, data_type, radar_part)
+ st = time.time()
+ cleaned_values = [
+ (line_part[i][0], (line_part[i][0] - line_part[i-1][0]).total_seconds() * 1000)
+ for i in range(1, len(line_part))
+ ]
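+ # cleaned_values: (timestamp, milliseconds elapsed since the previous reading) for every record after the first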
+ print(time.time()-st)
+
+ if True:
+ # Create CSV content as a string
+ csv_content = "Record_Index,Timestamp,Value,Time_Diff_Seconds,Time_Diff_Milliseconds\n"
+
+ for i in range(len(line_part)):
+ timestamp, value = line_part[i]
+
+ if i == 0:
+ # First record has no previous record to compare
+ time_diff_seconds = 0
+ time_diff_ms = 0
+ else:
+ # Calculate time difference from previous record
+ prev_timestamp = line_part[i-1][0]
+ time_diff = timestamp - prev_timestamp
+ time_diff_seconds = time_diff.total_seconds()
+ time_diff_ms = time_diff_seconds * 1000
+
+ # Format the row
+ row = f"{i},{timestamp.isoformat()},{value},{round(time_diff_seconds, 6)},{round(time_diff_ms, 3)}\n"
+ csv_content += row
+
+ # Write to file
+ with open(f'time_differences_{sensor}_{device_id}.csv', 'w', encoding='utf-8') as f:
+ f.write(csv_content)
+
+ print(f"CSV file 'time_differences_{sensor}_{device_id}.csv' created successfully!")
+
+ line_part_t = [(x[0].timestamp(), x[1]) for x in cleaned_values]
+
+ sensor_data[sensor] = line_part_t
+ dataa = {}
+ all_slices = {}
+ all_slices[device_id] = sensor_data
+ dataa['Function'] = "time_deltas"
+ dataa['all_slices'] = all_slices
+ dataa['time_zone_st'] = time_zone_s
+ dataa['device_id'] = device_id
+ resp.media = package_response(dataa)
+ resp.status = falcon.HTTP_200
+ return
+
+ elif function == "get_sensor_deltas":
+ deployment_id = form_data.get('deployment_id')
+ time_zone_s = GetTimeZoneOfDeployment(deployment_id)
+ sensor = form_data.get('sensor')
+ selected_date = form_data.get('date')
+ date_to = form_data.get('to_date')
+ radar_part = ""
+ sensor_data = {}
+ if date_to == None:
+ date_to = selected_date
+
+ start_date = datetime.datetime.strptime(selected_date, '%Y-%m-%d')
+ end_date = datetime.datetime.strptime(date_to, '%Y-%m-%d')
+
+ # Determine direction and swap dates if necessary
+ if start_date > end_date:
+ selected_date, date_to = date_to, selected_date
+
+ device_id = form_data.get('device_id')
+
+ data_type = form_data.get('data_type')
+ epoch_from_utc, _ = GetLocalTimeEpochsForDate(selected_date, time_zone_s) #>= #<
+ _, epoch_to_utc = GetLocalTimeEpochsForDate(date_to, time_zone_s) #>= #<
+
+
+ all_slices = {}
+
+ cleaned_values = {}
+ line_part = ReadSensor(device_id, sensor, epoch_from_utc, epoch_to_utc, data_type, radar_part)
+ st = time.time()
+ cleaned_values = [
+ (line_part[i][0], (line_part[i][1] - line_part[i-1][1]) / (line_part[i][0] - line_part[i-1][0]).total_seconds())
+ for i in range(1, len(line_part))
+ if (line_part[i][0] - line_part[i-1][0]).total_seconds() > 0
+ and abs((line_part[i][1] - line_part[i-1][1]) / (line_part[i][0] - line_part[i-1][0]).total_seconds()) <= 100
+ ]
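+ # cleaned_values: (timestamp, rate of change per second); pairs with a non-positive time gap
+ # or an absolute rate above 100 are dropped as outliers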
+ print(time.time()-st)
+
+ if False:
+ # Create CSV content as a string
+ csv_content = "Record_Index,Timestamp,Value,Time_Diff_Seconds,Time_Diff_Milliseconds\n"
+
+ for i in range(len(line_part)):
+ timestamp, value = line_part[i]
+
+ if i == 0:
+ # First record has no previous record to compare
+ time_diff_seconds = 0
+ time_diff_ms = 0
+ else:
+ # Calculate time difference from previous record
+ prev_timestamp = line_part[i-1][0]
+ time_diff = timestamp - prev_timestamp
+ time_diff_seconds = time_diff.total_seconds()
+ time_diff_ms = time_diff_seconds * 1000
+
+ # Format the row
+ row = f"{i},{timestamp.isoformat()},{value},{round(time_diff_seconds, 6)},{round(time_diff_ms, 3)}\n"
+ csv_content += row
+
+ # Write to file
+ with open(f'time_differences_{sensor}_{device_id}.csv', 'w', encoding='utf-8') as f:
+ f.write(csv_content)
+
+ print(f"CSV file 'time_differences_{sensor}_{device_id}.csv' created successfully!")
+
+ line_part_t = [(x[0].timestamp(), x[1]) for x in cleaned_values]
+
+ sensor_data[sensor] = line_part_t
+ dataa = {}
+ all_slices = {}
+ all_slices[device_id] = sensor_data
+ dataa['Function'] = "time_deltas"
+ dataa['all_slices'] = all_slices
+ dataa['time_zone_st'] = time_zone_s
+ dataa['device_id'] = device_id
+ resp.media = package_response(dataa)
+ resp.status = falcon.HTTP_200
+ return
elif function == "request_single_slice":
deployment_id = form_data.get('deployment_id')
time_zone_s = GetTimeZoneOfDeployment(deployment_id)
selected_date = form_data.get('date')
+ date_to = form_data.get('to_date')
+ if date_to == None:
+ date_to = selected_date
+
+
+ start_date = datetime.datetime.strptime(selected_date, '%Y-%m-%d')
+ end_date = datetime.datetime.strptime(date_to, '%Y-%m-%d')
+
+ # Determine direction and swap dates if necessary
+ if start_date > end_date:
+ selected_date, date_to = date_to, selected_date
+
devices_list = form_data.get('devices_list')
+ radar_details = {}
#devices_list = '[267,560,"?",null,"64B70888F6F0"]'
#devices_list = '[[267,560,"?",null,"64B70888F6F0"],[268,561,"?",null,"64B70888F6F1"]]'
sensor_list_loc = [form_data.get('sensor_list')]
@@ -11215,20 +15024,76 @@ class WellApi:
well_ids_list =list(map(lambda x: x[0], device_details))
data_type = form_data.get('data_type')
- epoch_from_utc, epoch_to_utc = GetLocalTimeEpochsForDate(selected_date, time_zone_s) #>= #<
+ epoch_from_utc, _ = GetLocalTimeEpochsForDate(selected_date, time_zone_s) #>= #<
+ _, epoch_to_utc = GetLocalTimeEpochsForDate(date_to, time_zone_s) #>= #<
+
+ #we need to
+ buckets = ['no', '10s', '1m', '5m', '10m', '15m', '30m', '1h']
+
+ days = (epoch_to_utc - epoch_from_utc) / (60 * 1440)
+
+
+
+
- #epoch_to = '1730592010' #smal sample to test
- radar_part = form_data.get('radar_part')
well_id = well_ids_list[0]
all_slices = {}
- device_id2_mac = {device_details[1]: device_details[4]}
+ radar_part = ""
+ if len(device_details) > 4:
+ device_id2_mac = {device_details[1]: device_details[4]}
+ #epoch_to = '1730592010' #smal sample to test
+ #radar_part = form_data.get('radar_part') we need to find what radar part is configured in device settings
+ radar_part_all = device_details[5]
+ if len(radar_part_all) > 1:
+ radar_part = radar_part_all[0]
+ #we need only column name and not min or max here
+ if "_" in radar_part:
+ radar_parts = radar_part.split("_")
+ radar_part = radar_parts[0]
+ radar_details[device_details[1]] = radar_part_all
for device_id in device_ids_list:
- device_id2_mac
+
sensor_data = {}
for sensor in sensor_list_loc:
st = time.time()
- line_part = ReadSensor(device_id, sensor, epoch_from_utc, epoch_to_utc, data_type, radar_part)
+ if days < 3:
+ line_part = ReadSensor(device_id, sensor, epoch_from_utc, epoch_to_utc, data_type, radar_part)
+ elif days < 14:
+ bucket_size = "1m"
+ line_part = ReadSensor3(device_id, sensor, epoch_from_utc, epoch_to_utc, data_type, radar_part, bucket_size)
+ else:
+ bucket_size = "10m"
+ line_part = ReadSensor3(device_id, sensor, epoch_from_utc, epoch_to_utc, data_type, radar_part, bucket_size)
window = sensor_legal_values[sensor][2]
+
+ if False:
+ # Create CSV content as a string
+ csv_content = "Record_Index,Timestamp,Value,Time_Diff_Seconds,Time_Diff_Milliseconds\n"
+
+ for i in range(len(line_part)):
+ timestamp, value = line_part[i]
+
+ if i == 0:
+ # First record has no previous record to compare
+ time_diff_seconds = 0
+ time_diff_ms = 0
+ else:
+ # Calculate time difference from previous record
+ prev_timestamp = line_part[i-1][0]
+ time_diff = timestamp - prev_timestamp
+ time_diff_seconds = time_diff.total_seconds()
+ time_diff_ms = time_diff_seconds * 1000
+
+ # Format the row
+ row = f"{i},{timestamp.isoformat()},{value},{round(time_diff_seconds, 6)},{round(time_diff_ms, 3)}\n"
+ csv_content += row
+
+ # Write to file
+ with open('time_differences.csv', 'w', encoding='utf-8') as f:
+ f.write(csv_content)
+
+ print("CSV file 'time_differences.csv' created successfully!")
+
#print("@1", time.time() - st)
#first = 3300
#last = 3400
@@ -11244,7 +15109,7 @@ class WellApi:
line_part_t = [(x[0].timestamp(), x[1]) for x in line_part]
st = time.time()
cleaned_values_t = clean_data_pd(line_part_t, window=window, percentile=99)
- cleaned_values = add_boundary_points(cleaned_values_t, time_zone_s)
+ cleaned_values = cleaned_values_t #add_boundary_points(cleaned_values_t, time_zone_s)
#print("@2", time.time() - st)
#Lets add point in minute 0 and minute 1439
@@ -11254,17 +15119,23 @@ class WellApi:
#print("@3", time.time() - st)
sensor_data[sensor] = cleaned_values
- all_slices[device_id2_mac[device_id]] = sensor_data #use MAC instead of device_id, since device is sending data with MAC only
+
+
+ if len(device_details) > 4:
+ all_slices[device_id2_mac[device_id]] = sensor_data #use MAC instead of device_id, since device is sending data with MAC only
+ else:
+ all_slices[device_id] = sensor_data #use MAC instead of device_id, since device is sending data with MAC only
dataa = {}
dataa['Function'] = "single_slicedata"
dataa['devices_list'] = devices_list
dataa['all_slices'] = all_slices
+ dataa['radar_details'] = radar_details
dataa['time_zone_st'] = time_zone_s
dataa['well_id'] = well_id
resp.media = package_response(dataa)
resp.status = falcon.HTTP_200
- #return
+ return
elif function == "get_sensor_bucketed_data_by_room_sensor":
# Inputs:
# user_name and token
@@ -11323,7 +15194,11 @@ class WellApi:
chart_data = []
# example data in each element of devices_list is (266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]')
for well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to in devices_list:
- if location_name == location:
+ loc_and_desc = location_name
+ if description != None and description != "":
+ loc_and_desc = loc_and_desc + " " + description
+
+ if loc_and_desc == location:
line_part = ReadSensor3(device_id, sensor, epoch_from_utc, epoch_to_utc, data_type, radar_part, bucket_size)
window = sensor_legal_values[sensor][2]
line_part_t = []
@@ -11336,8 +15211,6 @@ class WellApi:
if units == "°F":#"America" in time_zone_s:
compressed_readings = CelsiusToFahrenheitList(compressed_readings)
-
-
sensor_data[sensor] = compressed_readings
chart_data.append({'name': location_name, 'data': compressed_readings})
result_dictionary['chart_data'] = chart_data
@@ -11553,7 +15426,7 @@ class WellApi:
sensor_list = sensor_list_loc.split(",")
device_ids_list = [device_id]
well_ids_list = [well_id]
- maps_dates, positions_list = GetDeploymentDatesBoth(deployment_id)
+ maps_dates, positions_list, timezone_s = GetDeploymentDatesBoth(deployment_id)
data_type = "RL"
#epoch_from_utc, epoch_to_utc = GetLocalTimeEpochsForDate(selected_date, time_zone_s) #>= #<
@@ -11669,6 +15542,164 @@ class WellApi:
resp.content_type = "text/html"
resp.text = blob_data
+ return
+ elif function == "get_deployment_j":
+ deployment_id = form_data.get('deployment_id')
+ time_zone_st = GetTimeZoneOfDeployment(deployment_id)
+ date = form_data.get('date')
+ if date == None:
+
+ # Get today's date
+ local_timezone = pytz.timezone(time_zone_st) # Replace with your local timezone
+ date = datetime.datetime.now(local_timezone).strftime('%Y-%m-%d')
+
+ #epoch_from_utc = int(datetime.datetime.strptime(date, "%Y-%m-%d").timestamp())
+ #devices_list, device_ids = GetProximityList(deployment_id, epoch_from_utc)
+
+ dataa = {}
+ dataa['Function'] = "deployment_details"
+ if privileges == "-1":
+ deployment = DeploymentDetails(deployment_id)
+ dataa['deployment_details'] = deployment
+ else:
+ privileges = privileges.split(",")
+ if deployment_id in privileges:
+ deployment = DeploymentDetails(deployment_id)
+ dataa['deployment_details'] = deployment
+
+
+ resp.media = package_response(dataa)
+ resp.status = falcon.HTTP_200
+
+ return
+ elif function == "set_floor_layout":
+ deployment_id = form_data.get('deployment_id')
+ layout = form_data.get('layout')
+
+ if privileges == "-1" or deployment_id in privileges:
+ ok = StoreFloorPlan(deployment_id, layout)
+ payload = {'ok': ok}
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+ else:
+ payload = {'ok': 0, 'error': "not allowed"}
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+
+ return
+ elif function == "get_floor_layout":
+ deployment_id = form_data.get('deployment_id')
+
+ dataa = {}
+ dataa['Function'] = "deployment_details"
+ if privileges == "-1":
+ layout = GetFloorPlan(deployment_id)
+ dataa['layout'] = layout
+ else:
+ privileges = privileges.split(",")
+ if deployment_id in privileges:
+ layout = GetFloorPlan(deployment_id)
+ dataa['layout'] = layout
+
+
+ resp.media = package_response(dataa)
+ resp.status = falcon.HTTP_200
+
+ return
+ elif function == "get_beneficiary":
+ user_id = form_data.get('user_id')
+ all_beneficiaries = ListBeneficiaries(privileges, user_id)
+ beneficiaries_list = []
+ for beneficiary_temp in all_beneficiaries:
+ beneficiaries_list.append(str(beneficiary_temp[0]))
+
+ dataa = {}
+ dataa['Function'] = "beneficiary_details"
+ if user_id in beneficiaries_list:
+ beneficiary = UserDetails(user_id)
+ #lets remove fields not relevant for beneficiary
+ try:
+ del beneficiary['time_edit']
+ except:
+ pass
+
+ try:
+ del beneficiary['user_edit']
+ except:
+ pass
+
+ try:
+ del beneficiary['access_to_deployments']
+ except:
+ pass
+ dataa['beneficiary_details'] = beneficiary
+ resp.media = package_response(dataa)
+ resp.status = falcon.HTTP_200
+ return
+ elif function == "get_caretaker":
+
+ user_name = form_data.get('user_name')
+
+
+ all_caretakers = ListCaretakers(privileges, user_name)
+ if len(all_caretakers) > 1:
+ user_id = form_data.get('user_id')
+ elif len(all_caretakers) == 1:
+ user_id = str(all_caretakers[0][0])
+ else:
+ user_id = None #no visible caretakers; the details lookup below is skipped
+
+ caretakers_list = []
+ for caretakers_temp in all_caretakers:
+ caretakers_list.append(str(caretakers_temp[0]))
+
+ dataa = {}
+ dataa['Function'] = "caretaker_details"
+ if user_id in caretakers_list:
+ caretaker = UserDetails(user_id)
+ #let's remove fields not relevant for a caretaker
+ try:
+ del caretaker['time_edit']
+ except:
+ pass
+
+ try:
+ del caretaker['user_edit']
+ except:
+ pass
+
+ dataa['caretaker_details'] = caretaker
+ resp.media = package_response(dataa)
+ resp.status = falcon.HTTP_200
+ return
+
+ elif function == "get_device":
+ device_id = form_data.get('device_id')
+ devices = GetVisibleDevices(privileges)
+ dataa = {}
+ dataa['Function'] = "device_details"
+ dataa['device_details'] = []
+ if privileges == "-1":
+ #device_det = GetDeviceDetails(device_id)
+ device_det = GetDeviceDetailsSingle(device_id)
+ if device_det['radar_threshold'] == None or device_det['radar_threshold'] == "":
+ device_det['radar_threshold'] = '["s3_max",12]'
+ dataa['device_details'] = device_det
+ else:
+ devices_list = []
+ for device_id_temp in devices:
+ devices_list.append(str(device_id_temp[0]))
+
+ if device_id in devices_list:
+ device_det = GetDeviceDetailsSingle(device_id)
+ if device_det['radar_threshold'] == None or device_det['radar_threshold'] == "":
+ device_det['radar_threshold'] = '["s3_max",12]'
+
+
+ dataa['device_details'] = device_det
+
+
+ resp.media = package_response(dataa)
+ resp.status = falcon.HTTP_200
+
return
elif function == "request_deployment_map_new":
st = time.time()
@@ -11676,7 +15707,7 @@ class WellApi:
deployment_id = form_data.get('deployment_id')
map_type = form_data.get('map_type')
print(f"$1 ----{time.time() - st}")
- maps_dates, positions_list = GetDeploymentDatesBoth(deployment_id)
+ maps_dates, positions_list, timezone_s = GetDeploymentDatesBoth(deployment_id)
print(f"$2 ----{time.time() - st}")
datee = form_data.get('date')
if maps_dates != []:
@@ -11704,6 +15735,7 @@ class WellApi:
maps_dates.sort(reverse = True)
dataa['maps_dates'] = maps_dates
dataa['device_count'] = len(positions_list)
+ dataa['time_zone'] = timezone_s
dataa['map_type'] = map_type
#MACs_list = GetMACsListSimple(positions_list)
@@ -11804,6 +15836,29 @@ class WellApi:
resp.media = package_response(dataa)
resp.status = falcon.HTTP_200
+ elif function == "get_deployment_details":
+ deployment_id = form_data.get('deployment_id')
+ group_id = form_data.get('group_id')
+ location = form_data.get('location')
+ if location == "0":
+ location = "All"
+ is_fresh = form_data.get('is_fresh')
+ matching_devices = GetMatchingDevicesComplete(privileges, group_id, deployment_id, location)
+ deployment = DeploymentDetails(deployment_id)
+ dataa = {}
+ dataa['Function'] = "devices_report"
+ if len(matching_devices) > 0:
+ dataa['devices'] = matching_devices
+ else:
+ dataa['devices'] = []
+
+ if len(deployment) > 0:
+ dataa['details'] = deployment
+ else:
+ dataa['details'] = {}
+ resp.media = package_response(dataa)
+ resp.status = falcon.HTTP_200
+
elif function == "device_form":
editing_device_id = form_data.get('editing_device_id')
@@ -11830,24 +15885,760 @@ class WellApi:
return
elif function == "get_raw_data":
- container = GetReference("/MAC")
- MAC = req_dict["MAC"][0]
- sensor = req_dict["sensor"][0]
- if "part" in req_dict:
- part = req_dict["part"][0]
- else:
- part = ""
- from_time = req_dict["from_time"][0]
- to_time = req_dict["to_time"][0]
- timezone_str = req_dict["tzone"][0]
- AddToLog("get_raw_data:" + str(MAC) +","+ str(sensor) + "," + str(from_time) + "," + str(to_time) + "," + part+ "," + timezone_str)
- #raw_data = GetRawSensorData(container, MAC, sensor, from_time, to_time, timezone_str)
- raw_data = GetRawSensorDataFromBlobStorage(MAC, sensor, part, from_time, to_time, timezone_str)
+ #container = GetReference("/MAC")
+ #MAC = req_dict["MAC"][0]
+ #sensor = req_dict["sensor"][0]
+ #if "part" in req_dict:
+ #part = req_dict["part"][0]
+ #else:
+ #part = ""
+ #from_time = req_dict["from_time"][0]
+ #to_time = req_dict["to_time"][0]
+ #timezone_str = req_dict["tzone"][0]
+ #AddToLog("get_raw_data:" + str(MAC) +","+ str(sensor) + "," + str(from_time) + "," + str(to_time) + "," + part+ "," + timezone_str)
+ ##raw_data = GetRawSensorData(container, MAC, sensor, from_time, to_time, timezone_str)
+ raw_data = [] #GetRawSensorDataFromBlobStorage(MAC, sensor, part, from_time, to_time, timezone_str)
data_payload = {'raw_data': raw_data}
resp.media = package_response(data_payload)
resp.status = falcon.HTTP_200
return
+ elif function == "get_presence_data":
+
+ deployment_id = form_data.get('deployment_id')
+ device_id_in_s = form_data.get('device_id')
+ device_id_in = None
+ refresh = form_data.get('refresh') == "1"
+
+ if privileges != "-1":
+ privileges_lst = privileges.split(",")
+ if deployment_id not in privileges_lst:
+ data_payload = {}
+ resp.media = package_response(data_payload)
+ resp.status = falcon.HTTP_200
+ return
+
+ filter = int(form_data.get('filter'))
+ ddate = form_data.get('date')
+ ddate = ddate.replace("_","-")
+ to_date = form_data.get('to_date')
+
+ if to_date == None:
+ to_date = ddate
+ else:
+ to_date = to_date.replace("_","-")
+
+ ddate, to_date = ensure_date_order(ddate, to_date)
+
+
+ date_obj = datetime.datetime.strptime(ddate, "%Y-%m-%d")
+ # Subtract one day
+ previous_day = date_obj - timedelta(days=1)
+ # Convert back to string
+ prev_date = previous_day.strftime("%Y-%m-%d")
+
+ data_type = form_data.get('data_type') #all, raw, presence, z-graph
+ if data_type == None or data_type == "":
+ data_type = "presence"
+
+ time_zone_s = GetTimeZoneOfDeployment(deployment_id)
+ timee = LocalDateToUTCEpoch(ddate, time_zone_s)+5 #add 5 s so the date boundary is avoided
+ devices_list, device_ids = GetProximityList(deployment_id, timee)
+
+ if device_id_in_s != None: #lets remove other devices, since asking for one
+ device_id_in = int(device_id_in_s)
+ device_ids = [id for id in device_ids if id == device_id_in]
+ devices_list = [device for device in devices_list if device[1] == device_id_in]
+
+ time_from_str, _ = GetLocalTimeForDate(ddate, time_zone_s)
+ _, time_to_str = GetLocalTimeForDate(to_date, time_zone_s)
+
+ time_from = datetime.datetime.strptime(time_from_str, '%Y-%m-%d %H:%M:%S%z')
+ time_to = datetime.datetime.strptime(time_to_str, '%Y-%m-%d %H:%M:%S%z')
+ epoch_time = calendar.timegm(time_from.utctimetuple())
+
+ presence_map = {}
+ presence_map["time_start"] = epoch_time
+ presence_map["time_zone"] = time_zone_s
+
+ # Calculate the difference in days
+ days_difference = (time_to - time_from).days
+
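+ #note: a day is modeled as 6 * 1440 = 8640 ten-second "deca" slots; the presence/raw arrays below use this resolution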
+ if data_type == "all" or data_type == "z-graph" or data_type == "multiple":
+
+ # Convert string to datetime object
+ date_obj = datetime.datetime.strptime(time_from_str, "%Y-%m-%d %H:%M:%S%z")
+ # Subtract one day
+ previous_day = date_obj - timedelta(days=1)
+
+ # Format back to string in the same format
+ time_from_z_str = previous_day.strftime("%Y-%m-%d %H:%M:%S%z")
+
+
+ device_id_2_threshold = {}
+ device_id_2_location = {0: "Outside"}
+
+ for details in devices_list:
+
+ well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = details #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]')
+ if radar_threshold_group_st == None:
+ radar_threshold_group_st = '["s3_max",12]' #last value is threshold to s28 composite
+
+ if len(radar_threshold_group_st) > 8:
+ radar_threshold_group = json.loads(radar_threshold_group_st)
+ else:
+ radar_threshold_group = ["s3_max",12]
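+ #radar_threshold_group is a JSON pair [signal_name, threshold]; a deca counts as presence when that signal exceeds the threshold (see the reference loop commented out further below)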
+
+ print(well_id, radar_threshold_group)
+ device_id_2_location[device_id] = location_name
+ device_id_2_threshold[device_id] = radar_threshold_group
+
+
+ ids_list = []
+ well_ids = []
+ id2well_id = {}
+ radar_fields_of_interest = []
+ device_field_indexes = {}
+ for details in devices_list:
+
+ if device_id_in == None or details[1] == device_id_in:
+ threshold_str = details[5]
+ try:
+ threshold_lst = json.loads(threshold_str)
+ except:
+ threshold_lst = ["s3",12]
+ #threshold_lst = ["s3_max",12]
+
+ radar_field = threshold_lst[0]
+ #since we are getting 10 sec data, min or max is no longer needed...
+ radar_field = radar_field.split("_")[0]
+ if radar_field not in radar_fields_of_interest:
+ device_field_indexes[radar_field] = len(radar_fields_of_interest)
+ radar_fields_of_interest.append(radar_field)
+
+ ids_list.append(details[1])
+ id2well_id[details[1]] = details[0]
+ well_ids.append(details[0])
+ presence_map["well_ids"] = well_ids
+
+
+ devices_list_str = ','.join(str(device[1]) for device in devices_list)
+ #sql = get_deployment_radar_only_colapsed_query(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest)
+ sql = get_deployment_radar_10sec_snapped_query(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest)
+ print(sql)
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ #zsql = get_deployment_radar_only_colapsed_query(devices_list_str, time_from_z_str, time_to_str, ids_list, radar_fields_of_interest)
+ zsql = get_deployment_radar_10sec_snapped_query(devices_list_str, time_from_z_str, time_to_str, ids_list, radar_fields_of_interest)
+ print(zsql)
+
+ with get_db_connection() as conn:
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ my_data = None
+ myz_data = None
+
+ my_data = cur.fetchall()
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ cur.execute(zsql)
+ myz_data = cur.fetchall()
+
+ if my_data != None:
+
+ device_id_2_threshold = {}
+ device_id_2_location = {0: "Outside"}
+ row_nr_2_device_id = {}
+ cnt = 0
+ row_nr_2_device_id[0] = 0
+
+ #presence_map['longpresence'] and temporary_map_day_plus are similar, except one is used for the Z-graph and the other for multiple-person detection
+
+ if data_type == "presence" or data_type == "all" or data_type == "z-graph" or data_type == "multiple":
+ presence_map['presence'] = {}
+ presence_map['longpresence'] = {}
+
+ if data_type == "raw" or data_type == "all":
+ presence_map['raw'] = {}
+
+ for details in devices_list:
+ #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]','')
+ well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = details
+
+ if data_type == "raw" or data_type == "all":
+ zeros_list = [0] * 6 * 1440 * days_difference
+ presence_map['raw'][well_id] = zeros_list
+
+ if data_type == "presence" or data_type == "all" or data_type == "z-graph" or data_type == "multiple":
+ zeros_list = [0] * 6 * 1440 * days_difference
+ presence_map['presence'][well_id] = zeros_list
+
+
+ #presence_map[][well_id] = zeros_list
+ cnt += 1
+ row_nr_2_device_id[cnt] = well_id
+
+ if radar_threshold_group_st == None:
+ radar_threshold_group_st = '["s3",12]' #last value is threshold to s28 composite
+
+ if len(radar_threshold_group_st) > 8:
+ radar_threshold_group = json.loads(radar_threshold_group_st)
+ else:
+ radar_threshold_group = ["s3",12]
+
+ device_id_2_location[well_id] = location_name
+ device_id_2_threshold[well_id] = radar_threshold_group
+
+ start_time_ = my_data[0][0]
+ parsed_time = datetime.datetime.strptime(time_from_str, '%Y-%m-%d %H:%M:%S%z')
+
+ start_time = datetime.datetime(
+ parsed_time.year,
+ parsed_time.month,
+ parsed_time.day,
+ parsed_time.hour - 7, # Adjust for UTC-7
+ parsed_time.minute,
+ parsed_time.second,
+ tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200))
+ )
+
+ presence_map = optimized_radar_processing(my_data, start_time_, id2well_id, device_id_2_threshold, device_field_indexes, presence_map, data_type)
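+ #optimized_radar_processing is assumed to do what the retired loop below did: walk the query rows and fill the per-well deca arrays (0/1 presence and/or raw values)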
+
+ #last_device_id = 0
+ #for radar_read in my_data: #(datetime.datetime(2025, 4, 28, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200))), 559, 6.512857142857143, 6.91, 9.28)
+ #local_time = radar_read[0]
+ #deca = int((local_time - start_time).total_seconds() / 10)
+ #device_id = radar_read[1]
+ #if device_id != last_device_id:
+ #last_device_id = device_id
+ #if data_type == "raw" or data_type == "all":
+ #days_decas = len(presence_map['raw'][id2well_id[device_id]])
+ #else:
+ #days_decas = len(presence_map['presence'][id2well_id[device_id]])
+ #well_id = id2well_id[device_id]
+ #radar_threshold_group_st = device_id_2_threshold[well_id]
+ #threshold_sig, threshold = radar_threshold_group_st
+ #threshold_sig = threshold_sig.split("_")[0]
+
+ #radar_val = radar_read[2+device_field_indexes[threshold_sig]]
+ #if data_type == "presence" or data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ #if radar_val > threshold:
+ #if deca < days_decas:
+ #presence_map['presence'][id2well_id[device_id]][deca] = 1
+
+ #if data_type == "raw" or data_type == "all":
+ #if deca < days_decas:
+ #presence_map['raw'][id2well_id[device_id]][deca] = radar_val
+
+
+ if myz_data != None:
+ temporary_map_day_plus = {}
+ presence_map['z_graph'] = {}
+ for details in devices_list:
+ #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]','')
+ well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = details
+
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ zeros_list = [0] * 6 * 1440 * (days_difference + 1) #+1 is for previous day
+
+ presence_map['z_graph'][well_id] = [] #just place holder
+ temporary_map_day_plus[well_id] = zeros_list
+ presence_map['longpresence'][well_id] = zeros_list #just place holder
+
+
+ parsed_time = datetime.datetime.strptime(time_from_z_str, '%Y-%m-%d %H:%M:%S%z')
+
+ start_time = datetime.datetime(
+ parsed_time.year,
+ parsed_time.month,
+ parsed_time.day,
+ parsed_time.hour - 7, # Adjust for UTC-7
+ parsed_time.minute,
+ parsed_time.second,
+ tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200))
+ )
+
+
+ #start_time_ = myz_data[0][0]
+ st = time.time()
+ device_lookup_cache = {}
+ threshold_cache = {}
+ temporary_map_day_plus = optimized_processing(myz_data, start_time, id2well_id, device_id_2_threshold, device_field_indexes, temporary_map_day_plus, data_type)
+
+ if data_type == "all" or data_type == "z-graph" or data_type == "presence" or data_type == "multiple":
+ overlaps_str = GetOverlapps(deployment_id)
+ overlaps_lst = []
+ if overlaps_str != None:
+ if ":" in overlaps_str:
+ overlaps_lst = json.loads(overlaps_str)
+ temporary_map_day_plus = ClearOverlaps(temporary_map_day_plus, overlaps_lst)
+
+ if data_type == "all" or data_type == "z-graph" or data_type == "presence" or data_type == "multiple":
+ for device_id in ids_list:
+ device_id_str = str(device_id)
+
+ if data_type == "presence" or data_type == "all":
+ if filter > 1:
+ #presence_list = filter_short_groups_numpy(presence_map["presence"][id2well_id[device_id]], filter, device_id, ddate+"-"+to_date)
+ #cnt = 0
+ #device_id_str = 524 Kitchen
+ #for item in presence_map["presence"][id2well_id[device_id]]:
+ # if item > 0:
+ # print(cnt, item)
+ # cnt += 1
+
+ #3302 = 1 should not be filtered
+
+ inlist = presence_map["presence"][id2well_id[device_id]]
+ #presence_list = filter_short_groups_c_wc(presence_map["presence"][id2well_id[device_id]], filter, device_id_str, ddate, to_date, time_zone_s, refresh)
+ presence_list = filter_short_groups_c_wc(inlist, filter, device_id_str, ddate, to_date, time_zone_s, refresh)
+
+ #cnt = 0 #here First non 0 is at 12006 That is wrong!
+ #for item in presence_list:
+ # if item > 0:
+ # print(cnt, item)
+ # cnt += 1
+ #presence_listt = filter_short_groupss(presence_map["presence"][id2well_id[device_id]], filter)
+ #if presence_list != presence_listt:
+ # print("stop")
+ if data_type != "presence":
+ #longpresence_list = filter_short_groups_numpy(presence_map["longpresence"][id2well_id[device_id]], filter, device_id, ddate+"-"+to_date)
+ longpresence_list = filter_short_groups_c_wc(presence_map["longpresence"][id2well_id[device_id]], filter, device_id_str, prev_date, to_date, time_zone_s)
+ presence_map["presence"][id2well_id[device_id]] = presence_list
+ if data_type != "presence":
+ presence_map["longpresence"][id2well_id[device_id]] = longpresence_list
+
+ else: #straight decas
+ presence_list = presence_map["presence"][id2well_id[device_id]]
+
+ if data_type != "presence":
+ longpresence_list = presence_map["longpresence"][id2well_id[device_id]]
+
+
+ if data_type == "z-graph":
+ if filter > 1:
+ longpresence_list = filter_short_groups_c_wc(presence_map["longpresence"][id2well_id[device_id]], filter, device_id_str, prev_date, to_date, time_zone_s, refresh)
+ presence_map["longpresence"][id2well_id[device_id]] = longpresence_list
+
+ else: #straight decas
+ longpresence_list = presence_map["longpresence"][id2well_id[device_id]]
+
+
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ if filter > 1: #straight decas
+ #presence_list1 = filter_short_high_groups_iterative_analog_orig(temporary_map_day_plus[id2well_id[device_id]], filter)
+ presence_list1 = filter_short_high_groups_iterative_analog(temporary_map_day_plus[id2well_id[device_id]], filter)
+ #if (presence_list1 == presence_list2):
+ # print("OK!")
+ #else:
+ # print("WRONG!")
+ else:
+ presence_list1 = temporary_map_day_plus[id2well_id[device_id]]
+
+ temporary_map_day_plus[id2well_id[device_id]] = presence_list1
+
+
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ for device_id in ids_list:
+ #print(device_id_2_threshold[id2well_id[device_id]])
+ presence_list = CreateZGraph(id2well_id[device_id], presence_map["longpresence"][id2well_id[device_id]]) #temporary_map_day_plus[id2well_id[device_id]])
+ presence_map["z_graph"][id2well_id[device_id]] = presence_list
+
+
+ if data_type == "all" or data_type == "multiple":
+ #let's create the "multiple" series
+ seen_at_lst, seen_where_list_uf = DetectMultiple(temporary_map_day_plus, overlaps_lst)
+ #here seen_at is straight decas
+ #seen_at = [1 if x >= 2 else 0 for x in seen_at]
+ pers_in_deka = []
+ dekas_in_day = 6 * 1440
+ for i in range(dekas_in_day, len(seen_where_list_uf)):
+ n_pers = seen_where_list_uf[i]
+ pers_in_deka.append(100*len(n_pers))
+
+ seen_at = filter_out_short_highs_iterative(seen_at_lst, filter) #this converts decas into compressed format!
+ seen_at_lst = Decompress(seen_at)
+ pers_in_deka = filter_out_short_same_groups_iterative(pers_in_deka, filter)
+ persons_decompressed = Decompress(pers_in_deka)
+ persons = Compress(persons_decompressed)
+
+ multiple_list = CreateZGraph("multiple", seen_at_lst)
+ presence_map["multiple"] = multiple_list
+ presence_map["persons"] = persons
+
+ if data_type == "z-graph":
+ if "raw" in presence_map:
+ del presence_map["raw"]
+ if "presence" in presence_map:
+ del presence_map["presence"]
+ if "longpresence" in presence_map:
+ del presence_map["longpresence"]
+
+ if data_type == "multiple":
+ if "raw" in presence_map:
+ del presence_map["raw"]
+ if "presence" in presence_map:
+ del presence_map["presence"]
+ if "longpresence" in presence_map:
+ del presence_map["longpresence"]
+ if "z_graph" in presence_map:
+ del presence_map["z_graph"]
+
+ if "presence" in presence_map:
+ presence_map["presence"] = CompressList(presence_map["presence"])
+
+ data_payload = presence_map
+ resp.media = package_response(data_payload)
+ resp.status = falcon.HTTP_200
+ return
+
+ elif function == "get_zgraph_data":
+
+ deployment_id = form_data.get('deployment_id')
+
+ if privileges != "-1":
+ privileges_lst = privileges.split(",")
+ if deployment_id not in privileges_lst:
+ data_payload = {}
+ resp.media = package_response(data_payload)
+ resp.status = falcon.HTTP_200
+ return
+
+ device_id = int(form_data.get('device_id'))
+
+ devices = GetVisibleDevices(privileges)
+
+ if not any(item[0] == device_id for item in devices):
+ data_payload = {}
+ resp.media = package_response(data_payload)
+ resp.status = falcon.HTTP_200
+ return
+
+ filter = int(form_data.get('filter'))
+ ddate = form_data.get('date')
+ ddate = ddate.replace("_","-")
+ to_date = form_data.get('to_date')
+
+ if to_date == None:
+ to_date = ddate
+ else:
+ to_date = to_date.replace("_","-")
+
+ ddate, to_date = ensure_date_order(ddate, to_date)
+ data_type = "z-graph"
+
+ time_zone_s = GetTimeZoneOfDeployment(deployment_id)
+ timee = LocalDateToUTCEpoch(ddate, time_zone_s)+5 #add 5 s so the date boundary is avoided
+ devices_list, device_ids = GetProximityList(deployment_id, timee)
+
+ time_from_str, _ = GetLocalTimeForDate(ddate, time_zone_s)
+ _, time_to_str = GetLocalTimeForDate(to_date, time_zone_s)
+
+ time_from = datetime.datetime.strptime(time_from_str, '%Y-%m-%d %H:%M:%S%z')
+ time_to = datetime.datetime.strptime(time_to_str, '%Y-%m-%d %H:%M:%S%z')
+ epoch_time = calendar.timegm(time_from.utctimetuple())
+
+ presence_map = {}
+ presence_map["time_start"] = epoch_time
+ presence_map["time_zone"] = time_zone_s
+
+ # Calculate the difference in days
+ days_difference = (time_to - time_from).days
+
+ # Convert string to datetime object
+ date_obj = datetime.datetime.strptime(time_from_str, "%Y-%m-%d %H:%M:%S%z")
+ # Subtract one day
+ previous_day = date_obj - timedelta(days=1)
+
+ # Format back to string in the same format
+ time_from_z_str = previous_day.strftime("%Y-%m-%d %H:%M:%S%z")
+
+
+ device_id_2_threshold = {}
+ device_id_2_location = {0: "Outside"}
+
+ for details in devices_list:
+
+ well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = details #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]')
+
+ if radar_threshold_group_st == None:
+ radar_threshold_group_st = '["s3_max",12]' #last value is threshold to s28 composite
+
+ if len(radar_threshold_group_st) > 8:
+ radar_threshold_group = json.loads(radar_threshold_group_st)
+ else:
+ radar_threshold_group = ["s3_max",12]
+
+ print(well_id, radar_threshold_group)
+
+ device_id_2_location[device_id] = location_name
+ device_id_2_threshold[device_id] = radar_threshold_group
+
+ ids_list = []
+ well_ids = []
+ id2well_id = {}
+ radar_fields_of_interest = []
+ device_field_indexes = {}
+ for details in devices_list:
+ threshold_str = details[5]
+ try:
+ threshold_lst = json.loads(threshold_str)
+ except:
+ threshold_lst = ["s3",12]
+ #threshold_lst = ["s3_max",12]
+
+ radar_field = threshold_lst[0]
+ #since we are getting 10 sec data, min or max is no longer needed...
+ radar_field = radar_field.split("_")[0]
+ if radar_field not in radar_fields_of_interest:
+ device_field_indexes[radar_field] = len(radar_fields_of_interest)
+ radar_fields_of_interest.append(radar_field)
+
+ ids_list.append(details[1])
+ id2well_id[details[1]] = details[0]
+ well_ids.append(details[0])
+ presence_map["well_ids"] = well_ids
+
+
+ devices_list_str = ','.join(str(device[1]) for device in devices_list)
+ #sql = get_deployment_radar_only_colapsed_query(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest)
+ sql = get_deployment_radar_10sec_snapped_query(devices_list_str, time_from_str, time_to_str, ids_list, radar_fields_of_interest)
+ print(sql)
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ #zsql = get_deployment_radar_only_colapsed_query(devices_list_str, time_from_z_str, time_to_str, ids_list, radar_fields_of_interest)
+ zsql = get_deployment_radar_10sec_snapped_query(devices_list_str, time_from_z_str, time_to_str, ids_list, radar_fields_of_interest)
+ print(zsql)
+
+ with get_db_connection() as conn:
+ with conn.cursor() as cur:
+ cur.execute(sql)
+ my_data = None
+ myz_data = None
+
+ my_data = cur.fetchall()
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ cur.execute(zsql)
+ myz_data = cur.fetchall()
+
+ if my_data != None:
+
+ device_id_2_threshold = {}
+ device_id_2_location = {0: "Outside"}
+ row_nr_2_device_id = {}
+ cnt = 0
+ row_nr_2_device_id[0] = 0
+
+ #presence_map['longpresence'] and temporary_map_day_plus are similar, except one is used for the Z-graph and the other for multiple-person detection
+
+ if data_type == "presence" or data_type == "all" or data_type == "z-graph" or data_type == "multiple":
+ presence_map['presence'] = {}
+ presence_map['longpresence'] = {}
+
+ if data_type == "raw" or data_type == "all":
+ presence_map['raw'] = {}
+
+ for details in devices_list:
+ #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]','')
+ well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = details
+
+ if data_type == "raw" or data_type == "all":
+ zeros_list = [0] * 6 * 1440 * days_difference
+ presence_map['raw'][well_id] = zeros_list
+
+ if data_type == "presence" or data_type == "all" or data_type == "z-graph" or data_type == "multiple":
+ zeros_list = [0] * 6 * 1440 * days_difference
+ presence_map['presence'][well_id] = zeros_list
+
+
+ #presence_map[][well_id] = zeros_list
+ cnt += 1
+ row_nr_2_device_id[cnt] = well_id
+
+ if radar_threshold_group_st == None:
+ radar_threshold_group_st = '["s3",12]' #last value is threshold to s28 composite
+
+ if len(radar_threshold_group_st) > 8:
+ radar_threshold_group = json.loads(radar_threshold_group_st)
+ else:
+ radar_threshold_group = ["s3",12]
+
+ device_id_2_location[well_id] = location_name
+ device_id_2_threshold[well_id] = radar_threshold_group
+
+ start_time_ = my_data[0][0]
+ parsed_time = datetime.datetime.strptime(time_from_str, '%Y-%m-%d %H:%M:%S%z')
+
+ start_time = datetime.datetime(
+ parsed_time.year,
+ parsed_time.month,
+ parsed_time.day,
+ parsed_time.hour - 7, # Adjust for UTC-7
+ parsed_time.minute,
+ parsed_time.second,
+ tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200))
+ )
+
+ presence_map = optimized_radar_processing(my_data, start_time_, id2well_id, device_id_2_threshold, device_field_indexes, presence_map, data_type)
+
+ #last_device_id = 0
+ #for radar_read in my_data: #(datetime.datetime(2025, 4, 28, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200))), 559, 6.512857142857143, 6.91, 9.28)
+ #local_time = radar_read[0]
+ #deca = int((local_time - start_time).total_seconds() / 10)
+ #device_id = radar_read[1]
+ #if device_id != last_device_id:
+ #last_device_id = device_id
+ #if data_type == "raw" or data_type == "all":
+ #days_decas = len(presence_map['raw'][id2well_id[device_id]])
+ #else:
+ #days_decas = len(presence_map['presence'][id2well_id[device_id]])
+ #well_id = id2well_id[device_id]
+ #radar_threshold_group_st = device_id_2_threshold[well_id]
+ #threshold_sig, threshold = radar_threshold_group_st
+ #threshold_sig = threshold_sig.split("_")[0]
+
+ #radar_val = radar_read[2+device_field_indexes[threshold_sig]]
+ #if data_type == "presence" or data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ #if radar_val > threshold:
+ #if deca < days_decas:
+ #presence_map['presence'][id2well_id[device_id]][deca] = 1
+
+ #if data_type == "raw" or data_type == "all":
+ #if deca < days_decas:
+ #presence_map['raw'][id2well_id[device_id]][deca] = radar_val
+
+
+ if myz_data != None:
+ temporary_map_day_plus = {}
+ presence_map['z_graph'] = {}
+ for details in devices_list:
+ #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]','')
+ well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = details
+
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ zeros_list = [0] * 6 * 1440 * (days_difference + 1) #+1 is for previous day
+
+ presence_map['z_graph'][well_id] = [] #just place holder
+ temporary_map_day_plus[well_id] = zeros_list
+ presence_map['longpresence'][well_id] = zeros_list #just place holder
+
+
+ parsed_time = datetime.datetime.strptime(time_from_z_str, '%Y-%m-%d %H:%M:%S%z')
+
+ start_time = datetime.datetime(
+ parsed_time.year,
+ parsed_time.month,
+ parsed_time.day,
+ parsed_time.hour - 7, # Adjust for UTC-7
+ parsed_time.minute,
+ parsed_time.second,
+ tzinfo=datetime.timezone(datetime.timedelta(days=-1, seconds=61200))
+ )
+
+
+ #start_time_ = myz_data[0][0]
+ st = time.time()
+ device_lookup_cache = {}
+ threshold_cache = {}
+ temporary_map_day_plus = optimized_processing(myz_data, start_time, id2well_id, device_id_2_threshold, device_field_indexes, temporary_map_day_plus, data_type)
+
+ if data_type == "all" or data_type == "z-graph" or data_type == "presence" or data_type == "multiple":
+ overlaps_str = GetOverlapps(deployment_id)
+ overlaps_lst = []
+ if overlaps_str != None:
+ if ":" in overlaps_str:
+ overlaps_lst = json.loads(overlaps_str)
+ temporary_map_day_plus = ClearOverlaps(temporary_map_day_plus, overlaps_lst)
+
+ if data_type == "all" or data_type == "z-graph" or data_type == "presence" or data_type == "multiple":
+ for device_id in ids_list:
+ device_id_str = str(device_id)
+ if data_type == "presence" or data_type == "all" or data_type == "z-graph":
+ if filter > 1:
+ #presence_list = filter_short_groups_numpy(presence_map["presence"][id2well_id[device_id]], filter, device_id, ddate+"-"+to_date)
+ presence_list = filter_short_groups_c_wc(presence_map["presence"][id2well_id[device_id]], filter, device_id_str, ddate, to_date, time_zone_s)
+ #presence_listt = filter_short_groupss(presence_map["presence"][id2well_id[device_id]], filter)
+ #if presence_list != presence_listt:
+ # print("stop")
+ if data_type != "presence":
+ #longpresence_list = filter_short_groups_numpy(presence_map["longpresence"][id2well_id[device_id]], filter, device_id, ddate+"-"+to_date)
+ longpresence_list = filter_short_groups_c_wc(presence_map["longpresence"][id2well_id[device_id]], filter, device_id_str, prev_date, to_date, time_zone_s)
+ #longpresence_listt = filter_short_groupss(presence_map["longpresence"][id2well_id[device_id]], filter)
+ #if longpresence_list != longpresence_listt:
+ # print("stop")
+ # store_to_file(presence_map["longpresence"][id2well_id[device_id]], "test_list")
+ presence_map["presence"][id2well_id[device_id]] = presence_list
+ if data_type != "presence":
+ presence_map["longpresence"][id2well_id[device_id]] = longpresence_list
+
+ else: #straight decas
+ presence_list = presence_map["presence"][id2well_id[device_id]]
+
+ if data_type != "presence":
+ longpresence_list = presence_map["longpresence"][id2well_id[device_id]]
+
+
+
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ if filter > 1: #straight decas
+ presence_list1 = filter_short_high_groups_iterative_analog(temporary_map_day_plus[id2well_id[device_id]], filter)
+ else:
+ presence_list1 = temporary_map_day_plus[id2well_id[device_id]]
+
+ temporary_map_day_plus[id2well_id[device_id]] = presence_list1
+
+
+ if data_type == "z-graph" or data_type == "all" or data_type == "multiple":
+ for device_id in ids_list:
+ #print(device_id_2_threshold[id2well_id[device_id]])
+ presence_list = CreateZGraph(id2well_id[device_id], presence_map["longpresence"][id2well_id[device_id]]) #temporary_map_day_plus[id2well_id[device_id]])
+ presence_map["z_graph"][id2well_id[device_id]] = presence_list
+
+
+ if data_type == "all" or data_type == "multiple":
+ #let's create the "multiple" series
+ seen_at_lst, seen_where_list_uf = DetectMultiple(temporary_map_day_plus, overlaps_lst)
+ #here seen_at is straight decas
+ #seen_at = [1 if x >= 2 else 0 for x in seen_at]
+ pers_in_deka = []
+ dekas_in_day = 6 * 1440
+ for i in range(dekas_in_day, len(seen_where_list_uf)):
+ n_pers = seen_where_list_uf[i]
+ pers_in_deka.append(100*len(n_pers))
+
+ seen_at = filter_out_short_highs_iterative(seen_at_lst, filter) #this converts decas into compressed format!
+ seen_at_lst = Decompress(seen_at)
+ pers_in_deka = filter_out_short_same_groups_iterative(pers_in_deka, filter)
+ persons_decompressed = Decompress(pers_in_deka)
+ persons = Compress(persons_decompressed)
+
+ multiple_list = CreateZGraph("multiple", seen_at_lst)
+ presence_map["multiple"] = multiple_list
+ presence_map["persons"] = persons
+
+ if data_type == "z-graph":
+ if "raw" in presence_map:
+ del presence_map["raw"]
+ if "presence" in presence_map:
+ del presence_map["presence"]
+ if "longpresence" in presence_map:
+ del presence_map["longpresence"]
+
+ if data_type == "multiple":
+ if "raw" in presence_map:
+ del presence_map["raw"]
+ if "presence" in presence_map:
+ del presence_map["presence"]
+ if "longpresence" in presence_map:
+ del presence_map["longpresence"]
+ if "z_graph" in presence_map:
+ del presence_map["z_graph"]
+
+ if "presence" in presence_map:
+ presence_map["presence"] = CompressList(presence_map["presence"])
+
+ data_payload = presence_map
+ resp.media = package_response(data_payload)
+ resp.status = falcon.HTTP_200
+ return
+
elif function == "get_candle_data":
container = GetReference("/MAC")
MAC = req_dict["MAC"][0]
@@ -11889,18 +16680,29 @@ class WellApi:
result_list = []
first_s = form_data.get('first')
last_s = form_data.get('last')
-
- try:
- first = int(first_s)
- except ValueError:
- first = 0
-
- try:
- last = int(last_s)
- except ValueError:
- last = 1000000
-
user_id = form_data.get('user_id')
+ first = 0
+ last = 1000000
+
+ try:
+ if first_s != None:
+ first = int(first_s)
+ except ValueError:
+ pass
+
+ try:
+ if last_s != None:
+ last = int(last_s)
+ except ValueError:
+ pass
+
+ #user_id = form_data.get('user_id')
+ if user_id == "" or user_id == None:
+ #user_id = GetUserId(user_name)
+ privileges, user_id = GetPriviledgesAndUserId(user_name)
+ else:
+ privileges = GetPriviledgesOnly(user_name)
+
all_deployments = ListDeployments(privileges, user_id)
cnt = 0
@@ -11917,13 +16719,120 @@ class WellApi:
resp.media = package_response(payload)
resp.status = falcon.HTTP_200
return
+ elif function == "device_list":
+ result_list = []
+ first_s = form_data.get('first')
+ last_s = form_data.get('last')
+
+ try:
+ first = int(first_s)
+ except (TypeError, ValueError): #first_s may be None or non-numeric
+ first = 0
+
+ try:
+ last = int(last_s)
+ except (TypeError, ValueError): #last_s may be None or non-numeric
+ last = 1000000
+
+ #user_id = form_data.get('user_id')
+
+ devices = GetVisibleDevices(privileges)
+
+ payload = {'result_list': devices}
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+ return
+
+ elif function == "device_list_by_deployment":
+ result_list = []
+ first_s = form_data.get('first')
+ last_s = form_data.get('last')
+ deployment_id = form_data.get('deployment_id')
+ try:
+ first = int(first_s)
+ except (TypeError, ValueError): #first_s may be None or non-numeric
+ first = 0
+
+ try:
+ last = int(last_s)
+ except (TypeError, ValueError): #last_s may be None or non-numeric
+ last = 1000000
+
+
+ devices = [] #default when the deployment is not visible to this user
+ if privileges == "-1":
+ devices = GetVisibleDevices(deployment_id)
+ else:
+ privileges = privileges.split(",")
+ if deployment_id in privileges:
+ devices = GetVisibleDevices(deployment_id)
+
+ payload = {'result_list': devices}
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+ return
+
+ elif function == "device_list_4_gui":
+ result_list = []
+ deploymentData = []
+ deviceData = []
+ macs_list = []
+ user_id = GetUserId(user_name)
+ all_deployments = ListDeployments(privileges, user_id)
+ #{'deployment_id': 21, 'beneficiary_id': 25, 'caretaker_id': 1, 'owner_id': 1, 'installer_id': 1, 'address_street': '661 Encore Way', 'address_city': 'San Jose', 'address_zip': '95134', 'address_state': 'CA', 'address_country': 'USA', 'devices': '["64B70888FAB0","64B70888F860","64B70888F6F0","64B708896BDC","64B708897428","64B70888FA84","64B70889062C"]', 'wifis': '', 'persons': 1, 'gender': 1, 'race': 1, 'born': 1940, 'pets': 0, 'time_zone': 'America/Los_Angeles'}
+ MAC2Deployment = {}
+
+ for deployment in all_deployments:
+ beneficiary_id = deployment['beneficiary_id']
+ user = GetNameFromUserId(beneficiary_id)
+ name = f"{user[1]} {user[2]}"
+ deploymentData.append({'deployment_id': str(deployment['deployment_id']), 'name': name})
+ devices = deployment['devices']
+ if devices != None:
+
+ devices_list = ToList(devices)
+ for device in devices_list:
+ macs_list.append(device)
+ MAC2Deployment[device] = deployment['deployment_id']
+ #deviceData.append({'well_id': device[0], 'mac': device[1]})
+
+ deployment_id_list = []
+ deviceData = []
+
+ #row_data = [device_id, well_id, mac, last_message_epoch, location_names[location_id], description, deployment_ids[cnt][0]]
+
+ with get_db_connection() as conn:
+ with conn.cursor() as cur:
+ device_ids, device_list = MACsToWellIds(cur, macs_list)
+
+
+ for device in device_list:
+ if MAC2Deployment[device[4]] != "":
+ deviceData.append({'well_id': device[0], 'mac': device[4], 'room_name': device[2], 'deployment_id': MAC2Deployment[device[4]]})
+
+ #deploymentData = [{'deployment_id': '21', 'name': 'Robert Zmrzli House'}, {'deployment_id': '36', 'name': 'Fred Zmrzli Apartment'}]
+ #deviceData = [{ 'well_id': '300', 'mac': '64B70888F6F0', 'room_name': 'Living Room', 'deployment_id': '21' }, { 'well_id': '301', 'mac': '64B70888F6F1', 'room_name': 'Bathroom Main', 'deployment_id': '36' }]
+
+ payload = {
+ 'status': "success", 'deploymentData': deploymentData, 'deviceData': deviceData
+ }
+
+ logger.debug(f"device_list_4_gui------ {payload} ------------------------------------------")
+
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+ return
elif function == "caretaker_form":
editing_user_id = form_data.get('editing_user_id')
email = form_data.get('email')
+ user_id = form_data.get('user_id')
+ if "@" not in email:
+ resp.media = package_response("Missing or illegal 'email' parameter", HTTP_400)
+ return
- if "@" in email:
- ok = StoreCaretaker2DB(form_data, editing_user_id)
+ print(privileges)
+ if privileges == "-1":
+ ok = StoreCaretaker2DB(form_data, editing_user_id, user_id)
if ok == 1:
payload = {'ok': ok}
resp.media = package_response(payload)
@@ -11934,10 +16843,14 @@ class WellApi:
resp.media = package_response(payload)
resp.status = falcon.HTTP_200
return
- else:
- resp.media = package_response("Missing or illegal 'email' parameter", HTTP_400)
+
+ elif "-1" in privileges:
+ payload = {'ok': 0, 'error': "Not allowed!"}
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
return
+
elif function == "caretaker_delete":
if privileges == "-1":
ok = DeleteRecordFromDB(form_data)
@@ -11965,7 +16878,7 @@ class WellApi:
last = 1000000
if privileges == "-1":
- all_caretakers = ListCaretakers()
+ all_caretakers = ListCaretakers(privileges, user_name)
cnt = 0
@@ -11976,7 +16889,18 @@ class WellApi:
result_list.append(caretaker_min_object)
if cnt > last:
break
+ elif "-1" in privileges:
+ all_caretakers = ListCaretakers(privileges, user_name)
+ cnt = 0
+
+ for caretaker in all_caretakers:
+ cnt += 1
+ if cnt >= first:
+ caretaker_min_object = {"user_id": caretaker[0], "email": caretaker[3], "first_name": caretaker[5], "last_name": caretaker[6]}
+ result_list.append(caretaker_min_object)
+ if cnt > last:
+ break
payload = {'result_list': result_list}
resp.media = package_response(payload)
resp.status = falcon.HTTP_200
@@ -11985,15 +16909,16 @@ class WellApi:
elif function == "beneficiary_form":
editing_user_id = form_data.get('editing_user_id')
email = form_data.get('email')
+ user_id = GetUserId(user_name)
if "@" in email:
- ok = StoreBeneficiary2DB(form_data, editing_user_id)
+ ok, error_string = StoreBeneficiary2DB(form_data, editing_user_id, user_id)
if ok == 1:
payload = {'ok': ok}
resp.media = package_response(payload)
resp.status = falcon.HTTP_200
return
else:
- payload = {'ok': ok, 'error': debug_string}
+ payload = {'ok': ok, 'error': error_string}
resp.media = package_response(payload)
resp.status = falcon.HTTP_200
return
@@ -12048,6 +16973,119 @@ class WellApi:
elif function == "activities_report_details":
deployment_id = form_data.get('deployment_id')
+
+ timezone_str = GetTimeZoneOfDeployment(deployment_id)
+ filterr = form_data.get('filter')
+ if filterr == None:
+ filterr = 6
+ else:
+ filterr = int(filterr)
+
+ refresh = form_data.get('refresh') == "1"
+ ddate = current_date_at_tz(timezone_str)
+ timee = LocalDateToUTCEpoch(ddate, timezone_str)+5 #add 5 s so the date boundary is avoided
+ devices_list, device_ids = GetProximityList(deployment_id, timee)
+
+ #Here we need to add per day: (all based on Z-graph data!)
+ #Bathroom visits number
+ #Bathroom time spent
+ #Sleep wakes number (breaks in the Z-graph during the 10PM to 9AM period indicate wakes)
+ #Sleep length (for now, the sum of all time seen in the bedroom)
+ #Kitchen visits number
+ #Kitchen time spent
+ #Most frequented room visits number
+ #Most frequented room time spent
+
+ #Let's find the device_id of the bathroom sensor
+
+
+ bathroom_device_id, location_ba, bathroom_well_id = FindDeviceByRole(deployment_id, ["Bathroom Main", "Bathroom", "Bathroom Guest"])
+ bedroom_device_id, location_be, bedroom_well_id = FindDeviceByRole(deployment_id, ["Bedroom Master", "Bedroom", "Bedroom Guest"])
+ kitchen_device_id, location_ke, kitchen_well_id = FindDeviceByRole(deployment_id, ["Kitchen"])
+ most_present_device_id, location_ot, most_present_well_id = FindDeviceByRole(deployment_id, []) #this will find the most_present room (as defined in another field of the device record)
+
+ if isinstance(location_ot, int):
+ other_location = location_names[location_ot]
+ else:
+ other_location = location_ot
+
+ #weekly
+ week_dates = get_week_days_and_dates(7, timezone_str)
+ month_dates = get_week_days_and_dates(30, timezone_str)
+ six_months_dates = get_week_days_and_dates(180, timezone_str)
+
+ other_color = Loc2Color[other_location][0]
+ rgb_string = f"rgb({other_color[0]}, {other_color[1]}, {other_color[2]})"
+
+ rooms_reports = [("Bathroom", "blue", bathroom_device_id, bathroom_well_id), ("Bedroom", "green", bedroom_device_id, bedroom_well_id), ("Kitchen", "red", kitchen_device_id, kitchen_well_id), (other_location, rgb_string, most_present_device_id, most_present_well_id)]
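+ #each entry is (display name, chart color, device_id, well_id); rooms that were not found (device_id <= 0) are skipped in the report loops below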
+
+ six_months_report = []
+ for room_details in rooms_reports:
+ device_id = room_details[2]
+ if device_id > 0:
+
+ well_id = room_details[3]
+ radar_threshold_group_st = {device[1]: device[5] for device in devices_list}[device_id]
+ room = {"name": room_details[0],"color": room_details[1]}
+ data = []
+
+ for day_activity in six_months_dates:
+ datee = day_activity[0]
+ hours, events_count = GetActivities(device_id, well_id, datee, filterr, refresh, timezone_str, radar_threshold_group_st)
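+ #GetActivities is assumed to return (hours spent, number of visits) for this room and day, derived from the Z-graph presence data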
+
+ if hours > 18:
+ print("Too long 6m!!!", device_id, well_id, datee, filterr, refresh, timezone_str, radar_threshold_group_st)
+
+ data_record = { "title": str(day_activity[2]), "events": events_count, "hours": hours}
+ data.append(data_record)
+
+ room["data"] = data
+ six_months_report.append(room)
+
+ weekly_report = []
+ for room_details in rooms_reports:
+ device_id = room_details[2]
+ if device_id > 0:
+ well_id = room_details[3]
+ radar_threshold_group_st = {device[1]: device[5] for device in devices_list}[device_id]
+ room = {"name": room_details[0],"color": room_details[1]}
+ data = []
+
+ for day_activity in week_dates:
+ datee = day_activity[0]
+ hours, events_count = GetActivities(device_id, well_id, datee, filterr, refresh, timezone_str, radar_threshold_group_st)
+ data_record = { "title": day_activity[1], "events": events_count, "hours": hours}
+ data.append(data_record)
+
+ room["data"] = data
+ weekly_report.append(room)
+
+ monthly_report = []
+ for room_details in rooms_reports:
+ device_id = room_details[2]
+ if device_id > 0:
+ well_id = room_details[3]
+ radar_threshold_group_st = {device[1]: device[5] for device in devices_list}[device_id]
+ room = {"name": room_details[0],"color": room_details[1]}
+ data = []
+
+ for day_activity in month_dates:
+ datee = day_activity[0]
+ hours, events_count = GetActivities(device_id, well_id, datee, filterr, refresh, timezone_str, radar_threshold_group_st)
+ #if datee == "2025-05-20" and device_id == 572:
+ # print(hours)
+ if hours > 18:
+ print("Too long m!!!", device_id, well_id, datee, filterr, refresh, timezone_str, radar_threshold_group_st)
+
+ data_record = { "title": str(day_activity[2]), "events": events_count, "hours": hours}
+ data.append(data_record)
+
+ room["data"] = data
+ monthly_report.append(room)
+
+
+
+
result_dictionary = {
"alert_text": "No alert",
"alert_color": "bg-green-100 text-green-700",
@@ -12057,9 +17095,9 @@ class WellApi:
"rooms": [
{
"name": "Bathroom",
- "color": "purple",
+ "color": "blue",
"data": [
- { "title": "Monday", "events": 186, "hours": 80 },
+ { "title": "Monday", "events": 186, "hours": 80.56 },
{ "title": "Tuesday", "events": 305, "hours": 200 },
{ "title": "Wednesday", "events": 237, "hours": 120 },
{ "title": "Thursday", "events": 73, "hours": 190 },
@@ -12070,7 +17108,7 @@ class WellApi:
},
{
"name": "Bedroom",
- "color": "#3b82f6",
+ "color": "green",
"data": [
{ "title": "Monday", "events": 186, "hours": 80 },
{ "title": "Tuesday", "events": 305, "hours": 200 },
@@ -12083,7 +17121,7 @@ class WellApi:
},
{
"name": "Kitchen",
- "color": "orange",
+ "color": "red",
"data": [
{ "title": "Monday", "events": 186, "hours": 80 },
{ "title": "Tuesday", "events": 305, "hours": 200 },
@@ -12096,7 +17134,7 @@ class WellApi:
},
{
"name": "Other",
- "color": "hotpink",
+ "color": "yellow",
"data": [
{ "title": "Monday", "events": 186, "hours": 80 },
{ "title": "Tuesday", "events": 305, "hours": 200 },
@@ -12298,6 +17336,12 @@ class WellApi:
]
}
+ result_dictionary["chart_data"][0]["rooms"] = weekly_report
+ result_dictionary["chart_data"][1]["rooms"] = monthly_report
+ result_dictionary["chart_data"][2]["rooms"] = six_months_report
+
+
+
payload = result_dictionary #{'result_dictionary': result_dictionary}
resp.media = package_response(payload)
resp.status = falcon.HTTP_200
@@ -12307,6 +17351,9 @@ class WellApi:
elif function == "dashboard_list":
# works in UTC only
+
+ logger.error(f"------------------------------- dashboard_list ------------------------------------------")
+
caretaker = user_name
#date_s = form_data.get('date')
time_s = form_data.get('time')
@@ -12326,24 +17373,28 @@ class WellApi:
for deployment_id, first_name, last_name in deployments_list:
details = GetSensorsDetailsFromDeployment(deployment_id, date_s, filterr)
- details["units"] = "°C"
- if "America" in details["time_zone"]:
- details["temperature"] = CelsiusToFahrenheit(details["temperature"])
- details["units"] = "°F"
- devices_list, device_ids = GetProximityList(deployment_id, date_s)
- # convert dates back to UTC
- #details['bathroom_at'] = pytz.timezone(details['time_zone']).localize(datetime.datetime.strptime(details['bathroom_at'], "%Y-%m-%dT%H:%M:%S")).astimezone(pytz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
- #details['kitchen_at'] = pytz.timezone(details['time_zone']).localize(datetime.datetime.strptime(details['bathroom_at'], "%Y-%m-%dT%H:%M:%S")).astimezone(pytz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
- #details['bedroom_at'] = pytz.timezone(details['time_zone']).localize(datetime.datetime.strptime(details['bedroom_at'], "%Y-%m-%dT%H:%M:%S")).astimezone(pytz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
- #details['last_detected_time'] = pytz.timezone(details['time_zone']).localize(datetime.datetime.strptime(details['last_detected_time'], "%Y-%m-%dT%H:%M:%S")).astimezone(pytz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
- location_list = []
- for room_details in devices_list:
- well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = room_details #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]')
- location_list.append(location_name)
+ if details != {}:
- details["deployment_id"] = deployment_id
- details["location_list"] = location_list
- result_list.append(details)
+ details["units"] = "°C"
+ if "America" in details["time_zone"]:
+ details["temperature"] = CelsiusToFahrenheit(details["temperature"])
+ details["units"] = "°F"
+ devices_list, device_ids = GetProximityList(deployment_id, date_s)
+ # convert dates back to UTC
+ #details['bathroom_at'] = pytz.timezone(details['time_zone']).localize(datetime.datetime.strptime(details['bathroom_at'], "%Y-%m-%dT%H:%M:%S")).astimezone(pytz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
+ #details['kitchen_at'] = pytz.timezone(details['time_zone']).localize(datetime.datetime.strptime(details['bathroom_at'], "%Y-%m-%dT%H:%M:%S")).astimezone(pytz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
+ #details['bedroom_at'] = pytz.timezone(details['time_zone']).localize(datetime.datetime.strptime(details['bedroom_at'], "%Y-%m-%dT%H:%M:%S")).astimezone(pytz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
+ #details['last_detected_time'] = pytz.timezone(details['time_zone']).localize(datetime.datetime.strptime(details['last_detected_time'], "%Y-%m-%dT%H:%M:%S")).astimezone(pytz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
+ location_list = []
+ for room_details in devices_list:
+ well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = room_details #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]')
+ if description == None or description == "":
+ location_list.append(location_name)
+ else:
+ location_list.append(location_name + " " + description)
+ details["deployment_id"] = deployment_id
+ details["location_list"] = location_list
+ result_list.append(details)
payload = {'result_list': result_list}
resp.media = package_response(payload)
@@ -12367,7 +17418,7 @@ class WellApi:
result_list = []
- details = GetSensorsDetailsFromDeployment(deployment_id, date_s, filterr)
+ details = GetSensorsDetailsFromDeployment(deployment_id, date_s, filterr, False)
details["units"] = "°C"
if "America" in details["time_zone"]:
details["temperature"] = CelsiusToFahrenheit(details["temperature"])
@@ -12376,25 +17427,194 @@ class WellApi:
location_list = []
for room_details in devices_list:
well_id, device_id, location_name, description, MAC, radar_threshold_group_st, close_to = room_details #(266, 559, 'Bathroom', None, '64B70888FAB0', '["s3_max",12]')
- location_list.append(location_name)
-
+ if description == None or description == "":
+ location_list.append(location_name)
+ else:
+ location_list.append(location_name + " " + description)
details["deployment_id"] = deployment_id
details["location_list"] = location_list
- settings = {"wellness_score": False, "last_seen": False, "sleep_report": False, "activity_report": False, "temperature": True, "humidity": True, "air_pressure": True, "light": True, "air_quality": True, "radar": True, "other_activities": False}
+ settings = {"wellness_score": False, "last_seen": False, "sleep_report": True, "activity_report": True, "temperature": True, "humidity": True, "air_pressure": True, "light": True, "air_quality": True, "radar": True, "other_activities": False}
details["settings"] = settings
-
-
result_list.append(details)
-
payload = {'result_list': result_list}
resp.media = package_response(payload)
resp.status = falcon.HTTP_200
AddToLog(payload)
return
+ elif function == "request_node_red":
+ logger.error(f"------------------------------- {function} ------------------------------------------")
+ #this will:
+ # 1.prepare the folder and settings.js
+ # 2.start a node-red instance and return its port
+ #caretaker = user_name
+ #date_s = form_data.get('date')
+ time_s = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
+ #deployment_id = form_data.get('deployment_id')
+ #redis_conn.set('node_red_requests', str([radar_threshold_signal, radar_threshold_value]))
+ # Hashes (dictionaries)
+ logger.error(f"Storing to node_red_requests {user_name}")
+ redis_conn.hset('node_red_requests', mapping={
+ 'user_name': user_name,
+ 'token': token,
+ 'time': time_s,
+ 'requests': 1
+ })
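+ #a separate worker is assumed to watch 'node_red_requests', start the per-user node-red instance, and publish its port under f'node_red_status_{user_name}' (read back by get_node_red_port below)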
+
+ payload = {'ok': 1}
+ logger.error(f"Responding {payload}")
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+
+ elif function == "get_node_red_port":
+ #this will:
+ # 1.read the per-user node-red status hash from Redis
+ # 2.return the port of the user's node-red instance (0 if none is running)
+ hash_data = GetRedisMap(f'node_red_status_{user_name}')
+ port = 0
+ if hash_data != {}:
+ port = hash_data['port']
+ #date_s = form_data.get('date')
+ #date_s = datetime.datetime.utcnow().strftime("%Y-%m-%d")
+ #deployment_id = form_data.get('deployment_id')
+ payload = {'port': port}
+ logger.debug(f"get_node_red_port: {payload}")
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+ elif function == "activity_detected":
+ #this will:
+ # 1.store the time of the last activity to Redis
+ time_s = form_data.get('time')
+
+ hash_data = GetRedisMap(f'node_red_status_{user_name}')
+ port = 0
+ if hash_data != {}:
+ port = hash_data['port']
+
+ redis_conn.hset(f'node_red_status_{user_name}', mapping={
+ 'port': port,
+ 'last_activity': time_s
+ })
+
+ payload = {'ok': 1}
+ logger.debug(f"activity_detected: {payload}")
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+
+ elif function == "store_flow":
+ #this will:
+ # 1.store flow into DB
+ time_s = form_data.get('time')
+ flow_json = form_data.get('flow')
+ logger.debug(f"store_flow: {flow_json}")
+ StoreFlow2DB(user_name, time_s, flow_json)
+ payload = {'ok': 1}
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+ elif function == "store_alarms":
+ #this will:
+ # 1.store the deployment and device alarms into the DB and Redis
+ deployment_id = form_data.get('deployment_id')
+ device_id = form_data.get('device_id')
+ deployment_alarms_json = form_data.get('deployment_alarms')
+ device_alarms_json = form_data.get('device_alarms')
+
+ logger.debug(f"store_alarms: {deployment_alarms_json}")
+
+
+ if privileges == "-1" or deployment_id in privileges:
+ ok = StoreAlarms2DB(deployment_id, device_id, deployment_alarms_json, device_alarms_json)
+
+ redis_conn.set('alarm_device_settings_'+device_id, device_alarms_json)
+ redis_conn.set('alarm_deployment_settings_'+deployment_id, deployment_alarms_json)
+
+ # Create record dictionary
+ record = {
+ 'user_name': user_name,
+ 'deployment_id': deployment_id,
+ 'device_id': device_id
+ }
+
+ # Convert dictionary to JSON string for storage in Redis list
+ record_json = json.dumps(record)
+
+ # Add to queue (list) - lpush adds to the left/front of the list
+ redis_conn.lpush('new_alarms', record_json)
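+ #'new_alarms' acts as a simple Redis queue; a consumer on the other end (rpop/brpop) is assumed to pick up and apply the new alarm settings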
+
+
+ payload = {'ok': ok}
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+ else:
+ payload = {'ok': 0, 'error': "not allowed"}
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+
+ return
+
+ elif function == "send_walarm":
+ # Extract data from form
+ deployment_id = form_data.get('deployment_id')
+ device_id = form_data.get('device_id')
+ location = form_data.get('location')
+ method = form_data.get('method')
+ feature = form_data.get('feature')
+ currentAlertTableMode = form_data.get('currentAlertTableMode')
+ time_s = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
+ content = form_data.get('content')
+ enabledCellContent = form_data.get('enabledCellContent')
+ currentUnits = form_data.get('currentUnits')
+ test_only = form_data.get('test_only')
+ action = form_data.get('action')
+
+ logger.debug(f"send_requests: {user_name}")
+
+ # Create record dictionary
+ record = {
+ 'user_name': user_name,
+ 'deployment_id': deployment_id,
+ 'location': location,
+ 'method': method,
+ 'feature': feature,
+ 'currentAlertTableMode': currentAlertTableMode,
+ 'time': time_s,
+ 'content': content,
+ 'currentUnits': currentUnits,
+ 'test_only': test_only,
+ 'action': action,
+ 'enabledCellContent': enabledCellContent
+ }
+
+ # Convert dictionary to JSON string for storage in Redis list
+ record_json = json.dumps(record)
+
+ # Add to queue (list) - lpush adds to the left/front of the list
+ redis_conn.lpush('send_requests', record_json)
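+ #same queue pattern as 'new_alarms': a worker is assumed to consume 'send_requests' and deliver the alert via the requested method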
+
+ payload = {'ok': 1}
+ logger.error(f"Responding {payload}")
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+ elif function == "node-red_deployed":
+ #this will:
+ # 1.record the node-red deploy event in Redis
+ time_s = form_data.get('time')
+ logger.debug(f"node-red_deployed: {user_name}")
+ redis_conn.hset('node-red_deployed', mapping={
+ 'user_name': user_name,
+ 'token': token,
+ 'time': time_s,
+ 'requests': 1
+ })
+
+ payload = {'ok': 1}
+ logger.error(f"Responding {payload}")
+ resp.media = package_response(payload)
+ resp.status = falcon.HTTP_200
+
else:
AddToLog("Error: function not recognized!")
- payload = {'ok': ok, 'error': debug_string}
+ payload = {'ok': 0, 'error': debug_string}
resp.media = package_response(payload)
resp.status = falcon.HTTP_200
return
@@ -12469,6 +17689,7 @@ except:
# For older Falcon versions
app = falcon.API(middleware=middlewares)
+#logger.error(f"@1")
# Add routes for well-api
well_api_instance = WellApi()
@@ -12503,14 +17724,15 @@ clientL.on_message = on_messageL
#clientL.connect(MQTTSERVERL, MQTT_PortL, 60)
#lientL.loop_start()
-
+#logger.error(f"@2")
# This code runs when executed directly (for development/debugging)
if __name__ == "__main__":
from wsgiref.simple_server import make_server
-
+ redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
# Use port 8000 for local debugging
- port = int(os.environ.get('PORT', 8002))
+ #port = int(os.environ.get('PORT', 8000))
+ port = int(os.environ.get('PORT', 1998))
# Create a WSGI server
with make_server('', port, app) as httpd:
@@ -12518,3 +17740,5 @@ if __name__ == "__main__":
# Serve until process is killed
httpd.serve_forever()
+else:
+ redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)