Added Custom Demo questions and cleared print statement
This commit is contained in:
parent bda0505ed6
commit be5160b143
console_out_test-jobs.txt (new file, 2392 lines)
File diff suppressed because it is too large
job_available_devices-test.sh (new file, 296 lines)
@@ -0,0 +1,296 @@
|
||||
#!/bin/bash
|
||||
|
||||
# ==============================================================================
|
||||
# WellDrySense API Test Suite (Bash Version)
|
||||
# Functionality: Logs in and exercises the job_available_devices / job_available_devices2 APIs.
# The remaining Job API tests (Create, List, Edit, Details, Weather, Sensor Data) are kept
# commented out below; the database cleanup step runs only when a job was actually created.
|
||||
# ==============================================================================
|
||||
|
||||
# --- Configuration ---
|
||||
# Load .env file if it exists
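# (Assumes .env holds simple KEY=VALUE lines without spaces, quotes, or comments;
#  anything fancier will not survive the `export $(cat .env | xargs)` expansion below.)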
|
||||
if [ -f .env ]; then
|
||||
export $(cat .env | xargs)
|
||||
fi
|
||||
|
||||
# Defaults (can be overridden by env vars)
|
||||
PORT="${PORT:-8002}"
|
||||
BASE_URL="http://localhost:$PORT/api/well_api"
|
||||
API_USER="${API_USER:-jpeters}"
|
||||
API_PASSWORD="${API_PASSWORD:-WellJson}"
|
||||
DB_NAME="${DB_NAME:-wellnuo}"
|
||||
DB_USER="${DB_USER:-postgres}"
|
||||
DB_HOST="${DB_HOST:-localhost}"
|
||||
DB_PORT="${DB_PORT:-5432}"
|
||||
# DB_PASSWORD should be set in .env or exported
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
RED='\033[0;31m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Global Variables
|
||||
TOKEN=""
|
||||
USER_ID=""
|
||||
JOB_ID=""
|
||||
|
||||
# Check for jq
|
||||
if ! command -v jq &> /dev/null; then
|
||||
echo -e "${RED}Error: 'jq' is not installed. Please install it to run this script.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${BLUE}=== Setting up WellDrySense Test Suite on Port $PORT ===${NC}"
|
||||
|
||||
# ==============================================================================
|
||||
# Helper Functions
|
||||
# ==============================================================================
|
||||
|
||||
# Function to print section headers
|
||||
print_header() {
|
||||
echo -e "\n${BLUE}----------------------------------------------------------------${NC}"
|
||||
echo -e "${BLUE}[Test] $1${NC}"
|
||||
}
|
||||
|
||||
# Function to perform a POST request
|
||||
# Usage: perform_test "Test Name" "JSON_PAYLOAD_STRING"
|
||||
perform_test() {
|
||||
local test_name="$1"
|
||||
local json_payload="$2"
|
||||
|
||||
print_header "$test_name"
|
||||
|
||||
# 1. Print Request
|
||||
echo "# Request:"
|
||||
echo "$json_payload" | jq '.'
|
||||
|
||||
# 2. Convert JSON to Form Data for curl (flattening simple objects)
|
||||
# Note: This simple conversion handles top-level keys.
|
||||
# Complex nested JSON strings (like 'devices') need to be passed as strings in the input JSON.
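# Illustrative example of the conversion done below:
#   {"function":"job_list","token":"a b"}  ->  function=job_list&token=a%20b
# (each top-level key is emitted as key=value by jq's to_entries, then URL-encoded via @uri).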
|
||||
local form_data=""
|
||||
|
||||
# Extract keys and values and build form string
|
||||
while IFS="=" read -r key value; do
|
||||
if [ -n "$key" ]; then
|
||||
# URL encode the value
|
||||
encoded_value=$(printf '%s' "$value" | jq -sRr @uri)
|
||||
if [ -z "$form_data" ]; then
|
||||
form_data="${key}=${encoded_value}"
|
||||
else
|
||||
form_data="${form_data}&${key}=${encoded_value}"
|
||||
fi
|
||||
fi
|
||||
done < <(echo "$json_payload" | jq -r "to_entries|map(\"\(.key)=\(.value)\")|.[]")
|
||||
|
||||
# 3. Execute Request
|
||||
response=$(curl -s -X POST "$BASE_URL" \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-d "$form_data")
|
||||
|
||||
# 4. Print Response
|
||||
echo -e "\n# Response:"
|
||||
if [ -z "$response" ]; then
|
||||
echo "(Empty Response)"
|
||||
echo -e "${RED}FAIL${NC}"
|
||||
return 1
|
||||
else
|
||||
echo "$response" | jq '.' 2>/dev/null || echo "$response"
|
||||
fi
|
||||
|
||||
# 5. Evaluate Pass/Fail based on "ok": 1
|
||||
ok_val=$(echo "$response" | jq -r '.ok // .status // 0')
|
||||
|
||||
# Handle different response structures (some return {status: 200}, some {ok: 1})
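# Examples of responses treated as PASS (illustrative): {"ok": 1, ...}, {"status": 200, ...}
# or {"status": "success", ...}; anything else falls through to FAIL below.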
|
||||
if [ "$ok_val" == "1" ] || [ "$ok_val" == "200" ] || [ "$ok_val" == "success" ]; then
|
||||
echo -e "${GREEN}PASS${NC}"
|
||||
|
||||
# Extract Job ID if this was the create step
|
||||
if [ "$test_name" == "job_create" ]; then
|
||||
JOB_ID=$(echo "$response" | jq -r '.job_id')
|
||||
echo "-> Captured Job ID: $JOB_ID"
|
||||
fi
|
||||
return 0
|
||||
else
|
||||
error_msg=$(echo "$response" | jq -r '.error // .message // "Unknown error"')
|
||||
echo -e "${RED}FAIL: $error_msg${NC}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# Test Execution
|
||||
# ==============================================================================
|
||||
|
||||
# 1. Login / Credentials
|
||||
# ----------------------
|
||||
login_payload=$(jq -n \
|
||||
--arg fn "credentials" \
|
||||
--arg un "$API_USER" \
|
||||
--arg ps "$API_PASSWORD" \
|
||||
--arg cid "bash-suite" \
|
||||
--arg nonce "test-nonce" \
|
||||
'{function: $fn, user_name: $un, ps: $ps, clientId: $cid, nonce: $nonce}')
|
||||
|
||||
print_header "Login"
|
||||
echo "# Request:"
|
||||
echo "$login_payload" | jq '.'
|
||||
|
||||
# Special handling for login to capture token
|
||||
response=$(curl -s -X POST "$BASE_URL" -d "function=credentials&user_name=$API_USER&ps=$API_PASSWORD&clientId=bash-suite&nonce=test-nonce")
|
||||
|
||||
echo -e "\n# Response:"
|
||||
echo "$response" | jq '.'
|
||||
|
||||
TOKEN=$(echo "$response" | jq -r '.access_token // .data.access_token')
|
||||
USER_ID=$(echo "$response" | jq -r '.user_id // .data.user_id')
|
||||
|
||||
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
|
||||
echo -e "${GREEN}PASS${NC} (User ID: $USER_ID)"
|
||||
else
|
||||
echo -e "${RED}FATAL: Login failed. Check credentials.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# # 2. Create Job
|
||||
# # ----------------------
|
||||
# # Note: We pass JSON strings for complex fields like 'devices' and 'alerts_config'
|
||||
# devices_json='[{"mac": "TEST_MAC_VIRTUAL", "location": "Lab"}]'
|
||||
# alerts_json='{"temp_high": 30}'
|
||||
|
||||
# create_payload=$(jq -n \
|
||||
# --arg fn "job_create" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg cn "TEST_SUITE_CUSTOMER_BASH" \
|
||||
# --arg as "123 Bash Script Ln" \
|
||||
# --arg ac "Shellville" \
|
||||
# --arg dev "$devices_json" \
|
||||
# --arg lat "34.05" \
|
||||
# --arg lng "-118.25" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, customer_name: $cn, address_street: $as, address_city: $ac, devices: $dev, lat: $lat, lng: $lng}')
|
||||
|
||||
# perform_test "job_create" "$create_payload" || exit 1
|
||||
|
||||
# # 3. Job List
|
||||
# # ----------------------
|
||||
# list_payload=$(jq -n \
|
||||
# --arg fn "job_list" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# '{function: $fn, user_name: $un, token: $tk}')
|
||||
|
||||
# perform_test "job_list" "$list_payload"
|
||||
|
||||
# # 4. Job Details
|
||||
# # ----------------------
|
||||
# details_payload=$(jq -n \
|
||||
# --arg fn "job_details" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg jid "$JOB_ID" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, job_id: $jid}')
|
||||
|
||||
# perform_test "job_details" "$details_payload"
|
||||
|
||||
# # 5. Job Edit (Stop Job)
|
||||
# # ----------------------
|
||||
# edit_payload=$(jq -n \
|
||||
# --arg fn "job_edit" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg jid "$JOB_ID" \
|
||||
# --arg st "Stopped" \
|
||||
# --arg dt "2025-12-31T23:59:59" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, job_id: $jid, job_status: $st, date_to: $dt}')
|
||||
|
||||
# perform_test "job_edit" "$edit_payload"
|
||||
|
||||
|
||||
|
||||
|
||||
# 6. Available Devices
|
||||
# ----------------------
|
||||
# Note: The API now derives the deployment ID from the user_name -> person_details.
|
||||
# well_id is not sent as a parameter.
|
||||
|
||||
# This payload works correctly with the new implementation
|
||||
avail_payload=$(jq -n \
|
||||
--arg fn "job_available_devices" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
'{function: $fn, user_name: $un, token: $tk}')
|
||||
|
||||
perform_test "job_available_devices" "$avail_payload"
|
||||
|
||||
# Alternative test: job_available_devices2 (implemented via GetProximityList)
|
||||
avail_payload=$(jq -n \
|
||||
--arg fn "job_available_devices2" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
'{function: $fn, user_name: $un, token: $tk}')
|
||||
|
||||
perform_test "job_available_devices2" "$avail_payload"
|
||||
|
||||
|
||||
|
||||
# # 7. Job Weather
|
||||
# # ----------------------
|
||||
# weather_payload=$(jq -n \
|
||||
# --arg fn "job_weather" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg jid "$JOB_ID" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, job_id: $jid}')
|
||||
|
||||
# perform_test "job_weather" "$weather_payload"
|
||||
|
||||
# # 8. Job Sensor Bucketed Data (New Test)
|
||||
# # ----------------------
|
||||
# # Using dynamic dates for the test
|
||||
# DATE_FROM=$(date +%Y-%m-%d)
|
||||
# DATE_TO=$(date +%Y-%m-%d)
|
||||
|
||||
# sensor_payload=$(jq -n \
|
||||
# --arg fn "get_job_sensor_bucketed_data" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg jid "$JOB_ID" \
|
||||
# --arg sens "temperature" \
|
||||
# --arg dt "$DATE_FROM" \
|
||||
# --arg dtt "$DATE_TO" \
|
||||
# --arg bs "15m" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, job_id: $jid, sensor: $sens, date: $dt, to_date: $dtt, bucket_size: $bs}')
|
||||
|
||||
# perform_test "get_job_sensor_bucketed_data" "$sensor_payload"
|
||||
|
||||
# ==============================================================================
|
||||
# Cleanup
|
||||
# ==============================================================================
|
||||
print_header "Cleanup"
|
||||
|
||||
if [ -n "$JOB_ID" ]; then
|
||||
echo "-> Deleting Job ID: $JOB_ID from database..."
|
||||
|
||||
# Use PGPASSWORD for non-interactive auth if set
|
||||
export PGPASSWORD="${DB_PASSWORD}"
|
||||
|
||||
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "DELETE FROM public.jobs WHERE job_id = $JOB_ID;" > /dev/null 2>&1
|
||||
|
||||
if [ $? -eq 0 ]; then
|
||||
echo -e "${GREEN}Cleanup successful. Database restored.${NC}"
|
||||
else
|
||||
echo -e "${RED}Cleanup failed. Please manually delete job_id $JOB_ID from public.jobs.${NC}"
|
||||
echo "Command attempted: psql -h $DB_HOST -U $DB_USER -d $DB_NAME -c \"DELETE FROM public.jobs WHERE job_id = $JOB_ID;\""
|
||||
fi
|
||||
else
|
||||
echo "No Job ID created, skipping cleanup."
|
||||
fi
|
||||
|
||||
echo -e "\n${BLUE}=== Test Suite Finished ===${NC}"
|
||||
# ==============================================================================
|
||||
# well-api.py modifications to support WellDrySense API on port 1998
|
||||
# ==============================================================================
|
||||
postgress.sql (new file, 5808 lines)
File diff suppressed because it is too large
test_device_list.sh (new file, 354 lines)
@@ -0,0 +1,354 @@
|
||||
#!/bin/bash
|
||||
|
||||
# ==============================================================================
|
||||
# WellDrySense API Test Suite (Bash Version)
|
||||
# Functionality: Logs in and exercises the devices_list API.
# The Job API tests and the database cleanup step are kept commented out below for reference.
|
||||
# ==============================================================================
|
||||
|
||||
# --- Configuration ---
|
||||
# Load .env file if it exists
|
||||
if [ -f .env ]; then
|
||||
export $(cat .env | xargs)
|
||||
fi
|
||||
|
||||
# Defaults (can be overridden by env vars)
|
||||
PORT="${PORT:-8002}"
|
||||
BASE_URL="http://localhost:$PORT/api/well_api"
|
||||
#API_USER="${API_USER:-jpeters}"
|
||||
#API_PASSWORD="${API_PASSWORD:-WellJson}"
|
||||
API_USER="${API_USER:-brankol}"
|
||||
API_PASSWORD="${API_PASSWORD:-branko_2025!}"
|
||||
DB_NAME="${DB_NAME:-wellnuo}"
|
||||
DB_USER="${DB_USER:-postgres}"
|
||||
DB_HOST="${DB_HOST:-localhost}"
|
||||
DB_PORT="${DB_PORT:-5432}"
|
||||
# DB_PASSWORD should be set in .env or exported
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
RED='\033[0;31m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Global Variables
|
||||
TOKEN=""
|
||||
USER_ID=""
|
||||
JOB_ID=""
|
||||
|
||||
# Check for jq
|
||||
if ! command -v jq &> /dev/null; then
|
||||
echo -e "${RED}Error: 'jq' is not installed. Please install it to run this script.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${BLUE}=== Setting up WellDrySense Test Suite on Port $PORT ===${NC}"
|
||||
|
||||
# ==============================================================================
|
||||
# Helper Functions
|
||||
# ==============================================================================
|
||||
|
||||
# Function to print section headers
|
||||
print_header() {
|
||||
echo -e "\n${BLUE}----------------------------------------------------------------${NC}"
|
||||
echo -e "${BLUE}[Test] $1${NC}"
|
||||
}
|
||||
|
||||
# Function to perform a POST request
|
||||
# Usage: perform_test "Test Name" "JSON_PAYLOAD_STRING"
|
||||
perform_test() {
|
||||
local test_name="$1"
|
||||
local json_payload="$2"
|
||||
|
||||
print_header "$test_name"
|
||||
|
||||
# 1. Print Request
|
||||
echo "# Request:"
|
||||
echo "$json_payload" | jq '.'
|
||||
|
||||
# 2. Convert JSON to Form Data for curl (flattening simple objects)
|
||||
# Note: This simple conversion handles top-level keys.
|
||||
# Complex nested JSON strings (like 'devices') need to be passed as strings in the input JSON.
|
||||
local form_data=""
|
||||
|
||||
# Extract keys and values and build form string
|
||||
while IFS="=" read -r key value; do
|
||||
if [ -n "$key" ]; then
|
||||
# URL encode the value
|
||||
encoded_value=$(printf '%s' "$value" | jq -sRr @uri)
|
||||
if [ -z "$form_data" ]; then
|
||||
form_data="${key}=${encoded_value}"
|
||||
else
|
||||
form_data="${form_data}&${key}=${encoded_value}"
|
||||
fi
|
||||
fi
|
||||
done < <(echo "$json_payload" | jq -r "to_entries|map(\"\(.key)=\(.value)\")|.[]")
|
||||
|
||||
# 3. Execute Request
|
||||
response=$(curl -s -X POST "$BASE_URL" \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-d "$form_data")
|
||||
|
||||
# 4. Print Response
|
||||
echo -e "\n# Response:"
|
||||
if [ -z "$response" ]; then
|
||||
echo "(Empty Response)"
|
||||
echo -e "${RED}FAIL${NC}"
|
||||
return 1
|
||||
else
|
||||
echo "$response" | jq '.' 2>/dev/null || echo "$response"
|
||||
fi
|
||||
|
||||
# 5. Evaluate Pass/Fail based on "ok": 1
|
||||
ok_val=$(echo "$response" | jq -r '.ok // .status // 0')
|
||||
|
||||
# Handle different response structures (some return {status: 200}, some {ok: 1})
|
||||
if [ "$ok_val" == "1" ] || [ "$ok_val" == "200" ] || [ "$ok_val" == "success" ]; then
|
||||
echo -e "${GREEN}PASS${NC}"
|
||||
|
||||
# Extract Job ID if this was the create step
|
||||
if [ "$test_name" == "job_create" ]; then
|
||||
JOB_ID=$(echo "$response" | jq -r '.job_id')
|
||||
echo "-> Captured Job ID: $JOB_ID"
|
||||
fi
|
||||
return 0
|
||||
else
|
||||
error_msg=$(echo "$response" | jq -r '.error // .message // "Unknown error"')
|
||||
echo -e "${RED}FAIL: $error_msg${NC}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# Test Execution
|
||||
# ==============================================================================
|
||||
|
||||
# 1. Login / Credentials
|
||||
# ----------------------
|
||||
login_payload=$(jq -n \
|
||||
--arg fn "credentials" \
|
||||
--arg un "$API_USER" \
|
||||
--arg ps "$API_PASSWORD" \
|
||||
--arg cid "bash-suite" \
|
||||
--arg nonce "test-nonce" \
|
||||
'{function: $fn, user_name: $un, ps: $ps, clientId: $cid, nonce: $nonce}')
|
||||
|
||||
print_header "Login"
|
||||
echo "# Request:"
|
||||
echo "$login_payload" | jq '.'
|
||||
|
||||
# Special handling for login to capture token
|
||||
response=$(curl -s -X POST "$BASE_URL" -d "function=credentials&user_name=$API_USER&ps=$API_PASSWORD&clientId=bash-suite&nonce=test-nonce")
|
||||
|
||||
echo -e "\n# Response:"
|
||||
echo "$response" | jq '.'
|
||||
|
||||
TOKEN=$(echo "$response" | jq -r '.access_token // .data.access_token')
|
||||
USER_ID=$(echo "$response" | jq -r '.user_id // .data.user_id')
|
||||
|
||||
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
|
||||
echo -e "${GREEN}PASS${NC} (User ID: $USER_ID)"
|
||||
else
|
||||
echo -e "${RED}FATAL: Login failed. Check credentials.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# # 2. Create Job
|
||||
# # ----------------------
|
||||
# # Note: We pass JSON strings for complex fields like 'devices' and 'alerts_config'
|
||||
# devices_json='[{"mac": "TEST_MAC_VIRTUAL", "location": "Lab"}]'
|
||||
# alerts_json='{"temp_high": 30}'
|
||||
|
||||
# create_payload=$(jq -n \
|
||||
# --arg fn "job_create" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg cn "TEST_SUITE_CUSTOMER_BASH" \
|
||||
# --arg as "123 Bash Script Ln" \
|
||||
# --arg ac "Shellville" \
|
||||
# --arg dev "$devices_json" \
|
||||
# --arg lat "34.05" \
|
||||
# --arg lng "-118.25" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, customer_name: $cn, address_street: $as, address_city: $ac, devices: $dev, lat: $lat, lng: $lng}')
|
||||
|
||||
# perform_test "job_create" "$create_payload" || exit 1
|
||||
|
||||
# # 3. Job List
|
||||
# # ----------------------
|
||||
# list_payload=$(jq -n \
|
||||
# --arg fn "job_list" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# '{function: $fn, user_name: $un, token: $tk}')
|
||||
|
||||
# perform_test "job_list" "$list_payload"
|
||||
|
||||
# # 3. Job List 2 (with added search)
|
||||
# # ----------------------
|
||||
# list_payload=$(jq -n \
|
||||
# --arg fn "job_list2" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg sch "" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, search: $sch}')
|
||||
|
||||
# perform_test "job_list2" "$list_payload"
|
||||
|
||||
# # 4. Job Details
|
||||
# # ----------------------
|
||||
# details_payload=$(jq -n \
|
||||
# --arg fn "job_details" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg jid "$JOB_ID" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, job_id: $jid}')
|
||||
|
||||
# perform_test "job_details" "$details_payload"
|
||||
|
||||
# # 5. Job Edit (Stop Job)
|
||||
# # ----------------------
|
||||
# edit_payload=$(jq -n \
|
||||
# --arg fn "job_edit" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg jid "$JOB_ID" \
|
||||
# --arg st "Stopped" \
|
||||
# --arg dt "2025-12-31T23:59:59" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, job_id: $jid, job_status: $st, date_to: $dt}')
|
||||
|
||||
# perform_test "job_edit" "$edit_payload"
|
||||
|
||||
# # 6. Available Devices (two versions, with direct SQL and via GetProximityList)
|
||||
# # ----------------------
|
||||
# avail_payload=$(jq -n \
|
||||
# --arg fn "job_available_devices" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# '{function: $fn, user_name: $un, token: $tk}')
|
||||
|
||||
# perform_test "job_available_devices" "$avail_payload"
|
||||
|
||||
# # 6. Available Devices (Alternative Test using job_available_devices2, which is using GetProximityList)
|
||||
# avail_payload=$(jq -n \
|
||||
# --arg fn "job_available_devices2" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# '{function: $fn, user_name: $un, token: $tk}')
|
||||
|
||||
# perform_test "job_available_devices2" "$avail_payload"
|
||||
|
||||
# # 6. Available Devices(made reusing job_user_all_devices2, adding filter and search)
|
||||
# avail_payload=$(jq -n \
|
||||
# --arg fn "job_devices" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg fl "all" \
|
||||
# --arg sch "" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, filter: $fl, search: $sch}')
|
||||
|
||||
# perform_test "job_devices" "$avail_payload"
|
||||
|
||||
# # 7. Job Weather
|
||||
# # ----------------------
|
||||
# weather_payload=$(jq -n \
|
||||
# --arg fn "job_weather" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg jid "$JOB_ID" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, job_id: $jid}')
|
||||
|
||||
# perform_test "job_weather" "$weather_payload"
|
||||
|
||||
# # 8. Job Sensor Bucketed Data (New Test)
|
||||
# # ----------------------
|
||||
# # Using dynamic dates for the test
|
||||
# DATE_FROM=$(date +%Y-%m-%d)
|
||||
# DATE_TO=$(date +%Y-%m-%d)
|
||||
|
||||
# sensor_payload=$(jq -n \
|
||||
# --arg fn "get_job_sensor_bucketed_data" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg jid "$JOB_ID" \
|
||||
# --arg sens "temperature" \
|
||||
# --arg dt "$DATE_FROM" \
|
||||
# --arg dtt "$DATE_TO" \
|
||||
# --arg bs "15m" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, job_id: $jid, sensor: $sens, date: $dt, to_date: $dtt, bucket_size: $bs}')
|
||||
|
||||
# perform_test "get_job_sensor_bucketed_data" "$sensor_payload"
|
||||
|
||||
# 9. device_list (New Test)
|
||||
# based on name=devices_list
|
||||
# &token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InJvYnN0ZXIiLCJleHAiOjE3NjQ3OTYwNzh9.t9rKVDRpSYOZGJMm2G0HYKSOOeaLypKwRGIJHehJBFE
|
||||
# &user_id=32
|
||||
# &user_name=robster
|
||||
# &first=0
|
||||
# &last=1000
|
||||
# &privileges=-1
|
||||
#
|
||||
# robster test
|
||||
device_list_payload=$(jq -n \
|
||||
--arg fn "devices_list" \
|
||||
--arg fid "0" \
|
||||
--arg lid "1000" \
|
||||
--arg priv "-1" \
|
||||
--arg uid "32" \
|
||||
--arg un "robster" \
|
||||
--arg tk "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InJvYnN0ZXIiLCJleHAiOjE3NjQ3OTYwNzh9.t9rKVDRpSYOZGJMm2G0HYKSOOeaLypKwRGIJHehJBFE" \
|
||||
'{function: $fn, name: $fn, user_id: $uid, user_name: $un, token: $tk, first: $fid, last: $lid, privileges: $priv}')
|
||||
perform_test "devices_list" "$device_list_payload"
|
||||
# brankol test
|
||||
device_list_payload=$(jq -n \
|
||||
--arg fn "devices_list" \
|
||||
--arg fid "0" \
|
||||
--arg lid "1000" \
|
||||
--arg priv "-1" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
'{function: $fn, name: $fn, user_name: $un, token: $tk, first: $fid, last: $lid, privileges: $priv}')
|
||||
# --arg uid "32" \
|
||||
# --arg un "robster" \
|
||||
# --arg tk "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InJvYnN0ZXIiLCJleHAiOjE3NjQ3OTYwNzh9.t9rKVDRpSYOZGJMm2G0HYKSOOeaLypKwRGIJHehJBFE" \
|
||||
#'{name: $fn, user_id: $uid, user_name: $un, token: $tk, first: $fid, last: $lid, privileges: $priv}')
|
||||
perform_test "devices_list" "$device_list_payload"
|
||||
|
||||
|
||||
# device_list_payload=$(jq -n \
|
||||
# --arg fn "devices_list" \
|
||||
# --arg un "$API_USER" \
|
||||
# --arg tk "$TOKEN" \
|
||||
# --arg fid "0" \
|
||||
# --arg lid "1000" \
|
||||
# --arg priv "-1" \
|
||||
# '{function: $fn, user_name: $un, token: $tk, first: $fid, last: $lid, privileges: $priv}')
|
||||
# perform_test "devices_list" "$device_list_payload"
|
||||
|
||||
# # ==============================================================================
|
||||
# # Cleanup
|
||||
# # ==============================================================================
|
||||
# print_header "Cleanup"
|
||||
|
||||
# if [ -n "$JOB_ID" ]; then
|
||||
# echo "-> Deleting Job ID: $JOB_ID from database..."
|
||||
|
||||
# # Use PGPASSWORD for non-interactive auth if set
|
||||
# export PGPASSWORD="${DB_PASSWORD}"
|
||||
|
||||
# psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "DELETE FROM public.jobs WHERE job_id = $JOB_ID;" > /dev/null 2>&1
|
||||
|
||||
# if [ $? -eq 0 ]; then
|
||||
# echo -e "${GREEN}Cleanup successful. Database restored.${NC}"
|
||||
# else
|
||||
# echo -e "${RED}Cleanup failed. Please manually delete job_id $JOB_ID from public.jobs.${NC}"
|
||||
# echo "Command attempted: psql -h $DB_HOST -U $DB_USER -d $DB_NAME -c \"DELETE FROM public.jobs WHERE job_id = $JOB_ID;\""
|
||||
# fi
|
||||
# else
|
||||
# echo "No Job ID created, skipping cleanup."
|
||||
# fi
|
||||
|
||||
|
||||
echo -e "\n${BLUE}=== Test Suite Finished ===${NC}"
|
||||
# ==============================================================================
|
||||
# well-api.py modifications to support WellDrySense API on port 1998
|
||||
# ==============================================================================
|
||||
test_welldrysense_api.py (new file, 261 lines)
@@ -0,0 +1,261 @@
|
||||
#!/usr/bin/env python3
|
||||
import unittest
import requests
import os
import json
import psycopg2
import sys
import time
import datetime  # needed by test_07_job_sensor_data (datetime.datetime.now())
from dotenv import load_dotenv
|
||||
|
||||
# --- Configuration ---
|
||||
# Load environment variables from .env file in the same directory
|
||||
load_dotenv()
|
||||
|
||||
# Configuration with fallbacks
|
||||
PORT = os.getenv('PORT', '8002')
|
||||
BASE_URL = f"http://localhost:{PORT}"
|
||||
API_USER = os.getenv('API_USER', 'jpeters')
|
||||
API_PASSWORD = os.getenv('API_PASSWORD', 'WellJson')
|
||||
|
||||
# ANSI Colors for better readability
|
||||
GREEN = '\033[92m'
|
||||
RED = '\033[91m'
|
||||
YELLOW = '\033[93m'
|
||||
RESET = '\033[0m'
|
||||
|
||||
class TestWellDrySenseAPI(unittest.TestCase):
|
||||
"""
|
||||
Test suite for WellDrySense.
|
||||
Ensures zero-impact on existing data by cleaning up created records.
|
||||
"""
|
||||
|
||||
token = None
|
||||
user_id = None
|
||||
job_id_to_test = None
|
||||
db_conn_params = {}
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
print(f"\n{GREEN}=== Setting up WellDrySense Test Suite on Port {PORT} ==={RESET}")
|
||||
|
||||
# Setup DB Params
|
||||
cls.db_conn_params = {
|
||||
'dbname': os.getenv('DB_NAME'),
|
||||
'user': os.getenv('DB_USER'),
|
||||
'password': os.getenv('DB_PASSWORD'),
|
||||
'host': os.getenv('DB_HOST'),
|
||||
'port': os.getenv('DB_PORT')
|
||||
}
|
||||
|
||||
# Authenticate
|
||||
print(f"-> Logging in as: {API_USER}...")
|
||||
url = f"{BASE_URL}/api/well_api"
|
||||
payload = {
|
||||
"function": "credentials",
|
||||
"user_name": API_USER,
|
||||
"ps": API_PASSWORD,
|
||||
"clientId": "test-suite",
|
||||
"nonce": "test-nonce"
|
||||
}
|
||||
|
||||
try:
|
||||
response = requests.post(url, data=payload)
|
||||
if response.status_code != 200:
|
||||
print(f"{RED}FATAL: Login failed. Status: {response.status_code}{RESET}")
|
||||
print(f"Response: {response.text}")
|
||||
sys.exit(1)
|
||||
|
||||
data = response.json()
|
||||
|
||||
# Handle different response structures
|
||||
if 'access_token' in data:
|
||||
cls.token = data['access_token']
|
||||
cls.user_id = data.get('user_id')
|
||||
elif 'data' in data and 'access_token' in data['data']:
|
||||
cls.token = data['data']['access_token']
|
||||
cls.user_id = data['data'].get('user_id')
|
||||
else:
|
||||
print(f"{RED}FATAL: Token not found in response.{RESET}")
|
||||
sys.exit(1)
|
||||
|
||||
print(f"{GREEN}-> Login successful. User ID: {cls.user_id}{RESET}")
|
||||
|
||||
except requests.exceptions.ConnectionError:
|
||||
print(f"{RED}FATAL: Could not connect to {BASE_URL}. Ensure well-api.py is running.{RESET}")
|
||||
sys.exit(1)
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
print(f"\n{GREEN}=== Tearing Down Test Suite ==={RESET}")
|
||||
if cls.job_id_to_test:
|
||||
print(f"-> Cleaning up Job ID: {cls.job_id_to_test}...")
|
||||
conn = None
|
||||
try:
|
||||
conn = psycopg2.connect(**cls.db_conn_params)
|
||||
with conn.cursor() as cur:
|
||||
cur.execute("DELETE FROM public.jobs WHERE job_id = %s;", (cls.job_id_to_test,))
|
||||
conn.commit()
|
||||
print(f"{GREEN}-> Cleanup successful. Database restored.{RESET}")
|
||||
except Exception as e:
|
||||
print(f"{RED}CRITICAL: DB Cleanup failed. Manually delete job {cls.job_id_to_test}. Error: {e}{RESET}")
|
||||
finally:
|
||||
if conn: conn.close()
|
||||
else:
|
||||
print("-> No job created, skipping cleanup.")
|
||||
|
||||
def _post_api(self, form_data):
|
||||
"""Helper to send authenticated POST requests"""
|
||||
form_data['user_name'] = API_USER
|
||||
form_data['token'] = self.token
|
||||
|
||||
try:
|
||||
response = requests.post(f"{BASE_URL}/api/well_api", data=form_data)
|
||||
self.assertEqual(response.status_code, 200, f"API HTTP Error {response.status_code}: {response.text}")
|
||||
|
||||
try:
|
||||
json_resp = response.json()
|
||||
if 'data' in json_resp and 'status' in json_resp:
|
||||
return json_resp['data']
|
||||
return json_resp
|
||||
except json.JSONDecodeError:
|
||||
self.fail(f"API returned invalid JSON: {response.text}")
|
||||
except requests.exceptions.ConnectionError:
|
||||
self.fail("Connection refused. API server is down.")
|
||||
|
||||
# --- TESTS ---
|
||||
|
||||
def test_01_create_job(self):
|
||||
"""Test creating a new job"""
|
||||
print("\n[Test] job_create")
|
||||
payload = {
|
||||
"function": "job_create",
|
||||
"customer_name": "TEST_SUITE_CUSTOMER",
|
||||
"address_street": "123 Python Way",
|
||||
"address_city": "Codeville",
|
||||
"address_state": "CA",
|
||||
"address_country": "USA",
|
||||
"lat": 34.05,
|
||||
"lng": -118.25,
|
||||
"key_person_name": "Test Runner",
|
||||
"key_person_email": "test@wellnuo.com",
|
||||
"devices": json.dumps([{"mac": "TEST_MAC_VIRTUAL", "location": "Lab"}]),
|
||||
"alerts_config": json.dumps({"temp_high": 30})
|
||||
}
|
||||
|
||||
data = self._post_api(payload)
|
||||
self.assertEqual(data.get('ok'), 1, f"Job creation failed: {data.get('error')}")
|
||||
self.assertIn('job_id', data)
|
||||
self.__class__.job_id_to_test = data['job_id']
|
||||
print(f"-> Job created with ID: {self.job_id_to_test}")
|
||||
|
||||
def test_02_job_list(self):
|
||||
"""Test retrieving the job list"""
|
||||
print("\n[Test] job_list")
|
||||
payload = {"function": "job_list"}
|
||||
data = self._post_api(payload)
|
||||
|
||||
self.assertEqual(data.get('ok'), 1, f"List failed: {data.get('error')}")
|
||||
self.assertIn('jobs', data)
|
||||
|
||||
found = any(j.get('job_id') == self.job_id_to_test for j in data['jobs'])
|
||||
self.assertTrue(found, f"Created Job ID {self.job_id_to_test} not found in job_list")
|
||||
|
||||
def test_03_job_details(self):
|
||||
"""Test retrieving single job details"""
|
||||
print("\n[Test] job_details")
|
||||
if not self.job_id_to_test: self.skipTest("No job ID available")
|
||||
|
||||
payload = {"function": "job_details", "job_id": self.job_id_to_test}
|
||||
data = self._post_api(payload)
|
||||
|
||||
self.assertEqual(data.get('ok'), 1, f"Details failed: {data.get('error')}")
|
||||
self.assertEqual(data['details']['customer_name'], "TEST_SUITE_CUSTOMER")
|
||||
|
||||
# Verify JSON parsing of devices
|
||||
devices = data['details'].get('devices')
|
||||
if isinstance(devices, str): devices = json.loads(devices)
|
||||
self.assertEqual(devices[0]['mac'], "TEST_MAC_VIRTUAL")
|
||||
|
||||
def test_04_job_edit(self):
|
||||
"""Test updating a job (Stop the job)"""
|
||||
print("\n[Test] job_edit")
|
||||
if not self.job_id_to_test: self.skipTest("No job ID available")
|
||||
|
||||
payload = {
|
||||
"function": "job_edit",
|
||||
"job_id": self.job_id_to_test,
|
||||
"customer_name": "UPDATED_CUSTOMER_NAME",
|
||||
"job_status": "Stopped",
|
||||
"date_to": "2025-12-31T23:59:59"
|
||||
}
|
||||
data = self._post_api(payload)
|
||||
self.assertEqual(data.get('ok'), 1, f"Edit failed: {data.get('error')}")
|
||||
|
||||
# Verify
|
||||
v_payload = {"function": "job_details", "job_id": self.job_id_to_test}
|
||||
v_data = self._post_api(v_payload)
|
||||
self.assertEqual(v_data['details']['customer_name'], "UPDATED_CUSTOMER_NAME")
|
||||
self.assertEqual(v_data['details']['job_status'], "Stopped")
|
||||
|
||||
def test_05_available_devices(self):
|
||||
"""Test fetching available devices"""
|
||||
print("\n[Test] job_available_devices")
|
||||
payload = {"function": "job_available_devices"}
|
||||
data = self._post_api(payload)
|
||||
self.assertEqual(data.get('ok'), 1, f"Available devices failed: {data.get('error')}")
|
||||
self.assertIsInstance(data['devices'], list)
|
||||
|
||||
def test_06_job_weather(self):
|
||||
"""Test fetching weather"""
|
||||
print("\n[Test] job_weather")
|
||||
if not self.job_id_to_test: self.skipTest("No job ID available")
|
||||
|
||||
#if not os.getenv('WEATHER_API_KEY'): print(f"{YELLOW}-> Warning: WEATHER_API_KEY not found in .env{RESET}")
|
||||
|
||||
payload = {"function": "job_weather", "job_id": self.job_id_to_test}
|
||||
data = self._post_api(payload)
|
||||
|
||||
if data.get('ok') == 0:
|
||||
print(f"-> Weather API returned error (Expected if key invalid): {data.get('error')}")
|
||||
else:
|
||||
self.assertIn('weather', data)
|
||||
print(f"-> Weather received: {data['weather']}")
|
||||
|
||||
def test_07_job_sensor_data(self):
|
||||
"""Test fetching bucketed sensor data for a job"""
|
||||
print("\n[Test] get_job_sensor_bucketed_data")
|
||||
if not self.job_id_to_test: self.skipTest("No job ID available")
|
||||
|
||||
# Use a date range likely to cover "now" or recent past for testing
|
||||
today = datetime.datetime.now().strftime("%Y-%m-%d")
|
||||
|
||||
payload = {
|
||||
"function": "get_job_sensor_bucketed_data",
|
||||
"job_id": self.job_id_to_test,
|
||||
"sensor": "temperature",
|
||||
"date": today,
|
||||
"bucket_size": "1h"
|
||||
}
|
||||
|
||||
data = self._post_api(payload)
|
||||
|
||||
self.assertEqual(data.get('ok'), 1, f"Sensor data fetch failed: {data.get('error')}")
|
||||
self.assertIn('chart_data', data)
|
||||
self.assertIn('units', data)
|
||||
self.assertIn('time_zone', data)
|
||||
|
||||
# Since we created the job with a virtual MAC in test_01,
|
||||
# we expect chart_data to contain an entry for that device (even if data list is empty)
|
||||
# Note: The virtual MAC likely won't exist in the 'devices' table unless pre-seeded,
|
||||
# so the API might skip it or return empty. We check structure primarily.
|
||||
if data['chart_data']:
|
||||
print(f"-> Retrieved data for {len(data['chart_data'])} locations")
|
||||
first_loc = data['chart_data'][0]
|
||||
self.assertIn('name', first_loc)
|
||||
self.assertIn('data', first_loc)
|
||||
else:
|
||||
print("-> No device data found (Expected if test MAC is not in DB)")
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
test_welldrysense_api.sh (new file, 305 lines)
@@ -0,0 +1,305 @@
|
||||
#!/bin/bash
|
||||
|
||||
# ==============================================================================
|
||||
# WellDrySense API Test Suite (Bash Version)
|
||||
# Functionality: Exercises Job APIs including Create, List, Edit, Details, Weather,
|
||||
# Sensor Data, and performs Database Cleanup.
|
||||
# ==============================================================================
|
||||
|
||||
# --- Configuration ---
|
||||
# Load .env file if it exists
|
||||
if [ -f .env ]; then
|
||||
export $(cat .env | xargs)
|
||||
fi
|
||||
|
||||
# Defaults (can be overridden by env vars)
|
||||
PORT="${PORT:-8002}"
|
||||
BASE_URL="http://localhost:$PORT/api/well_api"
|
||||
API_USER="${API_USER:-jpeters}"
|
||||
API_PASSWORD="${API_PASSWORD:-WellJson}"
|
||||
DB_NAME="${DB_NAME:-wellnuo}"
|
||||
DB_USER="${DB_USER:-postgres}"
|
||||
DB_HOST="${DB_HOST:-localhost}"
|
||||
DB_PORT="${DB_PORT:-5432}"
|
||||
# DB_PASSWORD should be set in .env or exported
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
RED='\033[0;31m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Global Variables
|
||||
TOKEN=""
|
||||
USER_ID=""
|
||||
JOB_ID=""
|
||||
|
||||
# Check for jq
|
||||
if ! command -v jq &> /dev/null; then
|
||||
echo -e "${RED}Error: 'jq' is not installed. Please install it to run this script.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${BLUE}=== Setting up WellDrySense Test Suite on Port $PORT ===${NC}"
|
||||
|
||||
# ==============================================================================
|
||||
# Helper Functions
|
||||
# ==============================================================================
|
||||
|
||||
# Function to print section headers
|
||||
print_header() {
|
||||
echo -e "\n${BLUE}----------------------------------------------------------------${NC}"
|
||||
echo -e "${BLUE}[Test] $1${NC}"
|
||||
}
|
||||
|
||||
# Function to perform a POST request
|
||||
# Usage: perform_test "Test Name" "JSON_PAYLOAD_STRING"
|
||||
perform_test() {
|
||||
local test_name="$1"
|
||||
local json_payload="$2"
|
||||
|
||||
print_header "$test_name"
|
||||
|
||||
# 1. Print Request
|
||||
echo "# Request:"
|
||||
echo "$json_payload" | jq '.'
|
||||
|
||||
# 2. Convert JSON to Form Data for curl (flattening simple objects)
|
||||
# Note: This simple conversion handles top-level keys.
|
||||
# Complex nested JSON strings (like 'devices') need to be passed as strings in the input JSON.
|
||||
local form_data=""
|
||||
|
||||
# Extract keys and values and build form string
|
||||
while IFS="=" read -r key value; do
|
||||
if [ -n "$key" ]; then
|
||||
# URL encode the value
|
||||
encoded_value=$(printf '%s' "$value" | jq -sRr @uri)
|
||||
if [ -z "$form_data" ]; then
|
||||
form_data="${key}=${encoded_value}"
|
||||
else
|
||||
form_data="${form_data}&${key}=${encoded_value}"
|
||||
fi
|
||||
fi
|
||||
done < <(echo "$json_payload" | jq -r "to_entries|map(\"\(.key)=\(.value)\")|.[]")
|
||||
|
||||
# 3. Execute Request
|
||||
response=$(curl -s -X POST "$BASE_URL" \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-d "$form_data")
|
||||
|
||||
# 4. Print Response
|
||||
echo -e "\n# Response:"
|
||||
if [ -z "$response" ]; then
|
||||
echo "(Empty Response)"
|
||||
echo -e "${RED}FAIL${NC}"
|
||||
return 1
|
||||
else
|
||||
echo "$response" | jq '.' 2>/dev/null || echo "$response"
|
||||
fi
|
||||
|
||||
# 5. Evaluate Pass/Fail based on "ok": 1
|
||||
ok_val=$(echo "$response" | jq -r '.ok // .status // 0')
|
||||
|
||||
# Handle different response structures (some return {status: 200}, some {ok: 1})
|
||||
if [ "$ok_val" == "1" ] || [ "$ok_val" == "200" ] || [ "$ok_val" == "success" ]; then
|
||||
echo -e "${GREEN}PASS${NC}"
|
||||
|
||||
# Extract Job ID if this was the create step
|
||||
if [ "$test_name" == "job_create" ]; then
|
||||
JOB_ID=$(echo "$response" | jq -r '.job_id')
|
||||
echo "-> Captured Job ID: $JOB_ID"
|
||||
fi
|
||||
return 0
|
||||
else
|
||||
error_msg=$(echo "$response" | jq -r '.error // .message // "Unknown error"')
|
||||
echo -e "${RED}FAIL: $error_msg${NC}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# Test Execution
|
||||
# ==============================================================================
|
||||
|
||||
# 1. Login / Credentials
|
||||
# ----------------------
|
||||
login_payload=$(jq -n \
|
||||
--arg fn "credentials" \
|
||||
--arg un "$API_USER" \
|
||||
--arg ps "$API_PASSWORD" \
|
||||
--arg cid "bash-suite" \
|
||||
--arg nonce "test-nonce" \
|
||||
'{function: $fn, user_name: $un, ps: $ps, clientId: $cid, nonce: $nonce}')
|
||||
|
||||
print_header "Login"
|
||||
echo "# Request:"
|
||||
echo "$login_payload" | jq '.'
|
||||
|
||||
# Special handling for login to capture token
|
||||
response=$(curl -s -X POST "$BASE_URL" -d "function=credentials&user_name=$API_USER&ps=$API_PASSWORD&clientId=bash-suite&nonce=test-nonce")
|
||||
|
||||
echo -e "\n# Response:"
|
||||
echo "$response" | jq '.'
|
||||
|
||||
TOKEN=$(echo "$response" | jq -r '.access_token // .data.access_token')
|
||||
USER_ID=$(echo "$response" | jq -r '.user_id // .data.user_id')
|
||||
|
||||
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
|
||||
echo -e "${GREEN}PASS${NC} (User ID: $USER_ID)"
|
||||
else
|
||||
echo -e "${RED}FATAL: Login failed. Check credentials.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 2. Create Job
|
||||
# ----------------------
|
||||
# Note: We pass JSON strings for complex fields like 'devices' and 'alerts_config'
|
||||
devices_json='[{"mac": "TEST_MAC_VIRTUAL", "location": "Lab"}]'
|
||||
alerts_json='{"temp_high": 30}'
|
||||
|
||||
create_payload=$(jq -n \
|
||||
--arg fn "job_create" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
--arg cn "TEST_SUITE_CUSTOMER_BASH" \
|
||||
--arg as "123 Bash Script Ln" \
|
||||
--arg ac "Shellville" \
|
||||
--arg dev "$devices_json" \
|
||||
--arg lat "34.05" \
|
||||
--arg lng "-118.25" \
|
||||
'{function: $fn, user_name: $un, token: $tk, customer_name: $cn, address_street: $as, address_city: $ac, devices: $dev, lat: $lat, lng: $lng}')
|
||||
|
||||
perform_test "job_create" "$create_payload" || exit 1
|
||||
|
||||
# 3. Job List
|
||||
# ----------------------
|
||||
list_payload=$(jq -n \
|
||||
--arg fn "job_list" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
'{function: $fn, user_name: $un, token: $tk}')
|
||||
|
||||
perform_test "job_list" "$list_payload"
|
||||
|
||||
# 3b. Job List 2 (with added search)
|
||||
# ----------------------
|
||||
list_payload=$(jq -n \
|
||||
--arg fn "job_list2" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
--arg sch "" \
|
||||
'{function: $fn, user_name: $un, token: $tk, search: $sch}')
|
||||
|
||||
perform_test "job_list2" "$list_payload"
|
||||
|
||||
# 4. Job Details
|
||||
# ----------------------
|
||||
details_payload=$(jq -n \
|
||||
--arg fn "job_details" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
--arg jid "$JOB_ID" \
|
||||
'{function: $fn, user_name: $un, token: $tk, job_id: $jid}')
|
||||
|
||||
perform_test "job_details" "$details_payload"
|
||||
|
||||
# 5. Job Edit (Stop Job)
|
||||
# ----------------------
|
||||
edit_payload=$(jq -n \
|
||||
--arg fn "job_edit" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
--arg jid "$JOB_ID" \
|
||||
--arg st "Stopped" \
|
||||
--arg dt "2025-12-31T23:59:59" \
|
||||
'{function: $fn, user_name: $un, token: $tk, job_id: $jid, job_status: $st, date_to: $dt}')
|
||||
|
||||
perform_test "job_edit" "$edit_payload"
|
||||
|
||||
# 6. Available Devices (two versions, with direct SQL and via GetProximityList)
|
||||
# ----------------------
|
||||
avail_payload=$(jq -n \
|
||||
--arg fn "job_available_devices" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
'{function: $fn, user_name: $un, token: $tk}')
|
||||
|
||||
perform_test "job_available_devices" "$avail_payload"
|
||||
|
||||
# 6b. Available Devices (alternative test using job_available_devices2, which uses GetProximityList)
|
||||
avail_payload=$(jq -n \
|
||||
--arg fn "job_available_devices2" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
'{function: $fn, user_name: $un, token: $tk}')
|
||||
|
||||
perform_test "job_available_devices2" "$avail_payload"
|
||||
|
||||
# 6c. Available Devices (made by reusing job_user_all_devices2, adding filter and search)
|
||||
avail_payload=$(jq -n \
|
||||
--arg fn "job_devices" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
--arg fl "all" \
|
||||
--arg sch "" \
|
||||
'{function: $fn, user_name: $un, token: $tk, filter: $fl, search: $sch}')
|
||||
|
||||
perform_test "job_devices" "$avail_payload"
|
||||
|
||||
# 7. Job Weather
|
||||
# ----------------------
|
||||
weather_payload=$(jq -n \
|
||||
--arg fn "job_weather" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
--arg jid "$JOB_ID" \
|
||||
'{function: $fn, user_name: $un, token: $tk, job_id: $jid}')
|
||||
|
||||
perform_test "job_weather" "$weather_payload"
|
||||
|
||||
# 8. Job Sensor Bucketed Data (New Test)
|
||||
# ----------------------
|
||||
# Using dynamic dates for the test
|
||||
DATE_FROM=$(date +%Y-%m-%d)
|
||||
DATE_TO=$(date +%Y-%m-%d)
|
||||
|
||||
sensor_payload=$(jq -n \
|
||||
--arg fn "get_job_sensor_bucketed_data" \
|
||||
--arg un "$API_USER" \
|
||||
--arg tk "$TOKEN" \
|
||||
--arg jid "$JOB_ID" \
|
||||
--arg sens "temperature" \
|
||||
--arg dt "$DATE_FROM" \
|
||||
--arg dtt "$DATE_TO" \
|
||||
--arg bs "15m" \
|
||||
'{function: $fn, user_name: $un, token: $tk, job_id: $jid, sensor: $sens, date: $dt, to_date: $dtt, bucket_size: $bs}')
|
||||
|
||||
perform_test "get_job_sensor_bucketed_data" "$sensor_payload"
|
||||
|
||||
# ==============================================================================
|
||||
# Cleanup
|
||||
# ==============================================================================
|
||||
print_header "Cleanup"
|
||||
|
||||
if [ -n "$JOB_ID" ]; then
|
||||
echo "-> Deleting Job ID: $JOB_ID from database..."
|
||||
|
||||
# Use PGPASSWORD for non-interactive auth if set
|
||||
export PGPASSWORD="${DB_PASSWORD}"
|
||||
|
||||
psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "DELETE FROM public.jobs WHERE job_id = $JOB_ID;" > /dev/null 2>&1
|
||||
|
||||
if [ $? -eq 0 ]; then
|
||||
echo -e "${GREEN}Cleanup successful. Database restored.${NC}"
|
||||
else
|
||||
echo -e "${RED}Cleanup failed. Please manually delete job_id $JOB_ID from public.jobs.${NC}"
|
||||
echo "Command attempted: psql -h $DB_HOST -U $DB_USER -d $DB_NAME -c \"DELETE FROM public.jobs WHERE job_id = $JOB_ID;\""
|
||||
fi
|
||||
else
|
||||
echo "No Job ID created, skipping cleanup."
|
||||
fi
|
||||
|
||||
echo -e "\n${BLUE}=== Test Suite Finished ===${NC}"
|
||||
# ==============================================================================
|
||||
# well-api.py modifications to support WellDrySense API on port 1998
|
||||
# ==============================================================================
|
||||
well-api.py (2329 lines)
File diff suppressed because it is too large
wellDbQuery.py (new file, 545 lines)
@@ -0,0 +1,545 @@
|
||||
#!/usr/bin/env python3
|
||||
import os
|
||||
import sys
|
||||
import csv
|
||||
import json
|
||||
import zipfile
|
||||
import argparse
|
||||
import datetime
|
||||
import logging
|
||||
import ast
|
||||
import io
|
||||
from collections import defaultdict
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Try to import psycopg2
|
||||
try:
|
||||
import psycopg2
|
||||
from psycopg2.extras import RealDictCursor
|
||||
except ImportError:
|
||||
print("Error: psycopg2 module not found. Please install it: pip install psycopg2-binary")
|
||||
sys.exit(1)
|
||||
|
||||
# ==========================================
|
||||
# Configuration & Defaults
|
||||
# ==========================================
|
||||
|
||||
load_dotenv()
|
||||
|
||||
DEFAULTS = {
|
||||
'DB_NAME': os.getenv('DB_NAME', 'wellnuo'),
|
||||
'DB_USER': os.getenv('DB_USER', 'well_app'),
|
||||
'DB_PASS': os.getenv('DB_PASSWORD', 'well_app_2024'),
|
||||
'DB_HOST': os.getenv('DB_HOST', '192.168.68.70'),
|
||||
'DB_PORT': os.getenv('DB_PORT', '5432'),
|
||||
'OUT_FILE': "out.zip",
|
||||
'RADAR_PART': "s28",
|
||||
'GROUP_BY': "by_minute"
|
||||
}
|
||||
|
||||
# Custom Logging Levels
|
||||
LOG_INFO = 20
|
||||
LOG_STEPS = 15 # Level 1 (-d 1)
|
||||
LOG_DATA = 12 # Level 2 (-d 2)
|
||||
LOG_SQL = 5 # Level 3 (-d 3)
|
||||
|
||||
logging.addLevelName(LOG_STEPS, "STEPS")
|
||||
logging.addLevelName(LOG_DATA, "DATA")
|
||||
logging.addLevelName(LOG_SQL, "SQL")
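# Note: lower numeric levels are more verbose. For example, logger.setLevel(LOG_SQL) (level 5)
# also emits STEPS (15) and DATA (12) records, while a level of INFO (20) or higher hides all three.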
|
||||
|
||||
logger = logging.getLogger("wellDbQuery")
|
||||
handler = logging.StreamHandler(sys.stdout)
|
||||
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
# ==========================================
|
||||
# Database Abstraction
|
||||
# ==========================================
|
||||
|
||||
class Database:
|
||||
def __init__(self, args):
|
||||
self.conn_params = {
|
||||
"host": args.db_host,
|
||||
"port": args.db_port,
|
||||
"dbname": args.db_name,
|
||||
"user": args.db_username,
|
||||
"password": args.db_password
|
||||
}
|
||||
self.conn = None
|
||||
|
||||
def connect(self):
|
||||
if self.conn is None or self.conn.closed:
|
||||
try:
|
||||
logger.log(LOG_STEPS, f"Connecting to database {self.conn_params['host']}...")
|
||||
self.conn = psycopg2.connect(**self.conn_params)
|
||||
except Exception as e:
|
||||
logger.error(f"Database connection failed: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
def close(self):
|
||||
if self.conn:
|
||||
self.conn.close()
|
||||
logger.log(LOG_STEPS, "Database connection closed.")
|
||||
|
||||
def execute(self, query, params=None):
|
||||
self.connect()
|
||||
try:
|
||||
with self.conn.cursor(cursor_factory=RealDictCursor) as cur:
|
||||
logger.log(LOG_SQL, f"EXECUTING SQL: {query}")
|
||||
if params:
|
||||
logger.log(LOG_SQL, f"PARAMS: {params}")
|
||||
|
||||
cur.execute(query, params)
|
||||
|
||||
# If query returns rows
|
||||
if cur.description:
|
||||
return cur.fetchall()
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Query execution failed: {e}")
|
||||
if logger.level <= LOG_SQL:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
|
||||
# ==========================================
|
||||
# Data Processing Logic
|
||||
# ==========================================
|
||||
|
||||
class DataProcessor:
|
||||
"""
|
||||
Handles the complexity of unwrapping 'mtype' sensor readings
|
||||
and merging them with radar data.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def get_bucket_interval(group_by):
|
||||
if group_by == 'by_10_seconds':
|
||||
return '10 seconds'
|
||||
elif group_by == 'by_hour':
|
||||
return '1 hour'
|
||||
elif group_by == 'by_10_minute':
|
||||
return '10 minutes'
|
||||
return '1 minute'
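# e.g. get_bucket_interval('by_hour') -> '1 hour'; any unrecognised group_by falls back to '1 minute'.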
|
||||
|
||||
@staticmethod
|
||||
def build_fetch_query(device_ids, start_time, end_time, radar_part, all_radar, group_by):
|
||||
"""
|
||||
Builds a query that fetches raw rows. We perform the pivoting in Python
|
||||
to dynamically handle the sparse s10-s79 columns.
|
||||
"""
|
||||
device_list_str = ",".join(map(str, device_ids))
|
||||
bucket_interval = DataProcessor.get_bucket_interval(group_by)
|
||||
|
||||
# Radar Selection Logic
|
||||
if all_radar:
|
||||
radar_select = """
|
||||
MAX(absent) AS radar_absent, MAX(moving) AS radar_moving, MAX(stationary) AS radar_stationary, MAX("both") AS radar_both,
|
||||
MAX(m0) AS m0, MAX(m1) AS m1, MAX(m2) AS m2, MAX(m3) AS m3, MAX(m4) AS m4, MAX(m5) AS m5, MAX(m6) AS m6, MAX(m7) AS m7, MAX(m8) AS m8,
|
||||
MAX(s2) AS radar_s2, MAX(s3) AS radar_s3, MAX(s4) AS radar_s4, MAX(s5) AS radar_s5, MAX(s6) AS radar_s6, MAX(s7) AS radar_s7, MAX(s8) AS radar_s8
|
||||
"""
|
||||
radar_outer = """
|
||||
rr.radar_absent, rr.radar_moving, rr.radar_stationary, rr.radar_both,
|
||||
rr.m0, rr.m1, rr.m2, rr.m3, rr.m4, rr.m5, rr.m6, rr.m7, rr.m8,
|
||||
rr.radar_s2, rr.radar_s3, rr.radar_s4, rr.radar_s5, rr.radar_s6, rr.radar_s7, rr.radar_s8
|
||||
"""
|
||||
else:
|
||||
radar_expr = radar_part
|
||||
if radar_expr == "s28":
|
||||
radar_expr = "(s2+s3+s4+s5+s6+s7+s8)/7"
|
||||
radar_select = f"MAX({radar_expr}) AS radar"
|
||||
radar_outer = "rr.radar"
|
||||
|
||||
# We fetch s0-s9 and mtype. Python will map these to s0-s79.
|
||||
# We use MAX/MIN aggregation to flatten duplicates within the time bucket if any exist
|
||||
# though typically mtypes are distinct rows.
|
||||
sql = f"""
|
||||
SELECT
|
||||
COALESCE(sr.minute, rr.minute) as time,
|
||||
COALESCE(sr.device_id, rr.device_id) as device_id,
|
||||
sr.mtype,
|
||||
sr.temperature, sr.humidity, sr.pressure, sr.light,
|
||||
sr.s0, sr.s1, sr.s2, sr.s3, sr.s4, sr.s5, sr.s6, sr.s7, sr.s8, sr.s9,
|
||||
{radar_outer}
|
||||
FROM (
|
||||
SELECT
|
||||
time_bucket('{bucket_interval}', time) AS minute,
|
||||
device_id,
|
||||
mtype,
|
||||
AVG(temperature) AS temperature,
|
||||
AVG(humidity) AS humidity,
|
||||
AVG(pressure) AS pressure,
|
||||
MAX(light) AS light,
|
||||
MIN(s0) as s0, MIN(s1) as s1, MIN(s2) as s2, MIN(s3) as s3, MIN(s4) as s4,
|
||||
MIN(s5) as s5, MIN(s6) as s6, MIN(s7) as s7, MIN(s8) as s8, MIN(s9) as s9
|
||||
FROM sensor_readings
|
||||
WHERE device_id IN ({device_list_str})
|
||||
AND time >= '{start_time}'
|
||||
AND time < '{end_time}'
|
||||
GROUP BY minute, device_id, mtype
|
||||
) sr
|
||||
FULL OUTER JOIN (
|
||||
SELECT
|
||||
time_bucket('{bucket_interval}', time) AS minute,
|
||||
device_id,
|
||||
{radar_select}
|
||||
FROM radar_readings
|
||||
WHERE device_id IN ({device_list_str})
|
||||
AND time >= '{start_time}'
|
||||
AND time < '{end_time}'
|
||||
GROUP BY minute, device_id
|
||||
) rr
|
||||
ON sr.minute = rr.minute AND sr.device_id = rr.device_id
|
||||
ORDER BY time ASC, device_id ASC;
|
||||
"""
|
||||
return sql
|
||||
|
||||
@staticmethod
|
||||
def process_rows(rows):
|
||||
"""
|
||||
Pivots the rows.
|
||||
Input: Multiple rows per timestamp (one for each mtype).
|
||||
Output: Single row per timestamp with columns s0...s79 populated dynamically.
|
||||
"""
|
||||
if not rows:
|
||||
return [], []
|
||||
|
||||
# Dictionary to hold merged records: key = (time, device_id)
|
||||
merged_data = defaultdict(dict)
|
||||
|
||||
# Track which sensor columns actually have data to optimize CSV headers
|
||||
active_s_columns = set()
|
||||
|
||||
# Base headers that are always present
|
||||
base_headers = ['time', 'device_id', 'temperature', 'humidity', 'pressure', 'light']
|
||||
|
||||
# Radar headers depend on query, extract them from the first row excluding known sensor/base cols
|
||||
first_row_keys = list(rows[0].keys())
|
||||
radar_headers = [k for k in first_row_keys if k not in base_headers and k not in ['mtype'] and not (k.startswith('s') and k[1:].isdigit())]
|
||||
|
||||
for row in rows:
|
||||
key = (row['time'], row['device_id'])
|
||||
|
||||
# Initialize base data if not present
|
||||
if key not in merged_data:
|
||||
for h in base_headers:
|
||||
merged_data[key][h] = row.get(h)
|
||||
for h in radar_headers:
|
||||
merged_data[key][h] = row.get(h)
|
||||
|
||||
# Merge Base Sensor Data (Temp/Hum/etc) if current row has it and stored is None
|
||||
# (This handles FULL OUTER JOIN nulls)
|
||||
if row.get('temperature') is not None:
|
||||
for h in base_headers[2:]: # Skip time/id
|
||||
merged_data[key][h] = row.get(h)
|
||||
|
||||
# Merge Radar Data
|
||||
if any(row.get(h) is not None for h in radar_headers):
|
||||
for h in radar_headers:
|
||||
if row.get(h) is not None:
|
||||
merged_data[key][h] = row.get(h)
|
||||
|
||||
# Process Extended Sensors (s0-s79) based on mtype
|
||||
mtype = row.get('mtype')
|
||||
|
||||
# Logic from well-api.py:
|
||||
# mtype 0, 17, 100 -> s0-s9
|
||||
# mtype 110 -> s10-s19 ... mtype 170 -> s70-s79
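# e.g. a row with mtype=130 maps its s0..s9 values onto columns s30..s39 (offset = 130 - 100).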
|
||||
|
||||
if mtype is not None:
|
||||
base_offset = 0
|
||||
if mtype in [0, 17, 100]:
|
||||
base_offset = 0
|
||||
elif 110 <= mtype <= 170:
|
||||
base_offset = mtype - 100
|
||||
else:
|
||||
# Unknown mtype, skip sensor mapping or log debug
|
||||
continue
|
||||
|
||||
for i in range(10):
|
||||
val = row.get(f's{i}')
|
||||
if val is not None:
|
||||
target_idx = base_offset + i
|
||||
col_name = f's{target_idx}'
|
||||
merged_data[key][col_name] = val
|
||||
active_s_columns.add(target_idx)
|
||||
|
||||
# Sort active sensor columns numerically
|
||||
sorted_s_cols = [f's{i}' for i in sorted(list(active_s_columns))]
|
||||
|
||||
# Final Header List
|
||||
final_headers = base_headers + radar_headers + sorted_s_cols
|
||||
|
||||
# Flatten dictionary to list of dicts
|
||||
final_rows = []
|
||||
for key in sorted(merged_data.keys()): # Sort by time, device_id
|
||||
row_data = merged_data[key]
|
||||
# Ensure all columns exist in dict for CSV writer
|
||||
for col in final_headers:
|
||||
if col not in row_data:
|
||||
row_data[col] = None
|
||||
final_rows.append(row_data)
|
||||
|
||||
return final_rows, final_headers
|
||||
|
||||
# ==========================================
# Main Application Logic
# ==========================================

class WellExporter:
    def __init__(self, args):
        self.args = args
        self.db = Database(args)
        self.target_device_ids = []
        self.file_identifier = "unknown"  # Used for filename prefix

    def resolve_devices(self):
        """Resolves command line arguments into a list of internal device_ids."""
        ids = set()

        # 1. Explicit Device ID
        if self.args.device_id:
            ids.add(int(self.args.device_id))
            self.file_identifier = str(self.args.device_id)
            logger.log(LOG_STEPS, f"Resolved Device ID: {self.args.device_id}")

        # 2. Well ID
        if self.args.well_id:
            rows = self.db.execute("SELECT device_id FROM public.devices WHERE well_id = %s", (self.args.well_id,))
            if rows:
                ids.add(rows[0]['device_id'])
                self.file_identifier = str(self.args.well_id)
                logger.log(LOG_STEPS, f"Resolved Well ID {self.args.well_id} -> Device ID {rows[0]['device_id']}")
            else:
                logger.warning(f"Well ID {self.args.well_id} not found.")
        # 3. MAC Address
        if self.args.mac:
            rows = self.db.execute("SELECT device_id FROM public.devices WHERE device_mac = %s", (self.args.mac,))
            if rows:
                ids.add(rows[0]['device_id'])
                self.file_identifier = self.args.mac
                logger.log(LOG_STEPS, f"Resolved MAC {self.args.mac} -> Device ID {rows[0]['device_id']}")
            else:
                logger.warning(f"MAC {self.args.mac} not found.")

        # 4. Deployment ID
        if self.args.deployment_id:
            self.file_identifier = str(self.args.deployment_id)
            logger.log(LOG_STEPS, f"Resolving devices for Deployment ID: {self.args.deployment_id}")
            rows = self.db.execute("SELECT devices FROM public.deployment_details WHERE deployment_id = %s", (self.args.deployment_id,))

            if rows and rows[0]['devices']:
                raw_devices = rows[0]['devices']
                macs_to_find = []

                try:
                    # Handle various formats stored in DB (JSON list, string representation, nested lists)
                    # Format seen in well-api: '["MAC1", "MAC2"]' or '[[id,id,loc,desc,MAC],...]'
                    try:
                        device_data = json.loads(raw_devices)
                    except json.JSONDecodeError:
                        try:
                            device_data = ast.literal_eval(raw_devices)
                        except (ValueError, SyntaxError):
                            # Fallback for simple comma separated string
                            device_data = [d.strip() for d in raw_devices.strip('[]"\'').split(',')]

                    for item in device_data:
                        # Format: [[id, id, "Loc", "Desc", "MAC"], ...]
                        if isinstance(item, list) and len(item) > 4:
                            macs_to_find.append(item[4])
                        # Format: ["MAC1", "MAC2"]
                        elif isinstance(item, str):
                            # Clean potential quotes
                            clean_mac = item.strip().replace('"', '').replace("'", "")
                            if len(clean_mac) in [12, 17]:
                                macs_to_find.append(clean_mac)

                    if macs_to_find:
                        placeholders = ','.join(['%s'] * len(macs_to_find))
                        sql = f"SELECT device_id FROM public.devices WHERE device_mac IN ({placeholders})"
                        d_rows = self.db.execute(sql, tuple(macs_to_find))
                        if d_rows:
                            for r in d_rows:
                                ids.add(r['device_id'])
                            logger.info(f"Found {len(d_rows)} devices in deployment {self.args.deployment_id}")
                        else:
                            logger.warning("No matching devices found in DB for the MACs in deployment.")
                except Exception as e:
                    logger.error(f"Failed to parse deployment devices string: {e}")
            else:
                logger.warning(f"Deployment {self.args.deployment_id} not found or empty.")

        self.target_device_ids = sorted(list(ids))
        if not self.target_device_ids:
            logger.error("No valid devices found based on input parameters.")
            sys.exit(1)

        logger.log(LOG_DATA, f"Target Device IDs: {self.target_device_ids}")

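For reference, a minimal sketch (values invented, not taken from the database) of the two deployment 'devices' shapes this parser accepts and what it extracts from each:

# Illustrative only: the two deployment 'devices' formats handled above.
import ast
import json

nested = '[[12, 560, "Kitchen", "Under sink", "64B70888FAB0"], [13, 561, "Bathroom", "Wall", "64B70888FAB1"]]'
flat = '["64B70888FAB0", "64:B7:08:88:FA:B1"]'

print([item[4] for item in ast.literal_eval(nested)])  # ['64B70888FAB0', '64B70888FAB1']
print(json.loads(flat))                                # ['64B70888FAB0', '64:B7:08:88:FA:B1']
# Both 12-char and 17-char (colon-separated) MAC strings pass the length check above.
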
    def run(self):
        # 1. Setup
        self.resolve_devices()

        # 2. Parse Dates
        try:
            start_date = datetime.datetime.strptime(self.args.date_from, "%Y-%m-%d")
            end_date = datetime.datetime.strptime(self.args.date_to, "%Y-%m-%d")
        except ValueError:
            logger.error("Invalid date format. Use YYYY-MM-DD")
            sys.exit(1)

        if start_date > end_date:
            start_date, end_date = end_date, start_date

        # 3. Open Zip File
        try:
            logger.info(f"Creating output file: {self.args.outFile}")
            zip_buffer = zipfile.ZipFile(self.args.outFile, 'w', zipfile.ZIP_DEFLATED)
        except Exception as e:
            logger.error(f"Failed to create zip file: {e}")
            sys.exit(1)

        # 4. Iterate Days
        current_date = start_date
        total_rows_exported = 0

        try:
            while current_date <= end_date:
                day_str = current_date.strftime("%Y-%m-%d")

                # Define 24h window (UTC assumed based on schema usage in well-api)
                t_start = f"{day_str} 00:00:00"
                t_end = (current_date + datetime.timedelta(days=1)).strftime("%Y-%m-%d 00:00:00")

                logger.info(f"Processing date: {day_str}...")

                # Build Query
                sql = DataProcessor.build_fetch_query(
                    self.target_device_ids,
                    t_start,
                    t_end,
                    self.args.radar_part,
                    self.args.allRadar,
                    self.args.group_by
                )

                # Execute
                raw_rows = self.db.execute(sql)

                if raw_rows:
                    logger.log(LOG_DATA, f" -> Fetched {len(raw_rows)} raw rows (including mtype splits).")

                    # Process / Pivot Data
                    processed_rows, headers = DataProcessor.process_rows(raw_rows)

                    count = len(processed_rows)
                    total_rows_exported += count
                    logger.log(LOG_STEPS, f" -> Processed into {count} unique time records.")

                    # Generate CSV in memory
                    csv_buffer = io.StringIO()
                    writer = csv.DictWriter(csv_buffer, fieldnames=headers)
                    writer.writeheader()
                    writer.writerows(processed_rows)

                    # Add to Zip
                    # Format: {ID}_{DATE}_{GROUP}_rc_data.csv
                    csv_filename = f"{self.file_identifier}_{day_str}_{self.args.group_by}_rc_data.csv"
                    zip_buffer.writestr(csv_filename, csv_buffer.getvalue())
                    logger.log(LOG_STEPS, f" -> Added {csv_filename} to zip.")
                else:
                    logger.log(LOG_STEPS, " -> No data found for this date.")

                current_date += datetime.timedelta(days=1)

        except KeyboardInterrupt:
            logger.warning("Operation cancelled by user.")
        except Exception as e:
            logger.error(f"An error occurred during processing: {e}")
            if self.args.debug > 0:
                import traceback
                traceback.print_exc()
        finally:
            zip_buffer.close()
            self.db.close()

        logger.info(f"Export complete. Total records: {total_rows_exported}. File: {self.args.outFile}")

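As a quick sanity check of the output layout, a hedged sketch of inspecting the finished archive; the filenames assume the first epilog example below (device 560, by_minute grouping) and that each day actually had data.

# Illustrative only: listing the per-day CSVs inside the produced archive.
import zipfile

with zipfile.ZipFile("c.zip") as zf:
    print(zf.namelist())
# e.g. ['560_2025-03-09_by_minute_rc_data.csv', '560_2025-03-10_by_minute_rc_data.csv', ...]
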
# ==========================================
# Argument Parsing
# ==========================================

def parse_arguments():
    parser = argparse.ArgumentParser(
        description="Wellnuo Database Export Tool",
        formatter_class=argparse.RawTextHelpFormatter,
        epilog="""Examples:
  # Query by Device ID
  wellDbQuery.py --device_id 560 --date_from 2025-03-09 --date_to 2025-04-22 --outFile c.zip

  # Query by Deployment ID (all devices in deployment)
  wellDbQuery.py --deployment_id 21 --date_from 2025-06-01 --date_to 2025-06-01 --outFile deployment_21.zip

  # Query by MAC with all radar columns and high debug
  wellDbQuery.py --mac 64B70888FAB0 --date_from 2025-01-01 --date_to 2025-01-01 --allRadar -d 2
"""
    )

    # Selection Group (Mutually Exclusive logic handled in code)
    sel = parser.add_argument_group('Target Selection (One required)')
    sel.add_argument('--device_id', '-di', type=int, help='Target Device ID (internal DB id)')
    sel.add_argument('--well_id', '-wi', type=int, help='Target Well ID (external id)')
    sel.add_argument('--mac', '-m', type=str, help='Target Device MAC Address')
    sel.add_argument('--deployment_id', '-depid', type=int, help='Target Deployment ID (fetches all devices in deployment)')

    # Date Group
    date = parser.add_argument_group('Date Range')
    date.add_argument('--date_from', '-df', type=str, required=True, help='Start Date (YYYY-MM-DD)')
    date.add_argument('--date_to', '-dt', type=str, required=True, help='End Date (YYYY-MM-DD)')

    # DB Config
    db = parser.add_argument_group('Database Configuration')
    db.add_argument('--db_name', default=DEFAULTS['DB_NAME'], help=f"Default: {DEFAULTS['DB_NAME']}")
    db.add_argument('--db_username', default=DEFAULTS['DB_USER'], help=f"Default: {DEFAULTS['DB_USER']}")
    db.add_argument('--db_password', default=DEFAULTS['DB_PASS'], help="Default: from .env")
    db.add_argument('--db_host', default=DEFAULTS['DB_HOST'], help=f"Default: {DEFAULTS['DB_HOST']}")
    db.add_argument('--db_port', default=DEFAULTS['DB_PORT'], help=f"Default: {DEFAULTS['DB_PORT']}")

    # Options
    opts = parser.add_argument_group('Export Options')
    opts.add_argument('--outFile', '-o', default=DEFAULTS['OUT_FILE'], help=f"Output ZIP filename (default: {DEFAULTS['OUT_FILE']})")
    opts.add_argument('--radar_part', '-radar', default=DEFAULTS['RADAR_PART'], help=f"Radar column expression (default: {DEFAULTS['RADAR_PART']})")
    opts.add_argument('--allRadar', '-allr', action='store_true', help="Retrieve all raw radar columns instead of calculated part")
    opts.add_argument('--group_by', '-g', default=DEFAULTS['GROUP_BY'], choices=['by_minute', 'by_10_seconds', 'by_hour', 'by_10_minute'], help="Time aggregation bucket")
    opts.add_argument('-d', '--debug', type=int, default=0, choices=[0, 1, 2, 3], help="Debug level: 0=Info, 1=Steps, 2=Data, 3=SQL")

    args = parser.parse_args()

    if not any([args.device_id, args.well_id, args.mac, args.deployment_id]):
        parser.error("You must provide one of --device_id, --well_id, --mac, or --deployment_id")

    return args

def setup_logging_level(level):
    if level == 0:
        logger.setLevel(logging.INFO)
    elif level == 1:
        logger.setLevel(LOG_STEPS)
    elif level == 2:
        logger.setLevel(LOG_DATA)
    elif level == 3:
        logger.setLevel(LOG_SQL)

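setup_logging_level relies on the custom LOG_STEPS / LOG_DATA / LOG_SQL levels that are defined earlier in the script and not shown in this excerpt. A minimal sketch of how such levels can be registered, assuming placeholder numeric values (the real ones may differ):

# Illustrative only: registering custom logging levels below INFO.
import logging

LOG_STEPS = 15  # placeholder values; each step down reveals more detail
LOG_DATA = 13
LOG_SQL = 11

logging.addLevelName(LOG_STEPS, "STEPS")
logging.addLevelName(LOG_DATA, "DATA")
logging.addLevelName(LOG_SQL, "SQL")

logging.basicConfig(format="%(levelname)s %(message)s")
logger = logging.getLogger("wellDbQuery")
logger.setLevel(LOG_DATA)                      # equivalent to -d 2 in the mapping above
logger.log(LOG_STEPS, "shown at -d 1 and up")  # 15 >= 13, emitted
logger.log(LOG_SQL, "shown only at -d 3")      # 11 < 13, filtered out
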
# ==========================================
# Entry Point
# ==========================================

if __name__ == "__main__":
    args = parse_arguments()
    setup_logging_level(args.debug)

    exporter = WellExporter(args)
    exporter.run()
@ -4676,9 +4676,9 @@ async function DeploymentChange() {
}

}
- groups = "0"; // Document.getElementById("group_id").value;
- deployments = "0"; // Document.getElementById("Deployments").value;
- locations = "0"; // Document.getElementById("Locations").value;
+ groups = "0"; // document.getElementById("group_id").value;
+ deployments = document.getElementById("Deployments").value;
+ locations = "0"; // document.getElementById("Locations").value;
RequestFilteredDevices(groups, deployments, locations, fresh);
ShowHistoryMap();
//PrepareChart(devices_count); //that is called inside ShowHistoryMap

111
welldrysense_job_db-update.sql
Normal file
@ -0,0 +1,111 @@
-- =============================================================================
-- SQL Changes for WellDrySense Product
-- Description: Creates a dedicated 'jobs' table for the Water Damage Mitigation
--              product without altering any existing tables. This script is
--              idempotent and safe to run multiple times.
-- =============================================================================

-- 1. Create the new 'jobs' table for the Water Damage Mitigation product.
CREATE TABLE IF NOT EXISTS public.jobs (
    job_id                SERIAL PRIMARY KEY,
    customer_name         TEXT,
    mitigation_person_id  INTEGER REFERENCES public.person_details(user_id) ON DELETE SET NULL,
    key_person_name       TEXT,
    key_person_mobile     TEXT,
    key_person_email      TEXT,
    address_street        TEXT,
    address_city          TEXT,
    address_zip           TEXT,
    address_state         TEXT,
    address_country       TEXT,
    lat                   REAL,
    lng                   REAL,
    date_from             TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    date_to               TIMESTAMP WITH TIME ZONE,
    job_status            TEXT DEFAULT 'Active' NOT NULL, -- Can be 'Active', 'Stopped', 'Archived'
    devices               JSONB, -- Stores an array of device objects, e.g., [{"mac": "...", "location": "..."}]
    alerts_config         JSONB,
    created_at            TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    user_edit             INTEGER REFERENCES public.person_details(user_id) ON DELETE SET NULL
);

-- Add comments for clarity
COMMENT ON TABLE public.jobs IS 'Stores job information for the WellDrySense water damage mitigation product.';
COMMENT ON COLUMN public.jobs.devices IS 'A JSON array of device objects assigned to this job. Structure: [{"mac": "AA:BB:CC:DD:EE:FF", "location": "Kitchen Under Sink"}, ...].';
COMMENT ON COLUMN public.jobs.job_status IS 'The current lifecycle state of the job: Active, Stopped, or Archived.';
COMMENT ON COLUMN public.jobs.mitigation_person_id IS 'The user from person_details responsible for this job.';

-- 2. Add indexes for efficient querying.
CREATE INDEX IF NOT EXISTS idx_jobs_job_status ON public.jobs(job_status);
CREATE INDEX IF NOT EXISTS idx_jobs_mitigation_person_id ON public.jobs(mitigation_person_id);

-- 3. Add a GIN index for efficient searching within the 'devices' JSONB column.
--    This is crucial for the job_available_devices API.
CREATE INDEX IF NOT EXISTS idx_jobs_devices_gin ON public.jobs USING GIN (devices);

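For context, an illustration (not part of the commit, sample values invented) of the containment-style lookups the GIN index above is meant to accelerate; it is left commented out so the migration stays side-effect-free:

-- Illustrative only: a sample row and the kind of query idx_jobs_devices_gin supports.
-- INSERT INTO public.jobs (customer_name, job_status, devices)
-- VALUES ('Demo Customer', 'Active',
--         '[{"mac": "AA:BB:CC:DD:EE:FF", "location": "Kitchen Under Sink"}]'::jsonb);
--
-- SELECT job_id, customer_name
--   FROM public.jobs
--  WHERE job_status = 'Active'
--    AND devices @> '[{"mac": "AA:BB:CC:DD:EE:FF"}]'::jsonb;  -- JSONB containment can use the GIN index
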
-- 4. Grant necessary permissions to the application user.
GRANT ALL ON TABLE public.jobs TO well_app;
GRANT USAGE, SELECT ON SEQUENCE jobs_job_id_seq TO well_app;

-- --- End of Script ---

-- -- =============================================================================
-- -- SQL Changes for WellDrySense Product (Version 1.1 - Idempotent)
-- -- File: drysense_db_update.sql
-- -- Description: Creates and configures the 'jobs' table for the DrySense product.
-- -- =============================================================================

-- -- 1. Create the 'jobs' table if it does not already exist.
-- CREATE TABLE IF NOT EXISTS public.jobs (
--     job_id SERIAL PRIMARY KEY,
--     customer_name TEXT,
--     mitigation_person_id INTEGER REFERENCES public.person_details(user_id),
--     key_person_name TEXT,
--     key_person_mobile TEXT,
--     key_person_email TEXT,
--     address_street TEXT,
--     address_city TEXT,
--     address_zip TEXT,
--     address_state TEXT,
--     address_country TEXT,
--     lat REAL,
--     lng REAL,
--     date_from TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     date_to TIMESTAMP WITH TIME ZONE,
--     job_status TEXT DEFAULT 'Active' NOT NULL, -- e.g., 'Active', 'Stopped', 'Archived'
--     devices TEXT, -- Storing as a JSON string of device MACs
--     alerts_config JSONB, -- Store alert settings as a JSON object
--     created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     user_edit INTEGER REFERENCES public.person_details(user_id)
-- );

-- -- Add comments to the table and columns for documentation purposes.
-- -- These commands are safe to re-run.
-- COMMENT ON TABLE public.jobs IS 'Stores job information for the WellDrySense water damage mitigation product.';
-- COMMENT ON COLUMN public.jobs.customer_name IS 'The name of the client for whom the job is being done.';
-- COMMENT ON COLUMN public.jobs.mitigation_person_id IS 'The user (from person_details) responsible for the job.';
-- COMMENT ON COLUMN public.jobs.key_person_name IS 'The name of the primary contact person at the client site.';
-- COMMENT ON COLUMN public.jobs.job_status IS 'Lifecycle status of the job: Active, Stopped, Archived.';
-- COMMENT ON COLUMN public.jobs.date_to IS 'The date the job was stopped or archived.';
-- COMMENT ON COLUMN public.jobs.devices IS 'A JSON array of device MAC addresses assigned to this job.';
-- COMMENT ON COLUMN public.jobs.alerts_config IS 'JSON object storing alert thresholds, e.g., {"temp_abs_high": 30, "hum_rel_above": 15}.';

-- -- 2. Add an index for performance if it does not already exist.
-- CREATE INDEX IF NOT EXISTS idx_jobs_job_status ON public.jobs(job_status);

-- -- 3. Rename the 'description' column to 'location_name' in the 'devices' table if it exists.
-- -- This DO block ensures the ALTER command only runs if the 'description' column exists.
-- DO $$
-- BEGIN
--     IF EXISTS(SELECT 1 FROM information_schema.columns WHERE table_name='devices' AND column_name='description') THEN
--         ALTER TABLE public.devices RENAME COLUMN description TO location_name;
--         COMMENT ON COLUMN public.devices.location_name IS 'User-defined name for the specific location of the device on a job site (e.g., Kitchen Under Sink).';
--     END IF;
-- END $$;

-- -- 4. Grant necessary permissions to the application user 'well_app'.
-- -- These commands are safe to re-run.
-- GRANT ALL ON TABLE public.jobs TO well_app;
-- GRANT USAGE, SELECT ON SEQUENCE jobs_job_id_seq TO well_app;

-- -- --- End of Script ---