Documentation Index Fetch the complete documentation index at: https://hyrex.io/docs/llms.txt
Use this file to discover all available pages before exploring further.
Monitor your Hyrex applications and implement scaling strategies for optimal performance.
Monitoring
Cloud Dashboard — Access production metrics at hyrex.io/cloud:
Task metrics: throughput, latency, success rates
Worker health: utilization, memory usage, uptime
Queue monitoring: depths, processing times, backlog
Error tracking: failure rates, error logs, alerts
Alerts Configure alerts in the Hyrex Cloud dashboard:
High error rates
Queue backlog warnings
Worker health issues
Capacity thresholds
Hyrex Studio — Launch the monitoring dashboard at https://local.hyrex.studio to monitor:
Task queue status
Worker health and performance
Task execution history
Error logs and debugging
Custom Metrics — Integrate with your monitoring stack:
import logging
from prometheus_client import Counter, Histogram
# Custom Prometheus metrics: a counter labelled by queue and outcome,
# plus a histogram of task runtimes.
task_counter = Counter('hyrex_tasks_total', 'Total tasks processed', ['queue', 'status'])
task_duration = Histogram('hyrex_task_duration_seconds', 'Task duration')
@hy.task
def monitored_task(ctx):
    """Run the task body while recording Prometheus duration and outcome metrics."""
    with task_duration.time():
        try:
            # Your task logic
            result = process_data(ctx)
            task_counter.labels(queue='default', status='success').inc()
            return result
        except Exception as e:
            # Count the failure, log it, then re-raise so Hyrex can retry.
            task_counter.labels(queue='default', status='error').inc()
            logging.error(f"Task failed: {e} ")
            raise
Scaling Strategies
Queue Design
Design queues based on task characteristics:
# Separate queues by priority and resource needs
@hy.task(queue="critical", max_retries=5)
def process_payment(ctx):
    """High-priority, low-latency work; retried up to 5 times."""
    pass
@hy.task(queue="batch", timeout_seconds=3600)
def generate_report(ctx):
    """Long-running batch processing; allowed up to an hour per run."""
    pass
@hy.task(queue="io-heavy", max_concurrency=50)
def fetch_external_data(ctx):
    """I/O-bound work that tolerates high concurrency (50 at once)."""
    pass
@hy.task(queue="cpu-intensive", max_concurrency=4)
def process_images(ctx):
    """CPU-bound work; concurrency capped at 4 to avoid core contention."""
    pass
Worker Scaling
Scale workers based on queue patterns:
# Process all queues (no --queue_pattern flag)
hyrex run-worker app:app
# Process only queues matching a glob pattern
hyrex run-worker app:app --queue_pattern "critical-*"
# Dedicated worker pools sized per workload: more processes for the
# email/batch queues, fewer for the CPU-bound queue
hyrex run-worker app:app --queue_pattern "email-*" --num_processes 5
hyrex run-worker app:app --queue_pattern "batch-*" --num_processes 20
hyrex run-worker app:app --queue_pattern "cpu-*" --num_processes 4
Horizontal Scaling
Kubernetes Example
# Deployment running 3 replicas of the Hyrex worker container.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hyrex-worker
spec:
  replicas: 3
  selector:
    matchLabels:
      app: hyrex-worker
  template:
    metadata:
      labels:
        app: hyrex-worker
    spec:
      containers:
        - name: worker
          image: myapp:latest
          env:
            # API key injected from a pre-created Secret named hyrex-secrets
            - name: HYREX_API_KEY
              valueFrom:
                secretKeyRef:
                  name: hyrex-secrets
                  key: api-key
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
Auto-scaling Based on Queue Depth
# Custom autoscaling logic
import time  # required: the report below timestamps itself with time.time()

@hy.schedule("*/2 * * * *")  # Every 2 minutes
def autoscale_workers():
    """Scale workers based on queue depth.

    Samples the depth of each monitored queue, then records a scale-up
    decision for queues with more than 100 pending tasks and a
    scale-down decision for queues with fewer than 10.

    Returns:
        dict with the sample timestamp, the raw queue depths, and the
        list of scaling decisions (possibly empty).
    """
    queue_depths = {
        "critical": hy.get_queue_depth("critical"),
        "batch": hy.get_queue_depth("batch"),
        "default": hy.get_queue_depth("default"),
    }

    scaling_decisions = []
    for queue, depth in queue_depths.items():
        if depth > 100:
            # Scale up: roughly one worker per 20 queued tasks, capped at 10.
            scaling_decisions.append({
                "queue": queue,
                "action": "scale_up",
                "target_workers": min(10, depth // 20),
            })
        elif depth < 10:
            # Scale down, but always keep at least one worker alive.
            scaling_decisions.append({
                "queue": queue,
                "action": "scale_down",
                "target_workers": max(1, depth // 5),
            })

    return {
        "timestamp": time.time(),
        "queue_depths": queue_depths,
        "scaling_decisions": scaling_decisions,
    }
Task Optimization
# Optimize task performance
@hy.task(
    queue="optimized",
    timeout_seconds=60,
    max_retries=3,
    retry_delay_seconds=5,
)
def optimized_task(ctx):
    """Well-optimized task: pooled connection, batched queries, chunked processing."""
    # Use connection pooling
    with get_db_connection() as conn:
        # Batch operations when possible.
        # NOTE(review): `queries` is not defined in this snippet — presumably
        # built elsewhere from ctx; confirm before copying this example.
        results = conn.execute_batch(queries)

    # Process in chunks to manage memory
    for chunk in chunked_data(ctx.data, chunk_size=1000):
        process_chunk(chunk)

    return {"processed": len(ctx.data)}
Resource Monitoring
import psutil
import gc
@hy.task
def resource_aware_task(ctx):
    """Monitor resource usage before and after running the task body."""
    # If memory pressure is high, reclaim what we can before starting.
    memory_percent = psutil.virtual_memory().percent
    if memory_percent > 80:
        gc.collect()  # Force garbage collection
        logging.warning(f"High memory usage: {memory_percent} %")

    # Your task logic here
    result = process_data(ctx)

    # Report resource usage alongside the result for downstream monitoring.
    return {
        "result": result,
        "memory_usage": psutil.virtual_memory().percent,
        "cpu_usage": psutil.cpu_percent(),
    }
Next Steps
Best Practices Production checklist and maintenance
Deployment Environment setup and deployment