Documentation Index: fetch the complete documentation index at https://hyrex.io/docs/llms.txt
Use this file to discover all available pages before exploring further.
Prepare comprehensive user context for AI assistants and LLM applications by orchestrating parallel data fetching and intelligent summarization.
Overview
This example demonstrates sophisticated context engineering patterns:
Parallel data fetching from multiple APIs
LLM-powered summarization for context compression
Orchestrated task workflows with dependency management
Bulk processing for multiple users
Context caching and optimization strategies
Perfect for building personalized AI assistants, recommendation engines, or any LLM application requiring rich user context.
Task Definitions
from hyrex import HyrexRegistry
import openai
import requests
from typing import List, Dict
# Shared task registry: every @hy.task-decorated function below is
# registered here so Hyrex workers can discover and execute them.
hy = HyrexRegistry()
@hy.task
def fetch_user_profile(user_id: str) -> Dict:
    """Fetch a user's profile record from the example API.

    Args:
        user_id: Identifier of the user to look up.

    Returns:
        The decoded JSON body of the profile response.
    """
    # Timeout prevents a hung API call from blocking the worker forever;
    # raise_for_status surfaces HTTP errors instead of returning an
    # error payload as if it were profile data.
    response = requests.get(
        f"https://api.example.com/users/{user_id}", timeout=10
    )
    response.raise_for_status()
    return response.json()
@hy.task
def fetch_recent_activities(user_id: str, limit: int = 50) -> List[Dict]:
    """Fetch a user's most recent activities.

    Args:
        user_id: Identifier of the user.
        limit: Maximum number of activities to return (default 50).

    Returns:
        A list of activity records decoded from the JSON response.
    """
    # Timeout + status check: fail fast and loudly on a slow or
    # unhealthy upstream instead of silently returning error JSON.
    response = requests.get(
        f"https://api.example.com/users/{user_id}/activities?limit={limit}",
        timeout=10,
    )
    response.raise_for_status()
    return response.json()
@hy.task
def fetch_user_preferences(user_id: str) -> Dict:
    """Fetch a user's preferences and settings.

    Args:
        user_id: Identifier of the user.

    Returns:
        The decoded JSON body of the preferences response.
    """
    # Same hardening as the other fetch tasks: bounded wait, explicit
    # failure on non-2xx responses.
    response = requests.get(
        f"https://api.example.com/users/{user_id}/preferences", timeout=10
    )
    response.raise_for_status()
    return response.json()
@hy.task
def build_context_summary(
    user_id: str, profile: Dict, activities: List[Dict], preferences: Dict
) -> str:
    """Use an LLM to compress raw user data into a short context summary.

    Args:
        user_id: Identifier of the user (carried for task traceability;
            not referenced in the prompt itself).
        profile: Raw profile data from fetch_user_profile.
        activities: Raw activity list from fetch_recent_activities.
        preferences: Raw preferences from fetch_user_preferences.

    Returns:
        The model's summary text.
    """
    context_data = {
        "profile": profile,
        # Only the 10 most recent activities go into the prompt to keep
        # token usage bounded.
        "recent_activities": activities[:10],
        "preferences": preferences,
    }
    prompt = f"""
    Create a concise summary of this user's context for an AI assistant:
    Profile: {context_data['profile']}
    Recent Activities: {context_data['recent_activities']}
    Preferences: {context_data['preferences']}
    Focus on the most relevant information for personalized assistance.
    """
    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=500,
    )
    return response.choices[0].message.content
@hy.task
def prepare_llm_context(user_id: str):
    """Build a complete LLM-ready context bundle for one user.

    Dispatches the three data-fetch tasks concurrently, waits for all of
    them, then runs the LLM summarization step over the combined results.

    Returns:
        Dict containing the user_id, the LLM context summary, and the raw
        profile/activities/preferences payloads.
    """
    # Fan out the independent fetches before blocking on any of them so
    # they run in parallel on the workers.
    pending = {
        "profile": fetch_user_profile.send(user_id),
        "activities": fetch_recent_activities.send(user_id, 50),
        "preferences": fetch_user_preferences.send(user_id),
    }
    # Join on every fetch; the order of the .get() calls is irrelevant.
    results = {name: task.get() for name, task in pending.items()}
    # Summarization consumes all three results, so it must run last.
    summary = build_context_summary.send(
        user_id,
        results["profile"],
        results["activities"],
        results["preferences"],
    ).get()
    return {
        "user_id": user_id,
        "context_summary": summary,
        "raw_data": {
            "profile": results["profile"],
            "activities": results["activities"],
            "preferences": results["preferences"],
        },
    }
REST API Endpoints
Python (FastAPI)
TypeScript (Express)
from fastapi import FastAPI
from pydantic import BaseModel
from .tasks import prepare_llm_context
# FastAPI application exposing the context-preparation REST endpoints.
app = FastAPI()
class ContextRequest(BaseModel):
    """Request body for single-user context preparation."""

    user_id: str
    # Whether raw source data should accompany the summary in results.
    include_raw_data: bool = False
class BulkContextRequest(BaseModel):
    """Request body for bulk (multi-user) context preparation."""

    user_ids: list[str]
    # Whether raw source data should accompany the summaries in results.
    include_raw_data: bool = False
@app.post("/context/prepare")
async def prepare_user_context(request: ContextRequest):
    """Kick off context preparation for one user.

    Returns immediately with the task id so the caller can poll
    /context/status/{task_id} for completion.
    """
    # NOTE(review): request.include_raw_data is accepted but not forwarded
    # to the task — confirm whether the worker should honor it.
    task = prepare_llm_context.send(request.user_id)
    return {
        "message": "Context preparation started",
        "task_id": task.task_id,
        "user_id": request.user_id,
    }
@app.post("/context/bulk-prepare")
async def prepare_bulk_contexts(request: BulkContextRequest):
    """Kick off context preparation for many users in parallel.

    Each user gets an independent background task; the response maps
    every user_id to its task_id so callers can poll each one.
    """
    # NOTE(review): request.include_raw_data is accepted but not forwarded
    # to the tasks — confirm whether the worker should honor it.
    task_ids = [
        {"user_id": user_id, "task_id": prepare_llm_context.send(user_id).task_id}
        for user_id in request.user_ids
    ]
    return {
        "message": f"Started context preparation for {len(request.user_ids)} users",
        "tasks": task_ids,
    }
@app.get("/context/status/{task_id}")
async def get_context_status(task_id: str):
    """Report whether a context-preparation task has finished.

    Args:
        task_id: Id returned by one of the /context/*prepare endpoints.

    Returns:
        "complete" with the task's result, or "processing" with progress.
    """
    # Route string fixed: the original "/context/status/ {task_id} "
    # contained spaces, so FastAPI would never bind the path parameter.
    task = hy.get_task(task_id)
    if task.is_complete:
        return {
            "status": "complete",
            "context_data": task.result,
        }
    return {
        "status": "processing",
        "progress": task.progress,
    }
Advanced Context Patterns
Context Caching
import json
from datetime import datetime

from hyrex import HyrexKV
@hy.task
def get_cached_context(user_id: str, max_age_minutes: int = 60):
    """Return a user's context from the KV cache, rebuilding it when stale.

    Args:
        user_id: User whose context is needed.
        max_age_minutes: Maximum acceptable cache age before a rebuild.

    Returns:
        The context dict produced by prepare_llm_context (cached or fresh).
    """
    cache_key = f"user_context:{user_id}"
    try:
        cached_data = HyrexKV.get(cache_key)
        context_info = json.loads(cached_data)
        # Serve from cache only while the entry is younger than the TTL.
        cached_time = datetime.fromisoformat(context_info['timestamp'])
        age_minutes = (datetime.now() - cached_time).total_seconds() / 60
        if age_minutes < max_age_minutes:
            return context_info['context']
    except Exception:
        # Deliberate best-effort: any cache failure (missing key, bad
        # JSON, malformed timestamp) is treated as a miss. Narrowed from
        # a bare `except:` so KeyboardInterrupt/SystemExit still escape.
        pass
    # Cache miss or stale entry — prepare fresh context.
    fresh_context = prepare_llm_context.send(user_id).get()
    # Store the result with its creation time for future staleness checks;
    # the KV-level expiry mirrors max_age_minutes as a backstop.
    HyrexKV.set(cache_key, json.dumps({
        'context': fresh_context,
        'timestamp': datetime.now().isoformat(),
    }), expiry_seconds=max_age_minutes * 60)
    return fresh_context
Context Personalization
@hy.task
def personalize_context(base_context: dict, interaction_type: str):
    """Adapt a prepared context summary for a specific interaction type.

    Args:
        base_context: Output of prepare_llm_context; must contain a
            'context_summary' key.
        interaction_type: Free-form label (e.g. "support", "sales") used
            to steer the rewrite.

    Returns:
        Dict with the interaction type, the adapted summary text, and the
        untouched original context.
    """
    prompt = f"""
    Adapt this user context for a {interaction_type} interaction:
    {base_context['context_summary']}
    Emphasize the most relevant aspects for {interaction_type}.
    Keep it under 200 tokens.
    """
    # Cheaper model than the initial summarization: this is a light
    # rewrite of already-compressed text.
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=200,
    )
    return {
        "interaction_type": interaction_type,
        "personalized_context": response.choices[0].message.content,
        "original_context": base_context,
    }
Usage Examples
# Prepare context for a single user
curl -X POST http://localhost:8000/context/prepare \
-H "Content-Type: application/json" \
-d '{"user_id": "user123"}'
# Bulk prepare contexts for multiple users
curl -X POST http://localhost:8000/context/bulk-prepare \
-H "Content-Type: application/json" \
-d '{
"user_ids": ["user123", "user456", "user789"],
"include_raw_data": false
}'
# Check context preparation status
curl http://localhost:8000/context/status/task_12345
Context Engineering Best Practices
1. Parallel Processing
Fetch different data sources simultaneously
Use task orchestration to manage dependencies
Optimize for speed when building real-time context
2. Context Compression
Use LLMs to summarize large amounts of raw data
Focus on relevance for the specific use case
Balance detail with token limits
3. Caching Strategy
Cache context to avoid expensive rebuilds
Set appropriate TTL based on data freshness needs
Use cache invalidation for critical data changes
4. Privacy & Security
Implement proper data access controls
Sanitize sensitive information before LLM processing
Log context usage for audit trails
Production Considerations
Rate limiting: Manage API calls to external services and LLM providers
Fallback handling: Provide default context when data sources fail
Cost optimization: Balance context richness with LLM token costs
Performance: Monitor context preparation latency and optimize bottlenecks
Data freshness: Implement appropriate caching and invalidation strategies
Next Steps
Document Embeddings Build searchable knowledge bases
Error Handling Handle API failures gracefully