
MonitoringClient

The MonitoringClient provides execution monitoring and analytics for tracking order performance, latency metrics, fill rates, and error analysis.

Overview

Monitor and analyze:

  • Order Performance: Fill rates, slippage, execution quality
  • Latency Metrics: End-to-end latency, venue latency, percentiles
  • Algorithm Analytics: TWAP/VWAP benchmark comparison
  • Error Tracking: Error patterns, failure analysis
  • Real-time Dashboard: Live metrics and alerts

Quick Start

from zenotc import ZenOTCClient

client = ZenOTCClient(
    api_key="your_api_key",
    api_secret="your_api_secret",
)

# Get dashboard overview
dashboard = await client.monitoring.get_dashboard()

print(f"Orders Today: {dashboard.orders_today}")
print(f"Fill Rate: {dashboard.fill_rate}%")
print(f"Avg Latency: {dashboard.avg_latency_ms}ms")

Dashboard

Get Dashboard Data

dashboard = await client.monitoring.get_dashboard(
    time_range="24h",  # "1h", "24h", "7d", "30d"
)

# Summary metrics
print(f"Total Orders: {dashboard.total_orders}")
print(f"Filled Orders: {dashboard.filled_orders}")
print(f"Cancelled Orders: {dashboard.cancelled_orders}")
print(f"Fill Rate: {dashboard.fill_rate}%")
print(f"Total Volume: ${dashboard.total_volume}")

# Latency metrics
print(f"Avg Latency: {dashboard.avg_latency_ms}ms")
print(f"P50 Latency: {dashboard.p50_latency_ms}ms")
print(f"P95 Latency: {dashboard.p95_latency_ms}ms")
print(f"P99 Latency: {dashboard.p99_latency_ms}ms")

# Error summary
print(f"Error Rate: {dashboard.error_rate}%")
print(f"Total Errors: {dashboard.total_errors}")

Order Monitoring

Get Order Details

order = await client.monitoring.get_order(order_id)

print(f"Order ID: {order.id}")
print(f"Status: {order.status}")
print(f"Created: {order.created_at}")
print(f"Filled: {order.filled_at}")
print(f"Duration: {order.execution_duration_ms}ms")

# Execution details
print(f"Requested Qty: {order.requested_quantity}")
print(f"Filled Qty: {order.filled_quantity}")
print(f"Avg Price: {order.average_price}")
print(f"Slippage: {order.slippage_bps} bps")

List Orders with Metrics

orders = await client.monitoring.list_orders(
    time_range="24h",
    status="filled",  # "pending", "filled", "cancelled", "error"
    asset="BTC",
    order_type="algo",  # "market", "limit", "algo"
    min_slippage_bps=5,  # Filter by slippage
    limit=100,
)

for order in orders:
    print(f"{order.id}: {order.filled_quantity} @ {order.average_price}")
    print(f" Slippage: {order.slippage_bps} bps")
    print(f" Latency: {order.execution_duration_ms}ms")

Latency Analysis

Get Latency Metrics

latency = await client.monitoring.get_latency_metrics(
    time_range="24h",
    asset="BTC",
    order_type="limit",
)

print(f"Sample Count: {latency.sample_count}")

# Percentiles
print(f"P50: {latency.p50_ms}ms")
print(f"P75: {latency.p75_ms}ms")
print(f"P90: {latency.p90_ms}ms")
print(f"P95: {latency.p95_ms}ms")
print(f"P99: {latency.p99_ms}ms")
print(f"Max: {latency.max_ms}ms")

# Breakdown
print(f"Network Latency: {latency.network_latency_ms}ms")
print(f"Processing Latency: {latency.processing_latency_ms}ms")
print(f"Venue Latency: {latency.venue_latency_ms}ms")

Latency Histogram

histogram = await client.monitoring.get_latency_histogram(
    time_range="24h",
    bucket_size_ms=10,
)

for bucket in histogram.buckets:
    print(f"{bucket.min_ms}-{bucket.max_ms}ms: {bucket.count} ({bucket.percentage}%)")

Latency Over Time

timeseries = await client.monitoring.get_latency_timeseries(
    time_range="24h",
    interval="1h",
)

for point in timeseries.data:
    print(f"{point.timestamp}: P50={point.p50_ms}ms, P95={point.p95_ms}ms")

Fill Rate Analysis

Get Fill Metrics

fills = await client.monitoring.get_fill_metrics(
    time_range="24h",
    asset="BTC",
)

print(f"Total Orders: {fills.total_orders}")
print(f"Fully Filled: {fills.fully_filled}")
print(f"Partially Filled: {fills.partially_filled}")
print(f"Unfilled: {fills.unfilled}")
print(f"Fill Rate: {fills.fill_rate}%")
print(f"Partial Fill Rate: {fills.partial_fill_rate}%")

# Volume metrics
print(f"Requested Volume: {fills.requested_volume}")
print(f"Filled Volume: {fills.filled_volume}")
print(f"Fill Volume Rate: {fills.fill_volume_rate}%")

Fill Rate by Order Type

by_type = await client.monitoring.get_fill_rate_by_type(time_range="24h")

for entry in by_type:
    print(f"{entry.order_type}: {entry.fill_rate}% ({entry.count} orders)")

Fill Rate Over Time

timeseries = await client.monitoring.get_fill_rate_timeseries(
    time_range="7d",
    interval="1d",
)

for point in timeseries.data:
    print(f"{point.date}: {point.fill_rate}%")

Slippage Analysis

Get Slippage Metrics

slippage = await client.monitoring.get_slippage_metrics(
    time_range="24h",
    asset="BTC",
    side="buy",
)

print(f"Avg Slippage: {slippage.avg_slippage_bps} bps")
print(f"Median Slippage: {slippage.median_slippage_bps} bps")
print(f"P95 Slippage: {slippage.p95_slippage_bps} bps")
print(f"Max Slippage: {slippage.max_slippage_bps} bps")

# Volume-weighted
print(f"VWAP Slippage: {slippage.vwap_slippage_bps} bps")

Slippage Distribution

distribution = await client.monitoring.get_slippage_distribution(
    time_range="24h",
)

for bucket in distribution.buckets:
    print(f"{bucket.min_bps}-{bucket.max_bps} bps: {bucket.count} orders")

Algorithm Monitoring

Get Algo Performance

algo_metrics = await client.monitoring.get_algo_metrics(
    algo_order_id=order_id,
)

print(f"Algorithm: {algo_metrics.algo_type}")
print(f"Status: {algo_metrics.status}")

# Progress
print(f"Progress: {algo_metrics.progress_percent}%")
print(f"Slices Completed: {algo_metrics.completed_slices}/{algo_metrics.total_slices}")

# Performance vs benchmark
print(f"Benchmark VWAP: {algo_metrics.benchmark_vwap}")
print(f"Execution VWAP: {algo_metrics.execution_vwap}")
print(f"Performance: {algo_metrics.performance_bps} bps")

# Timing
print(f"Expected Duration: {algo_metrics.expected_duration_mins} min")
print(f"Actual Duration: {algo_metrics.actual_duration_mins} min")

List Algo Orders

algos = await client.monitoring.list_algo_orders(
    time_range="7d",
    algo_type="twap",  # "twap", "vwap", "iceberg", "conditional"
    status="completed",
)

for algo in algos:
    print(f"{algo.id}: {algo.algo_type}")
    print(f" Performance: {algo.performance_bps} bps vs benchmark")
    print(f" Fill Rate: {algo.fill_rate}%")

Algo Comparison

Compare algorithm performance:

comparison = await client.monitoring.compare_algos(
    time_range="30d",
    asset="BTC",
)

for algo_type, stats in comparison.items():
    print(f"{algo_type}:")
    print(f" Avg Performance: {stats.avg_performance_bps} bps")
    print(f" Avg Fill Rate: {stats.avg_fill_rate}%")
    print(f" Avg Slippage: {stats.avg_slippage_bps} bps")

Error Analysis

Get Error Summary

errors = await client.monitoring.get_error_summary(
    time_range="24h",
)

print(f"Total Errors: {errors.total_errors}")
print(f"Error Rate: {errors.error_rate}%")

for error_type, count in errors.by_type.items():
    print(f" {error_type}: {count}")

List Errors

error_list = await client.monitoring.list_errors(
    time_range="24h",
    error_type="execution_failed",
    limit=50,
)

for error in error_list:
    print(f"{error.timestamp}: {error.error_type}")
    print(f" Order: {error.order_id}")
    print(f" Message: {error.message}")
    print(f" Details: {error.details}")

Error Patterns

Detect recurring error patterns:

patterns = await client.monitoring.get_error_patterns(
    time_range="7d",
    min_occurrences=5,
)

for pattern in patterns:
    print(f"Pattern: {pattern.description}")
    print(f" Occurrences: {pattern.count}")
    print(f" First Seen: {pattern.first_seen}")
    print(f" Last Seen: {pattern.last_seen}")
    print(f" Affected Orders: {pattern.affected_orders}")

Real-Time Monitoring

Subscribe to Events

async def on_monitoring_event(event):
    if event.type == "order_filled":
        print(f"Filled: {event.order_id} @ {event.price}")
        print(f" Slippage: {event.slippage_bps} bps")
        print(f" Latency: {event.latency_ms}ms")

    elif event.type == "latency_alert":
        print(f"Latency Alert: {event.latency_ms}ms (threshold: {event.threshold_ms}ms)")

    elif event.type == "error":
        print(f"Error: {event.error_type} - {event.message}")

    elif event.type == "fill_rate_alert":
        print(f"Fill Rate Alert: {event.fill_rate}% (threshold: {event.threshold}%)")

await client.monitoring.subscribe(on_monitoring_event)

Configure Alerts

# Set up monitoring alerts
await client.monitoring.configure_alerts(
    latency_threshold_ms=500,  # Alert if latency > 500ms
    fill_rate_threshold=90,  # Alert if fill rate < 90%
    slippage_threshold_bps=20,  # Alert if slippage > 20 bps
    error_rate_threshold=5,  # Alert if error rate > 5%
)

Export Data

Export to CSV

csv_data = await client.monitoring.export_orders(
    time_range="30d",
    format="csv",
)

with open("orders_export.csv", "w") as f:
    f.write(csv_data)

Export Metrics

metrics = await client.monitoring.export_metrics(
    time_range="30d",
    metrics=["latency", "fill_rate", "slippage"],
    interval="1d",
    format="json",
)

Error Handling

from zenotc.exceptions import (
    MonitoringError,
    DataNotAvailableError,
    InvalidTimeRangeError,
)

try:
    metrics = await client.monitoring.get_latency_metrics(
        time_range="1y",  # Too long
    )
except InvalidTimeRangeError as e:
    print(f"Invalid time range: {e.max_allowed}")
except DataNotAvailableError:
    print("No data available for the requested period")
except MonitoringError as e:
    print(f"Monitoring error: {e}")

Best Practices

  1. Monitor continuously: Set up real-time alerts for critical metrics
  2. Review daily: Check dashboard metrics daily for anomalies
  3. Track trends: Use timeseries data to identify performance trends
  4. Analyze errors: Regularly review error patterns to prevent issues
  5. Benchmark algorithms: Compare algo performance to choose optimal strategies
  6. Export regularly: Export historical data for compliance and analysis