Quantitative Analytics
Horizon ships ~25 Rust-native quantitative functions, 3 streaming detectors, pipeline integrations, stress testing, and Combinatorial Purged Cross-Validation (CPCV). All computation runs in Rust for maximum throughput.
Information Theory
Measure uncertainty, divergence, and information flow in prediction markets.
import horizon as hz
# Binary entropy (max at p=0.5)
h = hz.shannon_entropy(0.5) # 1.0 bits
# Multi-outcome entropy
h = hz.joint_entropy([0.25, 0.25, 0.25, 0.25]) # 2.0 bits
# KL divergence between distributions
kl = hz.kl_divergence([0.9, 0.1], [0.5, 0.5])
# Mutual information via 2D histogram
mi = hz.mutual_information(predictions, outcomes, n_bins=10)
# Transfer entropy: directed information flow
te = hz.transfer_entropy(source_series, target_series, lag=1, n_bins=10)
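For reference, a minimal pure-Python sketch of what the entropy and divergence calls compute (values in bits, assuming strictly positive probabilities; the _ref helpers are illustrative, not part of Horizon):

import math

def shannon_entropy_ref(p):
    # Binary entropy in bits: H(p) = -p*log2(p) - (1-p)*log2(1-p)
    return -p * math.log2(p) - (1 - p) * math.log2(1 - p)

def kl_divergence_ref(p, q):
    # D_KL(P || Q) = sum_i p_i * log2(p_i / q_i)
    return sum(pi * math.log2(pi / qi) for pi, qi in zip(p, q))

shannon_entropy_ref(0.5)                    # 1.0
kl_divergence_ref([0.9, 0.1], [0.5, 0.5])   # ~0.531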
Microstructure
Measure market quality, price impact, and order flow.
# Kyle's lambda (price impact)
lam = hz.kyles_lambda(price_changes, signed_volumes)
# Amihud illiquidity ratio
illiq = hz.amihud_ratio(returns, volumes)
# Roll implicit spread
spread = hz.roll_spread(returns)
# Effective and realized spread
eff = hz.effective_spread(fill_price=0.52, mid=0.50, is_buy=True)
real = hz.realized_spread(fill_price=0.52, mid_after=0.51, is_buy=True)
# LOB imbalance (multi-level)
imb = hz.lob_imbalance(bids=[(0.50, 100)], asks=[(0.51, 80)], levels=5)
# Microprice (volume-weighted mid)
mid = hz.weighted_mid(bids=[(0.50, 200)], asks=[(0.52, 100)])
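As a sanity check on weighted_mid, the usual microprice definition weights each side's price by the opposite side's size, pulling the fair value toward the heavier queue. A sketch (the _ref helper is illustrative, not part of Horizon):

def weighted_mid_ref(bid_px, bid_sz, ask_px, ask_sz):
    # Bid price weighted by ask size, ask price by bid size
    return (bid_px * ask_sz + ask_px * bid_sz) / (bid_sz + ask_sz)

weighted_mid_ref(0.50, 200, 0.52, 100)  # ~0.5133: heavy bid queue pushes fair value toward the ask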
Risk Analytics
Cornish-Fisher VaR/CVaR
Skew- and kurtosis-adjusted Value at Risk: more accurate than Gaussian VaR for the fat-tailed, skewed returns typical of prediction markets.
returns = [...] # historical returns
var = hz.cornish_fisher_var(returns, confidence=0.95)
cvar = hz.cornish_fisher_cvar(returns, confidence=0.95)
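Under the hood, the Cornish-Fisher expansion adjusts the Gaussian quantile for skew and excess kurtosis. A sketch of the standard expansion (not necessarily Horizon's exact implementation):

from statistics import NormalDist

def cornish_fisher_quantile(confidence, skew, excess_kurt):
    # Standard Cornish-Fisher adjustment of the Gaussian quantile z
    z = NormalDist().inv_cdf(1 - confidence)
    return (z
            + (z**2 - 1) * skew / 6
            + (z**3 - 3 * z) * excess_kurt / 24
            - (2 * z**3 - 5 * z) * skew**2 / 36)

# VaR is then mean + quantile * std of the return series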
Prediction Greeks
Binary market sensitivities analogous to options Greeks.
greeks = hz.prediction_greeks(
    price=0.65,    # current market probability
    size=100.0,    # position size
    is_yes=True,   # YES side
    t_hours=24.0,  # hours to expiry
    vol=0.2,       # implied volatility
)
print(greeks.delta, greeks.gamma, greeks.theta, greeks.vega)
Signal Analysis
# Information Coefficient (Spearman rank correlation)
ic = hz.information_coefficient(predictions, outcomes)
# Signal half-life (AR(1) mean-reversion speed)
hl = hz.signal_half_life(signal_values)
# Hurst exponent (trending vs mean-reverting)
h = hz.hurst_exponent(price_series) # >0.5 trending, <0.5 mean-reverting
# Variance ratio test (random walk hypothesis)
vr = hz.variance_ratio(returns, period=2) # 1.0 = random walk
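For intuition, the q-period variance ratio compares the variance of aggregated q-period returns to q times the one-period variance. A minimal sketch using non-overlapping windows (a full test adds overlapping windows and finite-sample corrections):

from statistics import pvariance

def variance_ratio_ref(returns, q):
    # Var of q-period (summed) returns over q * Var of 1-period returns
    q_returns = [sum(returns[i:i + q]) for i in range(0, len(returns) - q + 1, q)]
    return pvariance(q_returns) / (q * pvariance(returns))

# ~1.0 for a random walk; >1 suggests trending, <1 mean reversion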
Statistical Testing
Deflated Sharpe Ratio
Tests whether an observed Sharpe ratio is statistically significant given the number of strategy trials that produced it (Bailey & López de Prado, 2014).
p_value = hz.deflated_sharpe(
    sharpe=2.1,
    n_obs=500,
    n_trials=50,
    skew=0.1,
    kurt=3.5,
)
# Bonferroni correction
threshold = hz.bonferroni_threshold(alpha=0.05, n_trials=50)
# Benjamini-Hochberg FDR control
rejected = hz.benjamini_hochberg([0.01, 0.03, 0.5], alpha=0.05)
# [True, True, False]
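The Benjamini-Hochberg procedure is easy to verify by hand: sort the p-values, find the largest rank k with p_(k) <= (k / m) * alpha, and reject every hypothesis at or below that rank. A sketch (the _ref helper is illustrative):

def benjamini_hochberg_ref(p_values, alpha=0.05):
    m = len(p_values)
    order = sorted(range(m), key=lambda i: p_values[i])
    # Largest 1-indexed rank k with p_(k) <= (k / m) * alpha
    k_max = 0
    for rank, i in enumerate(order, start=1):
        if p_values[i] <= rank / m * alpha:
            k_max = rank
    rejected = [False] * m
    for rank, i in enumerate(order, start=1):
        rejected[i] = rank <= k_max
    return rejected

benjamini_hochberg_ref([0.01, 0.03, 0.5])  # [True, True, False]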
Streaming Detectors
Real-time detectors that maintain state across pipeline ticks.
VPIN Toxicity Detector
vpin = hz.VpinDetector(bucket_volume=1000.0, n_buckets=50)
toxicity = vpin.update(0.65, 50.0, True)
print(vpin.current_vpin())
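VPIN itself is the average absolute buy/sell imbalance over the trailing volume buckets (Easley, López de Prado & O'Hara). A sketch of the core statistic, assuming trades have already been classified into buckets:

def vpin_ref(buckets):
    # buckets: (buy_volume, sell_volume) pairs, one per equal-volume bucket
    return sum(abs(b - s) for b, s in buckets) / sum(b + s for b, s in buckets)

vpin_ref([(800.0, 200.0), (600.0, 400.0)])  # 0.4: moderately one-sided flow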
CUSUM Change-Point Detector
cusum = hz.CusumDetector(threshold=5.0, drift=0.0)
is_break = cusum.update(value=2.5) # True if structural break
print(cusum.upper(), cusum.lower())
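A two-sided CUSUM keeps two running sums that floor at zero and flags a break when either exceeds the threshold. A minimal sketch matching the threshold/drift parameters above (illustrative, not Horizon's implementation):

class CusumRef:
    def __init__(self, threshold, drift=0.0):
        self.threshold, self.drift = threshold, drift
        self.upper = self.lower = 0.0

    def update(self, value):
        # Accumulate positive/negative deviations beyond the drift allowance
        self.upper = max(0.0, self.upper + value - self.drift)
        self.lower = max(0.0, self.lower - value - self.drift)
        return self.upper > self.threshold or self.lower > self.threshold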
Order Flow Imbalance (OFI) Tracker
ofi = hz.OfiTracker()
delta = ofi.update(best_bid_qty=150.0, best_ask_qty=100.0)
print(ofi.cumulative())
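A common top-of-book simplification of Cont et al.'s OFI is the change in bid depth minus the change in ask depth, accumulated across updates. A sketch under that simplification (Horizon's tracker may use the full price-aware definition):

class OfiRef:
    def __init__(self):
        self.prev_bid = self.prev_ask = None
        self.total = 0.0

    def update(self, best_bid_qty, best_ask_qty):
        delta = 0.0
        if self.prev_bid is not None:
            # Growing bid depth is buying pressure; growing ask depth is selling pressure
            delta = (best_bid_qty - self.prev_bid) - (best_ask_qty - self.prev_ask)
        self.prev_bid, self.prev_ask = best_bid_qty, best_ask_qty
        self.total += delta
        return delta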
Pipeline Functions
Drop these into hz.run(pipeline=[...]) for real-time analytics.
Toxic Flow Detection
hz.run(
    name="my_strategy",
    pipeline=[
        hz.toxic_flow("book", bucket_volume=500, threshold=0.7),
        my_quoter,
    ],
)
# Injects: vpin, flow_toxicity, toxic_alert
Microstructure Analytics
hz.run(
    pipeline=[
        hz.microstructure("book", lookback=100),
        my_quoter,
    ],
)
# Injects: ofi, kyle_lambda, amihud, roll_spread, market_entropy
Change-Point Detection
hz.run(
    pipeline=[
        hz.change_detector(threshold=5.0),
        my_quoter,
    ],
)
# Injects: change_detected, cusum_upper, cusum_lower
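Downstream stages read the injected values from ctx.params. A hypothetical quoter illustrating the pattern (the exact stage signature is an assumption; the keys match the Injects lists above):

def my_quoter(ctx):
    # Back off when flow turns toxic or a structural break is flagged
    vpin = ctx.params.get("vpin", 0.0)
    if ctx.params.get("toxic_alert") or ctx.params.get("change_detected"):
        ...  # widen or pull quotes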
Offline Analysis
Strategy Significance
# equity_curve should be a plain list of equity values (floats)
equity_values = [1000.0, 1005.0, 1002.0, 1010.0, 1008.0, 1015.0]
result = hz.strategy_significance(
    result={"equity_curve": equity_values},
    n_trials=50,
    alpha=0.05,
)
print(result["deflated_sharpe_pvalue"])
print(result["is_significant"])
Signal Diagnostics
diag = hz.signal_diagnostics(predictions, outcomes)
print(diag["ic"], diag["half_life"], diag["hurst"])
Market Efficiency
eff = hz.market_efficiency(price_series)
print(eff["variance_ratio"], eff["hurst"], eff["is_efficient"])
Stress Testing
Run Monte Carlo simulations under adverse scenarios.
from horizon import SimPosition, stress_test
positions = [
    SimPosition("mkt1", "yes", 100.0, 0.50, 0.60),
    SimPosition("mkt2", "yes", 50.0, 0.40, 0.55),
]
result = stress_test(positions, n_simulations=10000, seed=42)
print(result.worst_scenario) # "all_resolve_no"
print(result.worst_pnl)
print(result.summary())
Custom Scenarios
from horizon import StressScenario
crash = StressScenario(
    name="election_shock",
    correlation_override=0.99,
    price_shocks={"mkt1": 0.05, "mkt2": 0.10},
)
result = stress_test(positions, scenarios=[crash])
Built-in Scenarios
| Scenario | Description |
|---|---|
| CORRELATION_SPIKE | All correlations → 0.95 |
| ALL_RESOLVE_NO | All prices → 0.01 (worst case for YES holders) |
| LIQUIDITY_SHOCK | All prices → 0.50 (maximum uncertainty) |
| TAIL_RISK | All correlations → 0.99 |
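The built-in scenario constants are passed the same way as custom ones:

from horizon import stress_test, CORRELATION_SPIKE, LIQUIDITY_SHOCK

result = stress_test(positions, scenarios=[CORRELATION_SPIKE, LIQUIDITY_SHOCK])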
CPCV (Combinatorial Purged Cross-Validation)
Detect overfitting in backtest results using the Bailey et al. methodology. The headline statistic is the Probability of Backtest Overfitting (PBO): the probability that the configuration picked in-sample underperforms the median configuration out-of-sample.
from horizon import cpcv
def pipeline_factory(params):
    multiplier = params["multiplier"]
    def strategy(returns):
        # Score each evaluation fold with a Sharpe-like ratio of the scaled returns
        adjusted = [r * multiplier for r in returns]
        mean = sum(adjusted) / len(adjusted)
        std = (sum((r - mean) ** 2 for r in adjusted) / len(adjusted)) ** 0.5
        return mean / std if std > 0 else 0.0
    return strategy
result = cpcv(
    data=returns_series,
    pipeline_factory=pipeline_factory,
    param_grid=[{"multiplier": m} for m in [0.5, 1.0, 1.5, 2.0]],
    n_groups=8,
    purge_gap=0.02,
)
print(f"PBO: {result.pbo:.2%}")
print(f"Overfit: {result.is_overfit}")
print(f"OOS Sharpes: {result.oos_sharpes}")
print(f"Combinations tested: {result.n_combinations}")
Hawkes Process
Self-exciting point process for modeling trade arrival intensity. Events cluster: each trade increases the probability of more trades.
Rust API
from horizon import HawkesProcess
hp = HawkesProcess(mu=0.1, alpha=0.5, beta=1.0)
hp.add_event(1000.0)
hp.add_event(1000.5)
intensity = hp.intensity(1001.0) # Current intensity
branching = hp.branching_ratio() # alpha/beta = 0.5
expected = hp.expected_events(1001.0, 60.0) # Expected events in next 60s
hp.reset() # Clear all events
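With the standard exponential kernel, the intensity is the baseline plus a decaying kick from each past event: lambda(t) = mu + sum_i alpha * exp(-beta * (t - t_i)). A sketch reproducing the example above (the _ref helper is illustrative):

import math

def intensity_ref(mu, alpha, beta, events, t):
    # Baseline plus exponentially decaying contribution of each past event
    return mu + sum(alpha * math.exp(-beta * (t - ti)) for ti in events if ti < t)

intensity_ref(0.1, 0.5, 1.0, [1000.0, 1000.5], 1001.0)  # ~0.587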
Pipeline Function
import horizon as hz
hz.run(
    pipeline=[
        hz.hawkes_intensity(feed_name="book", mu=0.1, alpha=0.5, beta=1.0),
        my_strategy,
    ],
    ...
)
# Injects: ctx.params["hawkes_intensity"], ctx.params["hawkes_branching"], ctx.params["hawkes_expected_1m"]
The pipeline stage registers an event whenever there are fills in the cycle (fills_this_cycle > 0) or the price jumps by more than 2%; each market keeps its own isolated process state.
Ledoit-Wolf Shrinkage
Optimal covariance matrix estimation that shrinks the sample covariance toward a scaled identity matrix.
Rust API
from horizon import ledoit_wolf_shrinkage
# rows = observations, cols = assets
returns = [[0.01, 0.02], [-0.01, 0.03], [0.02, -0.01]]
matrix, shrinkage = ledoit_wolf_shrinkage(returns)
# matrix: 2x2 shrunk covariance, shrinkage: intensity [0, 1]
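The shrunk estimate is a convex combination of the sample covariance S and the scaled identity target mu*I, where mu = tr(S)/p is the average sample variance: (1 - delta) * S + delta * mu * I. A sketch of the combination step given a shrinkage intensity (computing the optimal delta is the harder part Horizon handles):

def shrink_covariance(sample_cov, delta):
    p = len(sample_cov)
    mu = sum(sample_cov[i][i] for i in range(p)) / p  # tr(S) / p
    return [[(1 - delta) * sample_cov[i][j] + (delta * mu if i == j else 0.0)
             for j in range(p)]
            for i in range(p)]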
Pipeline Function
import horizon as hz
hz.run(
    pipeline=[
        hz.correlation_estimator(feed_names=["btc", "eth"], window=100),
        my_strategy,
    ],
    ...
)
# Injects: ctx.params["correlation_matrix"], ctx.params["correlation_shrinkage_intensity"], ctx.params["correlation_updated"]