#!/usr/bin/env python3
"""
Hyperliquid Account Analyzer

Analyzes Hyperliquid trading accounts to evaluate:
- Profitability and performance metrics
- Average trade duration and trading patterns
- Risk management quality
- Win rates and consistency
- Position sizing and leverage usage

Usage:
    # Analyze specific addresses
    python utils/hyperliquid_account_analyzer.py [address1] [address2] ...

    # Use curated high-performance accounts (default)
    python utils/hyperliquid_account_analyzer.py
    python utils/hyperliquid_account_analyzer.py --limit 15

    # Use hardcoded top 10 addresses
    python utils/hyperliquid_account_analyzer.py --top10

Options:
    --leaderboard  Use curated high-performance accounts (recommended)
    --window       Time window preference: 1d, 7d, 30d, allTime (default: 7d)
    --limit        Number of accounts to analyze (default: 10)
    --top10        Use original hardcoded list of top 10 accounts

Note: Hyperliquid's leaderboard API is not publicly accessible, so the script
uses a manually curated list of high-performing accounts identified through analysis.
"""

import asyncio
import aiohttp
import json
import sys
import os
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from dataclasses import dataclass
import statistics
from collections import defaultdict
import argparse

# Add src to path to import our modules
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))


@dataclass
class Trade:
    """Represents a single trade"""
    timestamp: int
    coin: str
    side: str  # 'buy' or 'sell'
    size: float
    price: float
    fee: float
    is_maker: bool


@dataclass
class Position:
    """Represents a position"""
    coin: str
    size: float
    side: str  # 'long' or 'short'
    entry_price: float
    mark_price: float
    unrealized_pnl: float
    leverage: float
    margin_used: float


@dataclass
class AccountStats:
    """Comprehensive account statistics"""
    address: str
    total_pnl: float
    win_rate: float
    total_trades: int
    avg_trade_duration_hours: float
    max_drawdown: float
    sharpe_ratio: float
    avg_position_size: float
    max_leverage_used: float
    avg_leverage_used: float
    trading_frequency_per_day: float
    risk_reward_ratio: float
    consecutive_losses_max: int
    profit_factor: float
    largest_win: float
    largest_loss: float
    active_positions: int
    current_drawdown: float
    last_trade_timestamp: int
    analysis_period_days: int
    is_copyable: bool  # Whether this account is suitable for copy trading
    copyability_reason: str  # Why it is/isn't copyable


class HyperliquidAccountAnalyzer:
    """Analyzes Hyperliquid trading accounts"""

    def __init__(self):
        self.info_url = "https://api.hyperliquid.xyz/info"
        self.session = None

    async def __aenter__(self):
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.session:
            await self.session.close()

    async def get_account_state(self, address: str) -> Optional[Dict]:
        """Get current account state including positions and balance"""
        try:
            payload = {
                "type": "clearinghouseState",
                "user": address
            }

            async with self.session.post(self.info_url, json=payload) as response:
                if response.status == 200:
                    return await response.json()
                else:
                    print(f"❌ Error fetching account state for {address}: HTTP {response.status}")
                    return None

        except Exception as e:
            print(f"❌ Exception fetching account state for {address}: {e}")
            return None
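    # Illustrative note (an assumption, not a full schema): parse_positions() below
    # relies only on this subset of the clearinghouseState response; real responses
    # contain additional fields that this script ignores.
    #
    #   {
    #       "assetPositions": [
    #           {"position": {"coin": "BTC", "szi": "0.5", "entryPx": "60000",
    #                         "positionValue": "30500", "unrealizedPnl": "500",
    #                         "leverage": {"value": "5"}, "marginUsed": "6100"}}
    #       ]
    #   }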
    async def get_user_fills(self, address: str, limit: int = 1000) -> Optional[List[Dict]]:
        """Get recent fills/trades for a user"""
        try:
            payload = {
                "type": "userFills",
                "user": address
            }

            async with self.session.post(self.info_url, json=payload) as response:
                if response.status == 200:
                    data = await response.json()
                    # Return only the most recent fills up to limit
                    fills = data if isinstance(data, list) else []
                    return fills[:limit]
                else:
                    print(f"❌ Error fetching fills for {address}: HTTP {response.status}")
                    return None

        except Exception as e:
            print(f"❌ Exception fetching fills for {address}: {e}")
            return None

    async def get_funding_history(self, address: str) -> Optional[List[Dict]]:
        """Get funding payments history"""
        try:
            payload = {
                "type": "userFunding",
                "user": address
            }

            async with self.session.post(self.info_url, json=payload) as response:
                if response.status == 200:
                    return await response.json()
                else:
                    return []

        except Exception as e:
            print(f"⚠️ Could not fetch funding history for {address}: {e}")
            return []

    def parse_trades(self, fills: List[Dict]) -> List[Trade]:
        """Parse fills into Trade objects"""
        trades = []

        for fill in fills:
            try:
                # Parse timestamp
                timestamp = int(fill.get('time', 0))
                if timestamp == 0:
                    continue

                # Parse trade data
                coin = fill.get('coin', 'UNKNOWN')
                side = fill.get('side', 'buy').lower()
                size = float(fill.get('sz', '0'))
                price = float(fill.get('px', '0'))
                fee = float(fill.get('fee', '0'))
                is_maker = not fill.get('liquidation', False)  # Simplified maker detection

                if size > 0 and price > 0:
                    trades.append(Trade(
                        timestamp=timestamp,
                        coin=coin,
                        side=side,
                        size=size,
                        price=price,
                        fee=fee,
                        is_maker=is_maker
                    ))

            except (ValueError, KeyError) as e:
                print(f"⚠️ Warning: Could not parse fill: {fill} - {e}")
                continue

        return trades

    def parse_positions(self, account_state: Dict) -> List[Position]:
        """Parse account state into Position objects"""
        positions = []

        if not account_state or 'assetPositions' not in account_state:
            return positions

        for asset_pos in account_state['assetPositions']:
            try:
                position_data = asset_pos.get('position', {})
                coin = position_data.get('coin', 'UNKNOWN')
                size_str = position_data.get('szi', '0')
                size = float(size_str)

                if abs(size) < 1e-6:  # Skip dust positions
                    continue

                side = 'long' if size > 0 else 'short'
                entry_price = float(position_data.get('entryPx', '0'))
                mark_price = float(position_data.get('positionValue', '0')) / abs(size) if size != 0 else 0
                unrealized_pnl = float(position_data.get('unrealizedPnl', '0'))
                leverage = float(position_data.get('leverage', {}).get('value', '1'))
                margin_used = float(position_data.get('marginUsed', '0'))

                positions.append(Position(
                    coin=coin,
                    size=abs(size),
                    side=side,
                    entry_price=entry_price,
                    mark_price=mark_price,
                    unrealized_pnl=unrealized_pnl,
                    leverage=leverage,
                    margin_used=margin_used
                ))

            except (ValueError, KeyError) as e:
                print(f"⚠️ Warning: Could not parse position: {asset_pos} - {e}")
                continue

        return positions
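    # Worked example (illustrative numbers) of the round-trip P&L logic in
    # calculate_trade_performance() below: buy 1.0 ETH @ $3,000, later sell
    # 1.0 ETH @ $3,100 with a $1.50 fee on the closing fill. The close realizes
    # (3,100 - 3,000) * 1.0 - 1.50 = $98.50 and is counted as one winning trade.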
    def calculate_trade_performance(self, trades: List[Trade]) -> Tuple[float, float, int, int]:
        """Calculate more accurate trade performance metrics"""
        if len(trades) < 2:
            return 0.0, 0.0, 0, 0

        # Group trades by coin and track P&L per completed round trip
        trades_by_coin = defaultdict(list)
        for trade in sorted(trades, key=lambda x: x.timestamp):
            trades_by_coin[trade.coin].append(trade)

        total_realized_pnl = 0.0
        winning_trades = 0
        losing_trades = 0
        total_fees = 0.0

        for coin, coin_trades in trades_by_coin.items():
            position = 0.0
            entry_price = 0.0
            entry_cost = 0.0

            for trade in coin_trades:
                total_fees += trade.fee

                if trade.side == 'buy':
                    if position <= 0:  # Opening long or closing short
                        if position < 0:  # Closing short position
                            closed_size = min(abs(position), trade.size)
                            pnl = (entry_price - trade.price) * closed_size - trade.fee
                            total_realized_pnl += pnl
                            if pnl > 0:
                                winning_trades += 1
                            else:
                                losing_trades += 1

                        # Start new long position
                        new_size = trade.size - max(0, -position)
                        if new_size > 0:
                            entry_price = trade.price
                            entry_cost = new_size * trade.price
                            position = new_size
                        else:
                            # Buy only reduced/closed the short; keep the remainder
                            position += trade.size
                    else:  # Adding to long position
                        entry_cost += trade.size * trade.price
                        position += trade.size
                        entry_price = entry_cost / position

                elif trade.side == 'sell':
                    if position >= 0:  # Closing long or opening short
                        if position > 0:  # Closing long position
                            pnl = (trade.price - entry_price) * min(position, trade.size) - trade.fee
                            total_realized_pnl += pnl
                            if pnl > 0:
                                winning_trades += 1
                            else:
                                losing_trades += 1

                        # Start new short position
                        new_size = trade.size - max(0, position)
                        if new_size > 0:
                            entry_price = trade.price
                            position = -new_size
                        else:
                            # Sell only reduced/closed the long; keep the remainder
                            position -= trade.size
                    else:  # Adding to short position
                        position -= trade.size
                        entry_price = trade.price  # Simplified for shorts

        win_rate = winning_trades / (winning_trades + losing_trades) if (winning_trades + losing_trades) > 0 else 0
        return total_realized_pnl, win_rate, winning_trades, losing_trades

    def analyze_hft_patterns(self, trades: List[Trade]) -> Dict[str, Any]:
        """
        Analyze high-frequency trading patterns that don't follow traditional open/close cycles
        """
        if not trades:
            return {
                'avg_time_between_trades_minutes': 0,
                'max_time_between_trades_hours': 0,
                'min_time_between_trades_seconds': 0,
                'trading_clusters': 0,
                'trades_per_cluster': 0,
                'is_hft_pattern': False
            }

        trades_sorted = sorted(trades, key=lambda x: x.timestamp)
        time_gaps = []

        # Calculate time gaps between consecutive trades
        for i in range(1, len(trades_sorted)):
            gap_ms = trades_sorted[i].timestamp - trades_sorted[i-1].timestamp
            gap_minutes = gap_ms / (1000 * 60)
            time_gaps.append(gap_minutes)

        if not time_gaps:
            return {
                'avg_time_between_trades_minutes': 0,
                'max_time_between_trades_hours': 0,
                'min_time_between_trades_seconds': 0,
                'trading_clusters': 0,
                'trades_per_cluster': 0,
                'is_hft_pattern': False
            }

        avg_gap_minutes = statistics.mean(time_gaps)
        max_gap_hours = max(time_gaps) / 60
        min_gap_seconds = min(time_gaps) * 60

        # Identify trading clusters (periods of intense activity)
        clusters = []
        current_cluster = [trades_sorted[0]]

        for i in range(1, len(trades_sorted)):
            gap_minutes = time_gaps[i-1]
            if gap_minutes <= 5:  # Trades within 5 minutes = same cluster
                current_cluster.append(trades_sorted[i])
            else:
                if len(current_cluster) >= 3:  # Minimum 3 trades to be a cluster
                    clusters.append(current_cluster)
                current_cluster = [trades_sorted[i]]

        # Don't forget the last cluster
        if len(current_cluster) >= 3:
            clusters.append(current_cluster)

        avg_trades_per_cluster = statistics.mean([len(cluster) for cluster in clusters]) if clusters else 0

        # Determine if this is HFT pattern
        is_hft = (
            avg_gap_minutes < 30 and  # Average < 30 minutes between trades
            len([gap for gap in time_gaps if gap < 1]) > len(time_gaps) * 0.3  # 30%+ trades within 1 minute
        )

        return {
            'avg_time_between_trades_minutes': avg_gap_minutes,
            'max_time_between_trades_hours': max_gap_hours,
            'min_time_between_trades_seconds': min_gap_seconds,
            'trading_clusters': len(clusters),
            'trades_per_cluster': avg_trades_per_cluster,
            'is_hft_pattern': is_hft
        }
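    # Illustrative reading of analyze_hft_patterns() above (hypothetical numbers):
    # an account averaging ~45 seconds between fills, with well over 30% of the
    # gaps under one minute, gets is_hft_pattern=True and is later treated as a
    # bot rather than a copyable human trader.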
    def calculate_rolling_pnl(self, trades: List[Trade]) -> Tuple[float, List[float], int, int]:
        """
        Calculate P&L using rolling window approach for HFT patterns
        """
        if not trades:
            return 0.0, [], 0, 0

        trades_sorted = sorted(trades, key=lambda x: x.timestamp)

        # Track net position and P&L over time
        cumulative_pnl = 0.0
        pnl_series = []
        winning_periods = 0
        losing_periods = 0

        # Use 1-hour windows for P&L calculation
        window_size_ms = 60 * 60 * 1000  # 1 hour

        if not trades_sorted:
            return 0.0, [], 0, 0

        start_time = trades_sorted[0].timestamp
        end_time = trades_sorted[-1].timestamp

        current_time = start_time
        window_trades = []

        while current_time <= end_time:
            window_end = current_time + window_size_ms

            # Get trades in this window
            window_trades = [
                t for t in trades_sorted
                if current_time <= t.timestamp < window_end
            ]

            if window_trades:
                # Calculate net flow and fees for this window
                net_usd_flow = 0.0
                window_fees = 0.0

                for trade in window_trades:
                    trade_value = trade.size * trade.price
                    if trade.side == 'buy':
                        net_usd_flow -= trade_value  # Cash out
                    else:  # sell
                        net_usd_flow += trade_value  # Cash in
                    window_fees += trade.fee

                # Window P&L = net cash flow - fees
                window_pnl = net_usd_flow - window_fees
                cumulative_pnl += window_pnl
                pnl_series.append(cumulative_pnl)

                if window_pnl > 0:
                    winning_periods += 1
                elif window_pnl < 0:
                    losing_periods += 1

            current_time = window_end

        win_rate = winning_periods / (winning_periods + losing_periods) if (winning_periods + losing_periods) > 0 else 0

        return cumulative_pnl, pnl_series, winning_periods, losing_periods
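    # Worked example (illustrative) for the 1-hour windows above: if within one
    # window an account sells $10,050 of notional, buys $10,000, and pays $4 in
    # fees, the window P&L is (10,050 - 10,000) - 4 = +$46 and counts as one
    # winning period. Note this is a cash-flow approximation; inventory carried
    # across windows is not marked to market.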
    async def analyze_account(self, address: str) -> Optional[AccountStats]:
        """Analyze a single account and return comprehensive statistics"""
        print(f"\n🔍 Analyzing account: {address}")

        # Get account data
        account_state = await self.get_account_state(address)
        fills = await self.get_user_fills(address, limit=500)  # Reduced limit for better analysis

        if not fills:
            print(f"❌ No trading data found for {address}")
            return None

        # Parse data
        trades = self.parse_trades(fills)
        positions = self.parse_positions(account_state) if account_state else []

        if not trades:
            print(f"❌ No valid trades found for {address}")
            return None

        print(f"📊 Found {len(trades)} trades, {len(positions)} active positions")

        # Calculate time period
        trades_sorted = sorted(trades, key=lambda x: x.timestamp)
        oldest_trade = trades_sorted[0].timestamp
        newest_trade = trades_sorted[-1].timestamp
        analysis_period_ms = newest_trade - oldest_trade
        analysis_period_days = max(1, analysis_period_ms / (1000 * 60 * 60 * 24))

        # Calculate improved metrics
        total_trades = len(trades)
        total_fees = sum(trade.fee for trade in trades)

        # Analyze HFT patterns first
        hft_patterns = self.analyze_hft_patterns(trades)

        # Check if this is a manageable trading frequency for copy trading
        trading_freq = total_trades / analysis_period_days if analysis_period_days > 0 else 0
        is_copyable_frequency = 1 <= trading_freq <= 20  # 1-20 trades per day is manageable

        if hft_patterns['is_hft_pattern'] or trading_freq > 50:
            print(f"🤖 ❌ UNSUITABLE: High-frequency algorithmic trading detected")
            print(f"⚡ Trading frequency: {trading_freq:.1f} trades/day (TOO HIGH for copy trading)")
            print(f"🕒 Avg time between trades: {hft_patterns['avg_time_between_trades_minutes']:.1f} minutes")
            print(f"❌ This account cannot be safely copied - would result in overtrading and high fees")

            # Still calculate metrics for completeness but mark as unsuitable
            rolling_pnl, pnl_series, winning_periods, losing_periods = self.calculate_rolling_pnl(trades)
            realized_pnl = rolling_pnl
            win_rate = winning_periods / (winning_periods + losing_periods) if (winning_periods + losing_periods) > 0 else 0
            avg_duration = hft_patterns['avg_time_between_trades_minutes'] / 60  # Convert to hours

            print(f"💰 Rolling P&L: ${realized_pnl:.2f}, Periods: {winning_periods}W/{losing_periods}L")

        elif is_copyable_frequency:
            print(f"✅ SUITABLE: Human-manageable trading pattern detected")
            print(f"📊 Trading frequency: {trading_freq:.1f} trades/day (GOOD for copy trading)")

            # Use traditional P&L calculation for human traders
            realized_pnl, win_rate, winning_trades, losing_trades = self.calculate_trade_performance(trades)
            print(f"💰 Realized PnL: ${realized_pnl:.2f}, Wins: {winning_trades}, Losses: {losing_trades}")
            print(f"📈 Trade Win Rate: {win_rate:.1%}")

            # Calculate traditional trade durations
            durations = []
            position_tracker = defaultdict(lambda: {'size': 0, 'start_time': 0})

            for trade in trades_sorted:
                coin = trade.coin
                pos = position_tracker[coin]

                if trade.side == 'buy':
                    if pos['size'] <= 0 and trade.size > abs(pos['size']):  # Opening new long
                        pos['start_time'] = trade.timestamp
                    pos['size'] += trade.size
                else:  # sell
                    if pos['size'] > 0:  # Closing long position
                        if trade.size >= pos['size'] and pos['start_time'] > 0:  # Fully closing
                            duration_hours = (trade.timestamp - pos['start_time']) / (1000 * 3600)
                            if duration_hours > 0:
                                durations.append(duration_hours)
                            pos['start_time'] = 0
                        pos['size'] -= trade.size
                    elif pos['size'] <= 0:  # Opening short
                        pos['start_time'] = trade.timestamp
                        pos['size'] -= trade.size

            avg_duration = statistics.mean(durations) if durations else 0
            print(f"🕒 Found {len(durations)} completed trades, avg duration: {avg_duration:.1f} hours")

        else:
            print(f"⚠️ QUESTIONABLE: Low trading frequency detected")
            print(f"📊 Trading frequency: {trading_freq:.1f} trades/day (might be inactive)")

            # Use traditional analysis for low-frequency traders
            realized_pnl, win_rate, winning_trades, losing_trades = self.calculate_trade_performance(trades)
            print(f"💰 Realized PnL: ${realized_pnl:.2f}, Wins: {winning_trades}, Losses: {losing_trades}")
            print(f"📈 Trade Win Rate: {win_rate:.1%}")
            avg_duration = 24.0  # Assume longer holds for infrequent traders
            print(f"🕒 Infrequent trading pattern - assuming longer hold times")

        # Common calculations
        unrealized_pnl = sum(pos.unrealized_pnl for pos in positions)
        total_pnl = realized_pnl + unrealized_pnl

        print(f"💰 Total PnL: ${total_pnl:.2f} (Realized: ${realized_pnl:.2f} + Unrealized: ${unrealized_pnl:.2f})")
        print(f"💸 Total Fees: ${total_fees:.2f}")

        # Calculate position size statistics
        position_sizes = [trade.size * trade.price for trade in trades]
        avg_position_size = statistics.mean(position_sizes) if position_sizes else 0

        # Calculate leverage statistics from current positions
        leverages = [pos.leverage for pos in positions if pos.leverage > 0]
        max_leverage = max(leverages) if leverages else 0
        avg_leverage = statistics.mean(leverages) if leverages else 1

        # Calculate trading frequency
        trading_freq = total_trades / analysis_period_days if analysis_period_days > 0 else 0

        # Simplified drawdown calculation
        max_drawdown = 0.0
        current_drawdown = 0.0
        if total_pnl < 0:
            max_drawdown = abs(total_pnl) / (avg_position_size * 10) if avg_position_size > 0 else 0
            current_drawdown = max_drawdown

        # Risk metrics
        profit_factor = abs(realized_pnl) / total_fees if total_fees > 0 else 0

        # Analyze HFT patterns
        hft_patterns = self.analyze_hft_patterns(trades)

        # Determine copyability
        is_hft = trading_freq > 50
        is_inactive = trading_freq < 1
        is_copyable_freq = 1 <= trading_freq <= 20

        if is_hft:
            is_copyable = False
            copyability_reason = f"HFT Bot ({trading_freq:.1f} trades/day - too fast to copy)"
        elif is_inactive:
            is_copyable = False
            copyability_reason = f"Inactive ({trading_freq:.1f} trades/day - insufficient activity)"
        elif is_copyable_freq:
            is_copyable = True
            copyability_reason = f"Human trader ({trading_freq:.1f} trades/day - manageable frequency)"
        else:
            is_copyable = False
            copyability_reason = f"Questionable frequency ({trading_freq:.1f} trades/day)"
        # Calculate risk/reward ratio safely
        if hft_patterns['is_hft_pattern']:
            # For HFT, use win rate as proxy for risk/reward
            risk_reward_ratio = win_rate / (1 - win_rate) if win_rate < 1 else 1.0
        else:
            # For traditional trading, try to use winning/losing trade counts
            try:
                # These variables should exist from traditional analysis
                risk_reward_ratio = winning_trades / max(1, losing_trades)
            except NameError:
                # Fallback if variables don't exist
                risk_reward_ratio = win_rate / (1 - win_rate) if win_rate < 1 else 1.0

        return AccountStats(
            address=address,
            total_pnl=total_pnl,
            win_rate=win_rate,
            total_trades=total_trades,
            avg_trade_duration_hours=avg_duration,
            max_drawdown=max_drawdown,
            sharpe_ratio=0,  # Would need returns data
            avg_position_size=avg_position_size,
            max_leverage_used=max_leverage,
            avg_leverage_used=avg_leverage,
            trading_frequency_per_day=trading_freq,
            risk_reward_ratio=risk_reward_ratio,
            consecutive_losses_max=0,  # Would need sequence analysis
            profit_factor=profit_factor,
            largest_win=0,  # Would need individual trade P&L
            largest_loss=0,  # Would need individual trade P&L
            active_positions=len(positions),
            current_drawdown=current_drawdown,
            last_trade_timestamp=newest_trade,
            analysis_period_days=int(analysis_period_days),
            is_copyable=is_copyable,
            copyability_reason=copyability_reason
        )

    async def analyze_multiple_accounts(self, addresses: List[str]) -> List[AccountStats]:
        """Analyze multiple accounts concurrently"""
        print(f"🚀 Starting analysis of {len(addresses)} accounts...\n")

        tasks = [self.analyze_account(addr) for addr in addresses]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Filter out None results and exceptions
        valid_results = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                print(f"❌ Error analyzing {addresses[i]}: {result}")
            elif result is not None:
                valid_results.append(result)

        return valid_results

    def print_analysis_results(self, stats_list: List[AccountStats]):
        """Print comprehensive analysis results with relative scoring"""
        if not stats_list:
            print("❌ No valid analysis results to display")
            return

        print("\n" + "="*100)
        print("📊 HYPERLIQUID ACCOUNT ANALYSIS RESULTS")
        print("="*100)

        # Calculate data ranges for relative scoring
        def get_data_ranges(stats_list):
            """Calculate min/max values for relative scoring"""
            if not stats_list:
                return {}

            # Separate copyable from non-copyable for different scoring
            copyable_accounts = [s for s in stats_list if s.is_copyable]
            all_accounts = stats_list

            ranges = {}

            # Profitability range (use all accounts)
            pnls = [s.total_pnl for s in all_accounts]
            ranges['pnl_min'] = min(pnls)
            ranges['pnl_max'] = max(pnls)
            ranges['pnl_range'] = ranges['pnl_max'] - ranges['pnl_min']

            # Win rate range (use all accounts)
            win_rates = [s.win_rate for s in all_accounts]
            ranges['winrate_min'] = min(win_rates)
            ranges['winrate_max'] = max(win_rates)
            ranges['winrate_range'] = ranges['winrate_max'] - ranges['winrate_min']

            # Trading frequency range (use all accounts)
            frequencies = [s.trading_frequency_per_day for s in all_accounts]
            ranges['freq_min'] = min(frequencies)
            ranges['freq_max'] = max(frequencies)
            ranges['freq_range'] = ranges['freq_max'] - ranges['freq_min']

            # Trade duration range (use all accounts)
            durations = [s.avg_trade_duration_hours for s in all_accounts if s.avg_trade_duration_hours > 0]
            if durations:
                ranges['duration_min'] = min(durations)
                ranges['duration_max'] = max(durations)
                ranges['duration_range'] = ranges['duration_max'] - ranges['duration_min']
            else:
                ranges['duration_min'] = 0
                ranges['duration_max'] = 24
                ranges['duration_range'] = 24
            # Drawdown range (use all accounts)
            drawdowns = [s.max_drawdown for s in all_accounts]
            ranges['drawdown_min'] = min(drawdowns)
            ranges['drawdown_max'] = max(drawdowns)
            ranges['drawdown_range'] = ranges['drawdown_max'] - ranges['drawdown_min']

            return ranges

        ranges = get_data_ranges(stats_list)

        # Relative scoring function
        def calculate_relative_score(stats: AccountStats, ranges: dict) -> Tuple[float, Dict[str, str]]:
            score = 0.0
            score_breakdown = {}

            # 1. COPYABILITY FILTER (40% weight - most important)
            is_hft = stats.trading_frequency_per_day > 50
            is_too_slow = stats.trading_frequency_per_day < 1
            is_copyable = 1 <= stats.trading_frequency_per_day <= 20

            if is_hft:
                copyability_score = 0  # HFT bots get 0
                score_breakdown['copyability'] = f"❌ HFT Bot (0 points)"
            elif is_too_slow:
                copyability_score = 10  # Inactive accounts get minimal points
                score_breakdown['copyability'] = f"⚠️ Inactive (10 points)"
            elif is_copyable:
                # For copyable accounts, score based on how close to ideal frequency (20 trades/day)
                ideal_freq = 20
                freq_distance = abs(stats.trading_frequency_per_day - ideal_freq)
                # Max score when exactly at ideal, decreases as distance increases
                # (e.g. 10 trades/day is 10 away from ideal -> 40 - 2*10 = 20 points)
                copyability_score = max(0, 40 - (freq_distance * 2))  # Lose 2 points per trade away from ideal
                score_breakdown['copyability'] = f"✅ Copyable ({copyability_score:.1f} points - {stats.trading_frequency_per_day:.1f} trades/day)"
            else:
                copyability_score = 20  # Questionable frequency
                score_breakdown['copyability'] = f"⚠️ Questionable ({copyability_score} points)"

            score += copyability_score

            # 2. PROFITABILITY (25% weight) - Relative to cohort
            if ranges['pnl_range'] > 0:
                # Linear interpolation: worst performer gets 0, best gets 25
                pnl_normalized = (stats.total_pnl - ranges['pnl_min']) / ranges['pnl_range']
                profitability_score = pnl_normalized * 25
            else:
                profitability_score = 12.5  # If all same PnL, give average score

            score += profitability_score
            score_breakdown['profitability'] = f"💰 Profitability ({profitability_score:.1f} points - ${stats.total_pnl:.0f})"

            # 3. WIN RATE (15% weight) - Relative to cohort
            if ranges['winrate_range'] > 0:
                winrate_normalized = (stats.win_rate - ranges['winrate_min']) / ranges['winrate_range']
                winrate_score = winrate_normalized * 15
            else:
                winrate_score = 7.5  # If all same win rate, give average score

            score += winrate_score
            score_breakdown['winrate'] = f"📈 Win Rate ({winrate_score:.1f} points - {stats.win_rate:.1%})"

            # 4. TRADE DURATION (10% weight) - Preference for 2-48 hour range
            if stats.avg_trade_duration_hours == 0:
                duration_score = 2  # Minimal score for 0 duration
            else:
                # Ideal range: 2-48 hours gets full score
                if 2 <= stats.avg_trade_duration_hours <= 48:
                    duration_score = 10  # Perfect range
                elif 1 <= stats.avg_trade_duration_hours < 2:
                    duration_score = 7  # Slightly too fast
                elif 48 < stats.avg_trade_duration_hours <= 168:  # 1 week
                    duration_score = 8  # Slightly too slow but still good
                else:
                    duration_score = 3  # Too extreme

            score += duration_score
            score_breakdown['duration'] = f"🕒 Duration ({duration_score:.1f} points - {stats.avg_trade_duration_hours:.1f}h)"
            # 5. RISK MANAGEMENT (10% weight) - Lower drawdown is better
            if ranges['drawdown_range'] > 0:
                # Invert: lowest drawdown gets full score
                drawdown_normalized = 1 - ((stats.max_drawdown - ranges['drawdown_min']) / ranges['drawdown_range'])
                risk_score = drawdown_normalized * 10
            else:
                risk_score = 5  # If all same drawdown, give average score

            score += risk_score
            score_breakdown['risk'] = f"📉 Risk Mgmt ({risk_score:.1f} points - {stats.max_drawdown:.1%} drawdown)"

            return score, score_breakdown

        # Calculate scores for all accounts
        scored_accounts = []
        for stats in stats_list:
            score, breakdown = calculate_relative_score(stats, ranges)
            scored_accounts.append((stats, score, breakdown))

        # Sort by score
        scored_accounts.sort(key=lambda x: x[1], reverse=True)

        # Print data ranges for context
        print(f"\n📊 COHORT ANALYSIS (for relative scoring):")
        print(f"   💰 PnL Range: ${ranges['pnl_min']:.0f} to ${ranges['pnl_max']:.0f}")
        print(f"   📈 Win Rate Range: {ranges['winrate_min']:.1%} to {ranges['winrate_max']:.1%}")
        print(f"   🔄 Frequency Range: {ranges['freq_min']:.1f} to {ranges['freq_max']:.1f} trades/day")
        print(f"   🕒 Duration Range: {ranges['duration_min']:.1f}h to {ranges['duration_max']:.1f}h")
        print(f"   📉 Drawdown Range: {ranges['drawdown_min']:.1%} to {ranges['drawdown_max']:.1%}")

        # Print results
        for i, (stats, score, breakdown) in enumerate(scored_accounts, 1):
            print(f"\n{i}. 📋 ACCOUNT: {stats.address}")
            print(f"   🏆 RELATIVE SCORE: {score:.1f}/100")
            print(f"   📊 Score Breakdown:")
            for metric, description in breakdown.items():
                print(f"      {description}")
            print(f"   💰 Total PnL: ${stats.total_pnl:.2f}")
            print(f"   📈 Win Rate: {stats.win_rate:.1%}")
            print(f"   🕒 Avg Trade Duration: {stats.avg_trade_duration_hours:.1f} hours")
            print(f"   📉 Max Drawdown: {stats.max_drawdown:.1%}")
            print(f"   🔄 Trading Frequency: {stats.trading_frequency_per_day:.1f} trades/day")
            print(f"   💵 Avg Position Size: ${stats.avg_position_size:.2f}")
            print(f"   ⚡ Max Leverage: {stats.max_leverage_used:.1f}x")
            print(f"   📊 Total Trades: {stats.total_trades}")
            print(f"   📍 Active Positions: {stats.active_positions}")
            print(f"   📅 Analysis Period: {stats.analysis_period_days} days")

            # Copy Trading Suitability Evaluation
            evaluation = []

            is_hft_pattern = stats.trading_frequency_per_day > 50
            is_copyable = 1 <= stats.trading_frequency_per_day <= 20

            # First determine if account is copyable
            if is_hft_pattern:
                evaluation.append("❌ NOT COPYABLE - HFT/Bot")
            elif stats.trading_frequency_per_day < 1:
                evaluation.append("❌ NOT COPYABLE - Inactive")
            elif is_copyable:
                evaluation.append("✅ COPYABLE - Human trader")
            else:
                evaluation.append("⚠️ QUESTIONABLE - Check frequency")

            # Profitability check
            if stats.total_pnl > 0:
                evaluation.append("✅ Profitable")
            else:
                evaluation.append("❌ Not profitable")

            # Trade duration evaluation for copyable accounts
            if is_copyable:
                if 2 <= stats.avg_trade_duration_hours <= 48:
                    evaluation.append("✅ Good trade duration")
                elif stats.avg_trade_duration_hours < 2:
                    evaluation.append("⚠️ Very short trades")
                else:
                    evaluation.append("⚠️ Long hold times")

                # Win rate for human traders
                if stats.win_rate > 0.6:
                    evaluation.append("✅ Excellent win rate")
                elif stats.win_rate > 0.4:
                    evaluation.append("✅ Good win rate")
                else:
                    evaluation.append("⚠️ Low win rate")
            else:
                # For non-copyable accounts, just note the pattern
                if is_hft_pattern:
                    evaluation.append("🤖 Algorithmic trading")
                else:
                    evaluation.append("💤 Low activity")

            # Risk management (universal)
            if stats.max_drawdown < 0.15:
                evaluation.append("✅ Good risk management")
            elif stats.max_drawdown < 0.25:
                evaluation.append("⚠️ Moderate risk")
            else:
                evaluation.append("❌ High drawdown risk")

            print(f"   🎯 Evaluation: {' | '.join(evaluation)}")

        # Recommendation section
        print("\n" + "="*100)
        print("🎯 COPY TRADING RECOMMENDATIONS")
        print("="*100)

        # Separate copyable from non-copyable accounts
        copyable_accounts = [(stats, score, breakdown) for stats, score, breakdown in scored_accounts if stats.is_copyable]
        non_copyable_accounts = [(stats, score, breakdown) for stats, score, breakdown in scored_accounts if not stats.is_copyable]

        if copyable_accounts:
            print(f"\n✅ FOUND {len(copyable_accounts)} COPYABLE ACCOUNTS:")

            best_stats, best_score, best_breakdown = copyable_accounts[0]
            print(f"\n🏆 TOP COPYABLE RECOMMENDATION: {best_stats.address}")
            print(f"   📊 Relative Score: {best_score:.1f}/100")
            print(f"   🎯 Status: {best_stats.copyability_reason}")

            if best_score >= 70:
                recommendation = "🟢 HIGHLY RECOMMENDED"
            elif best_score >= 50:
                recommendation = "🟡 MODERATELY RECOMMENDED"
            elif best_score >= 30:
                recommendation = "🟠 PROCEED WITH CAUTION"
            else:
                recommendation = "🔴 NOT RECOMMENDED"

            print(f"   {recommendation}")

            print(f"\n📋 Why this account scored highest:")
            for metric, description in best_breakdown.items():
                print(f"   {description}")

            print(f"\n⚙️ Suggested copy trading settings:")
            print(f"   📊 Portfolio allocation: 5-15% (start conservative)")
            print(f"   ⚡ Max leverage limit: 3-5x")
            print(f"   💰 Min position size: $25-50")
            print(f"   🔄 Expected trades: {best_stats.trading_frequency_per_day:.1f} per day")
        else:
            print(f"\n❌ NO COPYABLE ACCOUNTS FOUND")
            print(f"   All analyzed accounts are unsuitable for copy trading")

        if non_copyable_accounts:
            print(f"\n❌ {len(non_copyable_accounts)} UNSUITABLE ACCOUNTS (DO NOT COPY):")
            for i, (account, score, breakdown) in enumerate(non_copyable_accounts[:3], 1):  # Show top 3 unsuitable
                print(f"   {i}. {account.address[:10]}... - {account.copyability_reason} (Score: {score:.1f})")
            if len(non_copyable_accounts) > 3:
                print(f"   ... and {len(non_copyable_accounts) - 3} more unsuitable accounts")

        print(f"\n⚠️ IMPORTANT COPY TRADING GUIDELINES:")
        print(f"   • Only copy accounts with 1-20 trades per day")
        print(f"   • Avoid HFT bots (50+ trades/day) - impossible to follow")
        print(f"   • Start with small allocation (5%) and increase gradually")
        print(f"   • Monitor performance and adjust leverage accordingly")
        print(f"   • Higher relative scores indicate better performance within this cohort")

    async def get_leaderboard(self, window: str = "7d", limit: int = 20) -> Optional[List[str]]:
        """
        Get top accounts from Hyperliquid leaderboard

        Note: Hyperliquid's public API doesn't expose leaderboard data directly.
        This function serves as a template for when/if the API becomes available.

        Args:
            window: Time window for leaderboard ("1d", "7d", "30d", "allTime")
            limit: Number of top accounts to return

        Returns:
            List of account addresses from leaderboard (currently returns None)
        """
        print(f"⚠️ Hyperliquid leaderboard API not publicly accessible")
        print(f"💡 To analyze current top performers:")
        print(f"   1. Visit: https://app.hyperliquid.xyz/leaderboard")
        print(f"   2. Copy top performer addresses manually")
        print(f"   3. Run: python utils/hyperliquid_account_analyzer.py [address1] [address2] ...")
        print(f"   4. Or use --top10 for a curated list of known good traders")

        # Note: If Hyperliquid ever makes their leaderboard API public,
        # we can implement the actual fetching logic here
        return None

    async def _try_alternative_leaderboard(self, window: str, limit: int) -> Optional[List[str]]:
        """Try alternative methods to get leaderboard data"""
        try:
            # Try different payload formats
            alternative_payloads = [
                {
                    "type": "leaderBoard",
                    "timeWindow": window
                },
                {
                    "type": "userLeaderboard",
                    "window": window
                },
                {
                    "type": "spotLeaderboard",
                    "req": {"timeWindow": window}
                }
            ]

            for payload in alternative_payloads:
                try:
                    async with self.session.post(self.info_url, json=payload) as response:
                        if response.status == 200:
                            data = await response.json()

                            # Try to extract addresses from any structure
                            addresses = self._extract_addresses_from_data(data, limit)
                            if addresses:
                                print(f"📊 Successfully fetched {len(addresses)} addresses using alternative method")
                                return addresses

                except Exception:
                    continue

            print("⚠️ Could not fetch leaderboard data, using fallback top accounts")
            return None

        except Exception as e:
            print(f"⚠️ Alternative leaderboard fetch failed: {e}")
            return None

    def _extract_addresses_from_data(self, data: Any, limit: int) -> List[str]:
        """Extract addresses from any nested data structure"""
        addresses = []

        def recursive_search(obj, depth=0):
            if depth > 5:  # Prevent infinite recursion
                return

            if isinstance(obj, list):
                for item in obj:
                    recursive_search(item, depth + 1)
            elif isinstance(obj, dict):
                # Check if this dict has an address field
                for addr_field in ['user', 'address', 'account', 'trader', 'wallet']:
                    if addr_field in obj:
                        addr = obj[addr_field]
                        if isinstance(addr, str) and addr.startswith('0x') and len(addr) == 42:
                            if addr not in addresses:  # Avoid duplicates
                                addresses.append(addr)

                # Recurse into nested objects
                for value in obj.values():
                    recursive_search(value, depth + 1)

        recursive_search(data)
        return addresses[:limit]

    async def get_top_accounts_from_leaderboard(self, window: str = "7d", limit: int = 10) -> List[str]:
        """
        Get top performing accounts from Hyperliquid leaderboard

        Currently uses a curated list of high-performing accounts since
        the Hyperliquid leaderboard API is not publicly accessible.

        Args:
            window: Time window ("1d", "7d", "30d", "allTime")
            limit: Number of accounts to return

        Returns:
            List of top account addresses
        """
        print(f"🔍 Attempting to fetch top {limit} accounts from {window} leaderboard...")

        addresses = await self.get_leaderboard(window, limit)

        if not addresses:
            print("\n📋 Using curated list of high-performing accounts")
            print("💡 These accounts have been manually verified for good performance")

            # Curated list of known high-performing accounts
            # Updated based on our previous analysis
            curated_addresses = [
                "0x59a15c79a007cd6e9965b949fcf04125c2212524",  # Best performer from previous analysis
                "0xa10ec245b3483f83e350a9165a52ae23dbab01bc",
                "0x0487b5e806ac781508cb3272ebd83ad603ddcc0f",
                "0x72fad4e75748b65566a3ebb555b6f6ee18ce08d1",
                "0xa70434af5778038245d53da1b4d360a30307a827",
                "0xeaa400abec7c62d315fd760cbba817fa35e4e0e8",
                "0x3104b7668f9e46fb13ec0b141d2902e144d67efe",
                "0x74dcdc6df25bd7ba70336632ecd76a053d0f8dd4",
                "0xc62df97dcf96324adf4edd30a4a7bffd5402f4da",
                "0xd11f5de0189d52b3abe6b0960b8377c20988e17e"
            ]

            selected_addresses = curated_addresses[:limit]
            print(f"📊 Selected {len(selected_addresses)} accounts for analysis:")
            for i, addr in enumerate(selected_addresses, 1):
                print(f"   {i}. {addr}")

            return selected_addresses

        print(f"✅ Successfully fetched {len(addresses)} top accounts from leaderboard")
        for i, addr in enumerate(addresses, 1):
            print(f"   {i}. {addr}")

        return addresses
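
# Illustrative programmatic usage (a sketch only; the CLI entry point below is the
# supported path, and the address here is a placeholder, not a recommendation):
#
#   async def example():
#       async with HyperliquidAccountAnalyzer() as analyzer:
#           stats = await analyzer.analyze_account("0x0000000000000000000000000000000000000000")
#           if stats:
#               print(stats.copyability_reason)
#
#   asyncio.run(example())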

async def main():
    """Main function"""
    parser = argparse.ArgumentParser(description='Analyze Hyperliquid trading accounts')
    parser.add_argument('addresses', nargs='*', help='Account addresses to analyze')
    parser.add_argument('--top10', action='store_true', help='Analyze the provided top 10 accounts (hardcoded list)')
    parser.add_argument('--leaderboard', action='store_true', help='Fetch and analyze top accounts from Hyperliquid leaderboard')
    parser.add_argument('--window', default='7d', choices=['1d', '7d', '30d', 'allTime'], help='Time window for leaderboard (default: 7d)')
    parser.add_argument('--limit', type=int, default=10, help='Number of top accounts to analyze (default: 10)')

    args = parser.parse_args()

    # Hardcoded list of accounts provided by the user (used as the --top10 fallback)
    top10_addresses = [
        "0xa10ec245b3483f83e350a9165a52ae23dbab01bc",
        "0x2aab3badd6a5daa388da47de4c72a6fa618a6265",
        "0xd11f5de0189d52b3abe6b0960b8377c20988e17e",
        "0xc62df97dcf96324adf4edd30a4a7bffd5402f4da",
        "0xa70434af5778038245d53da1b4d360a30307a827",
        "0x72fad4e75748b65566a3ebb555b6f6ee18ce08d1",
        "0x0487b5e806ac781508cb3272ebd83ad603ddcc0f",
        "0x59a15c79a007cd6e9965b949fcf04125c2212524",
        "0xeaa400abec7c62d315fd760cbba817fa35e4e0e8",
        "0x3104b7668f9e46fb13ec0b141d2902e144d67efe",
        "0x74dcdc6df25bd7ba70336632ecd76a053d0f8dd4",
        "0x101a2d2afc2f9b0b217637f53e3a3e859104a33d",
        "0x836f01e63bd0fcbe673dcd905f882a5a808dd36e",
        "0xae42743b5d6a3594b7f95b5cebce64cfedc69318",
        "0x944fdea9d4956ce673c7545862cefccad6ee1b04",
        "0x2a93e999816c9826ade0b51aaa2d83240d8f4596",
        "0x7d3ca5fa94383b22ee49fc14e89aa417f65b4d92",
        "0xfacb7404c1fad06444bda161d1304e4b7aa14e77",
        "0x654d8c01f308d670d6bed13d892ee7ee285028a6",
        "0xbbf3fc6f14e70eb451d1ecd2c20227702fc435c6",
        "0x41dd4becd2930c37e8c05bac4e82459489d47e32",
        "0xe97b3608b2c527b92400099b144b8868e8e02b14",
        "0x9d8769bf821cec63f5e5436ef194002377d917f1",
        "0x258855d09cf445835769f21370230652c4294a92",
        "0x69e07d092e3b4bd5bbc02aed7491916269426ad1"
    ]

    async with HyperliquidAccountAnalyzer() as analyzer:
        if args.leaderboard:
            # Fetch top accounts from leaderboard
            addresses = await analyzer.get_top_accounts_from_leaderboard(args.window, args.limit)
        elif args.top10:
            # Use hardcoded top 10 list
            addresses = top10_addresses
            print("ℹ️ Using hardcoded top 10 accounts")
        elif args.addresses:
            # Use provided addresses
            addresses = args.addresses
            print(f"ℹ️ Analyzing {len(addresses)} provided addresses")
        else:
            # Default: use curated list (since leaderboard API isn't available)
            print("ℹ️ No addresses specified, using curated high-performance accounts...")
            addresses = await analyzer.get_top_accounts_from_leaderboard(args.window, args.limit)

        if not addresses:
            print("❌ No addresses to analyze")
            return

        results = await analyzer.analyze_multiple_accounts(addresses)
        analyzer.print_analysis_results(results)


if __name__ == "__main__":
    asyncio.run(main())