#!/usr/bin/env python3
"""
Hyperliquid Account Analyzer

Analyzes Hyperliquid trading accounts to evaluate:
- Profitability and performance metrics
- Average trade duration and trading patterns
- Risk management quality
- Win rates and consistency
- Position sizing and leverage usage

Usage:
    # Analyze specific addresses
    python utils/hyperliquid_account_analyzer.py [address1] [address2] ...

    # Use curated high-performance accounts (default)
    python utils/hyperliquid_account_analyzer.py
    python utils/hyperliquid_account_analyzer.py --limit 15

    # Use hardcoded top 10 addresses
    python utils/hyperliquid_account_analyzer.py --top10

Options:
    --leaderboard   Use curated high-performance accounts (recommended)
    --window        Time window preference: 1d, 7d, 30d, allTime (default: 7d)
    --limit         Number of accounts to analyze (default: 10)
    --top10         Use original hardcoded list of top 10 accounts

Note: Hyperliquid's leaderboard API is not publicly accessible, so the script uses
a manually curated list of high-performing accounts identified through analysis.
"""
import asyncio
import aiohttp
import json
import sys
import os
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from dataclasses import dataclass
import statistics
from collections import defaultdict
import argparse

# Add src to path to import our modules
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
@dataclass
class Trade:
    """Represents a single trade"""
    timestamp: int
    coin: str
    side: str  # 'buy' or 'sell'
    size: float
    price: float
    fee: float
    is_maker: bool


@dataclass
class Position:
    """Represents a position"""
    coin: str
    size: float
    side: str  # 'long' or 'short'
    entry_price: float
    mark_price: float
    unrealized_pnl: float
    leverage: float
    margin_used: float


@dataclass
class AccountStats:
    """Comprehensive account statistics"""
    address: str
    total_pnl: float
    win_rate: float
    total_trades: int
    avg_trade_duration_hours: float
    max_drawdown: float
    sharpe_ratio: float
    avg_position_size: float
    max_leverage_used: float
    avg_leverage_used: float
    trading_frequency_per_day: float
    risk_reward_ratio: float
    consecutive_losses_max: int
    profit_factor: float
    largest_win: float
    largest_loss: float
    active_positions: int
    current_drawdown: float
    last_trade_timestamp: int
    analysis_period_days: int
    is_copyable: bool  # Whether this account is suitable for copy trading
    copyability_reason: str  # Why it is/isn't copyable
    unique_tokens_traded: int  # Number of unique tokens/coins traded
    trading_type: str  # "spot", "perps", or "mixed"
    top_tokens: List[str]  # Top 5 most traded tokens by volume
    short_percentage: float  # Percentage of trades that are likely shorts
    trading_style: str  # Directional trading style description
    buy_sell_ratio: float  # Ratio of buys to sells
class HyperliquidAccountAnalyzer:
    """Analyzes Hyperliquid trading accounts"""

    def __init__(self):
        self.info_url = "https://api.hyperliquid.xyz/info"
        self.session = None

    async def __aenter__(self):
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.session:
            await self.session.close()

    async def get_account_state(self, address: str) -> Optional[Dict]:
        """Get current account state including positions and balance"""
        try:
            payload = {
                "type": "clearinghouseState",
                "user": address
            }
            async with self.session.post(self.info_url, json=payload) as response:
                if response.status == 200:
                    return await response.json()
                else:
                    print(f"❌ Error fetching account state for {address}: HTTP {response.status}")
                    return None
        except Exception as e:
            print(f"❌ Exception fetching account state for {address}: {e}")
            return None
    async def get_user_fills(self, address: str, limit: int = 1000) -> Optional[List[Dict]]:
        """Get recent fills/trades for a user"""
        try:
            payload = {
                "type": "userFills",
                "user": address
            }
            async with self.session.post(self.info_url, json=payload) as response:
                if response.status == 200:
                    data = await response.json()
                    # Return only the most recent fills up to limit
                    fills = data if isinstance(data, list) else []
                    return fills[:limit]
                else:
                    print(f"❌ Error fetching fills for {address}: HTTP {response.status}")
                    return None
        except Exception as e:
            print(f"❌ Exception fetching fills for {address}: {e}")
            return None

    async def get_funding_history(self, address: str) -> Optional[List[Dict]]:
        """Get funding payments history"""
        try:
            payload = {
                "type": "userFunding",
                "user": address
            }
            async with self.session.post(self.info_url, json=payload) as response:
                if response.status == 200:
                    return await response.json()
                else:
                    return []
        except Exception as e:
            print(f"⚠️ Could not fetch funding history for {address}: {e}")
            return []
    def parse_trades(self, fills: List[Dict]) -> List[Trade]:
        """Parse fills into Trade objects"""
        trades = []
        for fill in fills:
            try:
                # Parse timestamp
                timestamp = int(fill.get('time', 0))
                if timestamp == 0:
                    continue
                # Parse trade data
                coin = fill.get('coin', 'UNKNOWN')
                side = fill.get('side', 'buy').lower()
                size = float(fill.get('sz', '0'))
                price = float(fill.get('px', '0'))
                fee = float(fill.get('fee', '0'))
                is_maker = fill.get('liquidation', False) == False  # Simplified maker detection
                if size > 0 and price > 0:
                    trades.append(Trade(
                        timestamp=timestamp,
                        coin=coin,
                        side=side,
                        size=size,
                        price=price,
                        fee=fee,
                        is_maker=is_maker
                    ))
            except (ValueError, KeyError) as e:
                print(f"⚠️ Warning: Could not parse fill: {fill} - {e}")
                continue
        return trades
    def parse_positions(self, account_state: Dict) -> List[Position]:
        """Parse account state into Position objects"""
        positions = []
        if not account_state or 'assetPositions' not in account_state:
            return positions
        for asset_pos in account_state['assetPositions']:
            try:
                position_data = asset_pos.get('position', {})
                coin = position_data.get('coin', 'UNKNOWN')
                size_str = position_data.get('szi', '0')
                size = float(size_str)
                if abs(size) < 1e-6:  # Skip dust positions
                    continue
                side = 'long' if size > 0 else 'short'
                entry_price = float(position_data.get('entryPx', '0'))
                mark_price = float(position_data.get('positionValue', '0')) / abs(size) if size != 0 else 0
                unrealized_pnl = float(position_data.get('unrealizedPnl', '0'))
                leverage = float(position_data.get('leverage', {}).get('value', '1'))
                margin_used = float(position_data.get('marginUsed', '0'))
                positions.append(Position(
                    coin=coin,
                    size=abs(size),
                    side=side,
                    entry_price=entry_price,
                    mark_price=mark_price,
                    unrealized_pnl=unrealized_pnl,
                    leverage=leverage,
                    margin_used=margin_used
                ))
            except (ValueError, KeyError) as e:
                print(f"⚠️ Warning: Could not parse position: {asset_pos} - {e}")
                continue
        return positions
    def calculate_trade_performance(self, trades: List[Trade]) -> Tuple[float, float, int, int]:
        """Calculate more accurate trade performance metrics"""
        if len(trades) < 2:
            return 0.0, 0.0, 0, 0
        # Group trades by coin and track P&L per completed round trip
        trades_by_coin = defaultdict(list)
        for trade in sorted(trades, key=lambda x: x.timestamp):
            trades_by_coin[trade.coin].append(trade)
        total_realized_pnl = 0.0
        winning_trades = 0
        losing_trades = 0
        total_fees = 0.0
        for coin, coin_trades in trades_by_coin.items():
            position = 0.0
            entry_price = 0.0
            entry_cost = 0.0
            for trade in coin_trades:
                total_fees += trade.fee
                if trade.side.lower() in ['buy', 'b']:
                    if position <= 0:  # Opening long or closing short
                        if position < 0:  # Closing short position
                            pnl = (entry_price - trade.price) * abs(position) - trade.fee
                            total_realized_pnl += pnl
                            if pnl > 0:
                                winning_trades += 1
                            else:
                                losing_trades += 1
                        # Start new long position
                        new_size = trade.size - max(0, -position)
                        if new_size > 0:
                            entry_price = trade.price
                            entry_cost = new_size * trade.price
                            position = new_size
                    else:  # Adding to long position
                        entry_cost += trade.size * trade.price
                        position += trade.size
                        entry_price = entry_cost / position
                elif trade.side.lower() in ['sell', 's', 'a', 'ask']:
                    if position >= 0:  # Closing long or opening short
                        if position > 0:  # Closing long position
                            pnl = (trade.price - entry_price) * min(position, trade.size) - trade.fee
                            total_realized_pnl += pnl
                            if pnl > 0:
                                winning_trades += 1
                            else:
                                losing_trades += 1
                        # Start new short position
                        new_size = trade.size - max(0, position)
                        if new_size > 0:
                            entry_price = trade.price
                            position = -new_size
                    else:  # Adding to short position
                        position -= trade.size
                        entry_price = trade.price  # Simplified for shorts
        win_rate = winning_trades / (winning_trades + losing_trades) if (winning_trades + losing_trades) > 0 else 0
        return total_realized_pnl, win_rate, winning_trades, losing_trades
    def analyze_hft_patterns(self, trades: List[Trade]) -> Dict[str, Any]:
        """
        Analyze high-frequency trading patterns that don't follow traditional open/close cycles
        """
        if not trades:
            return {
                'avg_time_between_trades_minutes': 0,
                'max_time_between_trades_hours': 0,
                'min_time_between_trades_seconds': 0,
                'trading_clusters': 0,
                'trades_per_cluster': 0,
                'is_hft_pattern': False
            }
        trades_sorted = sorted(trades, key=lambda x: x.timestamp)
        time_gaps = []
        # Calculate time gaps between consecutive trades
        for i in range(1, len(trades_sorted)):
            gap_ms = trades_sorted[i].timestamp - trades_sorted[i-1].timestamp
            gap_minutes = gap_ms / (1000 * 60)
            time_gaps.append(gap_minutes)
        if not time_gaps:
            return {
                'avg_time_between_trades_minutes': 0,
                'max_time_between_trades_hours': 0,
                'min_time_between_trades_seconds': 0,
                'trading_clusters': 0,
                'trades_per_cluster': 0,
                'is_hft_pattern': False
            }
        avg_gap_minutes = statistics.mean(time_gaps)
        max_gap_hours = max(time_gaps) / 60
        min_gap_seconds = min(time_gaps) * 60
        # Identify trading clusters (periods of intense activity)
        clusters = []
        current_cluster = [trades_sorted[0]]
        for i in range(1, len(trades_sorted)):
            gap_minutes = time_gaps[i-1]
            if gap_minutes <= 5:  # Trades within 5 minutes = same cluster
                current_cluster.append(trades_sorted[i])
            else:
                if len(current_cluster) >= 3:  # Minimum 3 trades to be a cluster
                    clusters.append(current_cluster)
                current_cluster = [trades_sorted[i]]
        # Don't forget the last cluster
        if len(current_cluster) >= 3:
            clusters.append(current_cluster)
        avg_trades_per_cluster = statistics.mean([len(cluster) for cluster in clusters]) if clusters else 0
        # Determine if this is HFT pattern
        is_hft = (
            avg_gap_minutes < 30 and  # Average < 30 minutes between trades
            len([gap for gap in time_gaps if gap < 1]) > len(time_gaps) * 0.3  # 30%+ trades within 1 minute
        )
        return {
            'avg_time_between_trades_minutes': avg_gap_minutes,
            'max_time_between_trades_hours': max_gap_hours,
            'min_time_between_trades_seconds': min_gap_seconds,
            'trading_clusters': len(clusters),
            'trades_per_cluster': avg_trades_per_cluster,
            'is_hft_pattern': is_hft
        }
    def calculate_rolling_pnl(self, trades: List[Trade]) -> Tuple[float, List[float], int, int]:
        """
        Calculate P&L using a rolling window approach for HFT patterns
        """
        if not trades:
            return 0.0, [], 0, 0
        trades_sorted = sorted(trades, key=lambda x: x.timestamp)
        # Track net position and P&L over time
        cumulative_pnl = 0.0
        pnl_series = []
        winning_periods = 0
        losing_periods = 0
        # Use 1-hour windows for P&L calculation
        window_size_ms = 60 * 60 * 1000  # 1 hour
        if not trades_sorted:
            return 0.0, [], 0, 0
        start_time = trades_sorted[0].timestamp
        end_time = trades_sorted[-1].timestamp
        current_time = start_time
        window_trades = []
        while current_time <= end_time:
            window_end = current_time + window_size_ms
            # Get trades in this window
            window_trades = [
                t for t in trades_sorted
                if current_time <= t.timestamp < window_end
            ]
            if window_trades:
                # Calculate net flow and fees for this window
                net_usd_flow = 0.0
                window_fees = 0.0
                for trade in window_trades:
                    trade_value = trade.size * trade.price
                    if trade.side.lower() in ['buy', 'b']:
                        net_usd_flow -= trade_value  # Cash out
                    elif trade.side.lower() in ['sell', 's', 'a', 'ask']:  # sell
                        net_usd_flow += trade_value  # Cash in
                    window_fees += trade.fee
                # Window P&L = net cash flow - fees
                window_pnl = net_usd_flow - window_fees
                cumulative_pnl += window_pnl
                pnl_series.append(cumulative_pnl)
                if window_pnl > 0:
                    winning_periods += 1
                elif window_pnl < 0:
                    losing_periods += 1
            current_time = window_end
        win_rate = winning_periods / (winning_periods + losing_periods) if (winning_periods + losing_periods) > 0 else 0
        return cumulative_pnl, pnl_series, winning_periods, losing_periods
    def analyze_token_diversity_and_type(self, trades: List[Trade], positions: List[Position]) -> Tuple[int, str, List[str]]:
        """
        Analyze token diversity and determine trading type (spot vs perps)

        Returns:
            tuple: (unique_tokens_count, trading_type, top_tokens_list)
        """
        if not trades:
            return 0, "unknown", []
        # Count token frequency by volume
        token_volumes = defaultdict(float)
        for trade in trades:
            volume = trade.size * trade.price
            token_volumes[trade.coin] += volume
        # Get unique token count
        unique_tokens = len(token_volumes)
        # Get top 5 tokens by volume
        sorted_tokens = sorted(token_volumes.items(), key=lambda x: x[1], reverse=True)
        top_tokens = [token for token, _ in sorted_tokens[:5]]
        # Determine trading type based on positions and leverage
        trading_type = self._determine_trading_type(positions, trades)
        return unique_tokens, trading_type, top_tokens

    def _determine_trading_type(self, positions: List[Position], trades: List[Trade]) -> str:
        """
        Determine if account trades spot, perps, or mixed

        Logic:
        - If positions have leverage > 1.1, it's perps
        - If no positions with leverage, check for margin/leverage indicators
        - Hyperliquid primarily offers perps, so default to perps if uncertain
        """
        if not positions and not trades:
            return "unknown"
        # Check current positions for leverage
        leveraged_positions = 0
        total_positions = len(positions)
        for position in positions:
            if position.leverage > 1.1:  # Consider leverage > 1.1 as perps
                leveraged_positions += 1
        # If we have positions, determine based on leverage
        if total_positions > 0:
            leverage_ratio = leveraged_positions / total_positions
            if leverage_ratio >= 0.8:  # 80%+ leveraged positions = perps
                return "perps"
            elif leverage_ratio <= 0.2:  # 20% or fewer leveraged positions = spot
                return "spot"
            else:  # Mixed
                return "mixed"
        # If no current positions, check historical leverage patterns.
        # For Hyperliquid, most trading is perps, so default to perps.
        # We could also check if trades show signs of leverage (frequent short selling, etc.)
        # Check for short selling patterns (indicator of perps)
        total_trades = len(trades)
        if total_trades > 0:
            sell_trades = sum(1 for trade in trades if trade.side.lower() in ['sell', 's', 'a', 'ask'])
            buy_trades = total_trades - sell_trades
            # If significantly more sells than buys, likely includes short selling (perps)
            if sell_trades > buy_trades * 1.2:
                return "perps"
            # If roughly balanced, could be perps with both long/short
            elif abs(sell_trades - buy_trades) / total_trades < 0.3:
                return "perps"
        # Default to perps for Hyperliquid (they primarily offer perps)
        return "perps"
    def analyze_short_long_patterns(self, trades: List[Trade]) -> Dict[str, Any]:
        """
        Analyze short/long trading patterns for perpetual traders

        Returns:
            dict: Analysis of directional trading patterns
        """
        if not trades:
            return {
                'total_buys': 0,
                'total_sells': 0,
                'buy_sell_ratio': 0,
                'likely_short_trades': 0,
                'short_percentage': 0,
                'directional_balance': 'unknown',
                'trading_style': 'unknown'
            }
        # Handle Hyperliquid API format: 'b' = buy/bid, 'a' = sell/ask
        total_buys = sum(1 for trade in trades if trade.side.lower() in ['buy', 'b'])
        total_sells = sum(1 for trade in trades if trade.side.lower() in ['sell', 's', 'a', 'ask'])
        total_trades = len(trades)
        # Calculate buy/sell ratio (handle edge cases)
        if total_sells == 0:
            buy_sell_ratio = float('inf') if total_buys > 0 else 0
        else:
            buy_sell_ratio = total_buys / total_sells
        # Analyze trading patterns
        if total_sells == 0 and total_buys > 0:
            directional_balance = "buy_only"
            trading_style = "Long-Only (buy and hold strategy)"
        elif total_buys == 0 and total_sells > 0:
            directional_balance = "sell_only"
            trading_style = "Short-Only (bearish strategy)"
        elif abs(total_buys - total_sells) / total_trades < 0.1:  # Within 10%
            directional_balance = "balanced"
            trading_style = "Long/Short Balanced (can profit both ways)"
        elif total_sells > total_buys * 1.3:  # 30% more sells
            directional_balance = "sell_heavy"
            trading_style = "Short-Heavy (profits from price drops)"
        elif total_buys > total_sells * 1.3:  # 30% more buys
            directional_balance = "buy_heavy"
            trading_style = "Long-Heavy (profits from price rises)"
        else:
            directional_balance = "moderately_balanced"
            trading_style = "Moderately Balanced (flexible direction)"
        # Estimate likely short positions (sells without preceding buys)
        likely_shorts = 0
        position_tracker = defaultdict(lambda: {'net_position': 0})
        for trade in sorted(trades, key=lambda x: x.timestamp):
            coin_pos = position_tracker[trade.coin]
            # Handle both 'sell'/'s' and 'buy'/'b' formats
            if trade.side.lower() in ['sell', 's', 'a', 'ask']:
                if coin_pos['net_position'] <= 0:  # Selling without long position = likely short
                    likely_shorts += 1
                coin_pos['net_position'] -= trade.size
            elif trade.side.lower() in ['buy', 'b']:
                coin_pos['net_position'] += trade.size
        short_percentage = (likely_shorts / total_trades * 100) if total_trades > 0 else 0
        return {
            'total_buys': total_buys,
            'total_sells': total_sells,
            'buy_sell_ratio': buy_sell_ratio,
            'likely_short_trades': likely_shorts,
            'short_percentage': short_percentage,
            'directional_balance': directional_balance,
            'trading_style': trading_style
        }
    async def analyze_account(self, address: str) -> Optional[AccountStats]:
        """Analyze a single account and return comprehensive statistics"""
        print(f"\n🔍 Analyzing account: {address}")
        # Get account data
        account_state = await self.get_account_state(address)
        fills = await self.get_user_fills(address, limit=500)  # Reduced limit for better analysis
        if not fills:
            print(f"❌ No trading data found for {address}")
            return None
        # Parse data
        trades = self.parse_trades(fills)
        positions = self.parse_positions(account_state) if account_state else []
        if not trades:
            print(f"❌ No valid trades found for {address}")
            return None
        print(f"📊 Found {len(trades)} trades, {len(positions)} active positions")
        # Calculate time period
        trades_sorted = sorted(trades, key=lambda x: x.timestamp)
        oldest_trade = trades_sorted[0].timestamp
        newest_trade = trades_sorted[-1].timestamp
        analysis_period_ms = newest_trade - oldest_trade
        analysis_period_days = max(1, analysis_period_ms / (1000 * 60 * 60 * 24))
        # Calculate improved metrics
        total_trades = len(trades)
        total_fees = sum(trade.fee for trade in trades)
        # Analyze HFT patterns first
        hft_patterns = self.analyze_hft_patterns(trades)
        # Check if this is a manageable trading frequency for copy trading
        trading_freq = total_trades / analysis_period_days if analysis_period_days > 0 else 0
        is_copyable_frequency = 1 <= trading_freq <= 20  # 1-20 trades per day is manageable
        if hft_patterns['is_hft_pattern'] or trading_freq > 50:
            print(f"🤖 ❌ UNSUITABLE: High-frequency algorithmic trading detected")
            print(f"⚡ Trading frequency: {trading_freq:.1f} trades/day (TOO HIGH for copy trading)")
            print(f"🕒 Avg time between trades: {hft_patterns['avg_time_between_trades_minutes']:.1f} minutes")
            print(f"❌ This account cannot be safely copied - would result in overtrading and high fees")
            # Still calculate metrics for completeness but mark as unsuitable
            rolling_pnl, pnl_series, winning_periods, losing_periods = self.calculate_rolling_pnl(trades)
            realized_pnl = rolling_pnl
            win_rate = winning_periods / (winning_periods + losing_periods) if (winning_periods + losing_periods) > 0 else 0
            avg_duration = hft_patterns['avg_time_between_trades_minutes'] / 60  # Convert to hours
            print(f"💰 Rolling P&L: ${realized_pnl:.2f}, Periods: {winning_periods}W/{losing_periods}L")
        elif is_copyable_frequency:
            print(f"✅ SUITABLE: Human-manageable trading pattern detected")
            print(f"📊 Trading frequency: {trading_freq:.1f} trades/day (GOOD for copy trading)")
            # Use traditional P&L calculation for human traders
            realized_pnl, win_rate, winning_trades, losing_trades = self.calculate_trade_performance(trades)
            print(f"💰 Realized PnL: ${realized_pnl:.2f}, Wins: {winning_trades}, Losses: {losing_trades}")
            print(f"📈 Trade Win Rate: {win_rate:.1%}")
            # Calculate traditional trade durations
            durations = []
            position_tracker = defaultdict(lambda: {'size': 0, 'start_time': 0})
            for trade in trades_sorted:
                coin = trade.coin
                pos = position_tracker[coin]
                if trade.side.lower() in ['buy', 'b']:
                    if pos['size'] <= 0 and trade.size > abs(pos['size']):  # Opening new long
                        pos['start_time'] = trade.timestamp
                    pos['size'] += trade.size
                else:  # sell
                    if pos['size'] > 0:  # Closing long position
                        if trade.size >= pos['size'] and pos['start_time'] > 0:  # Fully closing
                            duration_hours = (trade.timestamp - pos['start_time']) / (1000 * 3600)
                            if duration_hours > 0:
                                durations.append(duration_hours)
                            pos['start_time'] = 0
                        pos['size'] -= trade.size
                    elif pos['size'] <= 0:  # Opening short
                        pos['start_time'] = trade.timestamp
                        pos['size'] -= trade.size
            avg_duration = statistics.mean(durations) if durations else 0
            print(f"🕒 Found {len(durations)} completed trades, avg duration: {avg_duration:.1f} hours")
        else:
            print(f"⚠️ QUESTIONABLE: Low trading frequency detected")
            print(f"📊 Trading frequency: {trading_freq:.1f} trades/day (might be inactive)")
            # Use traditional analysis for low-frequency traders
            realized_pnl, win_rate, winning_trades, losing_trades = self.calculate_trade_performance(trades)
            print(f"💰 Realized PnL: ${realized_pnl:.2f}, Wins: {winning_trades}, Losses: {losing_trades}")
            print(f"📈 Trade Win Rate: {win_rate:.1%}")
            avg_duration = 24.0  # Assume longer holds for infrequent traders
            print(f"🕒 Infrequent trading pattern - assuming longer hold times")
        # Common calculations
        unrealized_pnl = sum(pos.unrealized_pnl for pos in positions)
        total_pnl = realized_pnl + unrealized_pnl
        print(f"💰 Total PnL: ${total_pnl:.2f} (Realized: ${realized_pnl:.2f} + Unrealized: ${unrealized_pnl:.2f})")
        print(f"💸 Total Fees: ${total_fees:.2f}")
        # Calculate position size statistics
        position_sizes = [trade.size * trade.price for trade in trades]
        avg_position_size = statistics.mean(position_sizes) if position_sizes else 0
        # Calculate leverage statistics from current positions
        leverages = [pos.leverage for pos in positions if pos.leverage > 0]
        max_leverage = max(leverages) if leverages else 0
        avg_leverage = statistics.mean(leverages) if leverages else 1
        # Calculate trading frequency
        trading_freq = total_trades / analysis_period_days if analysis_period_days > 0 else 0
        # Simplified drawdown calculation
        max_drawdown = 0.0
        current_drawdown = 0.0
        if total_pnl < 0:
            max_drawdown = abs(total_pnl) / (avg_position_size * 10) if avg_position_size > 0 else 0
            current_drawdown = max_drawdown
        # Risk metrics
        profit_factor = abs(realized_pnl) / total_fees if total_fees > 0 else 0
        # Analyze HFT patterns
        hft_patterns = self.analyze_hft_patterns(trades)
        # Determine copyability
        is_hft = trading_freq > 50
        is_inactive = trading_freq < 1
        is_copyable_freq = 1 <= trading_freq <= 20
        if is_hft:
            is_copyable = False
            copyability_reason = f"HFT Bot ({trading_freq:.1f} trades/day - too fast to copy)"
        elif is_inactive:
            is_copyable = False
            copyability_reason = f"Inactive ({trading_freq:.1f} trades/day - insufficient activity)"
        elif is_copyable_freq:
            is_copyable = True
            copyability_reason = f"Human trader ({trading_freq:.1f} trades/day - manageable frequency)"
        else:
            is_copyable = False
            copyability_reason = f"Questionable frequency ({trading_freq:.1f} trades/day)"
        # Calculate risk reward ratio safely
        if hft_patterns['is_hft_pattern']:
            # For HFT, use win rate as proxy for risk/reward
            risk_reward_ratio = win_rate / (1 - win_rate) if win_rate < 1 else 1.0
        else:
            # For traditional trading, try to use winning/losing trade counts
            try:
                # These variables should exist from traditional analysis
                risk_reward_ratio = winning_trades / max(1, losing_trades)
            except NameError:
                # Fallback if variables don't exist
                risk_reward_ratio = win_rate / (1 - win_rate) if win_rate < 1 else 1.0
        # Analyze token diversity and trading type
        unique_tokens, trading_type, top_tokens = self.analyze_token_diversity_and_type(trades, positions)
        # Analyze short/long patterns
        short_long_analysis = self.analyze_short_long_patterns(trades)
        return AccountStats(
            address=address,
            total_pnl=total_pnl,
            win_rate=win_rate,
            total_trades=total_trades,
            avg_trade_duration_hours=avg_duration,
            max_drawdown=max_drawdown,
            sharpe_ratio=0,  # Would need returns data
            avg_position_size=avg_position_size,
            max_leverage_used=max_leverage,
            avg_leverage_used=avg_leverage,
            trading_frequency_per_day=trading_freq,
            risk_reward_ratio=risk_reward_ratio,
            consecutive_losses_max=0,  # Would need sequence analysis
            profit_factor=profit_factor,
            largest_win=0,  # Would need individual trade P&L
            largest_loss=0,  # Would need individual trade P&L
            active_positions=len(positions),
            current_drawdown=current_drawdown,
            last_trade_timestamp=newest_trade,
            analysis_period_days=int(analysis_period_days),
            is_copyable=is_copyable,
            copyability_reason=copyability_reason,
            unique_tokens_traded=unique_tokens,
            trading_type=trading_type,
            top_tokens=top_tokens,
            short_percentage=short_long_analysis['short_percentage'],
            trading_style=short_long_analysis['trading_style'],
            buy_sell_ratio=short_long_analysis['buy_sell_ratio']
        )
    async def analyze_multiple_accounts(self, addresses: List[str]) -> List[AccountStats]:
        """Analyze multiple accounts concurrently"""
        print(f"🚀 Starting analysis of {len(addresses)} accounts...\n")
        tasks = [self.analyze_account(addr) for addr in addresses]
        results = await asyncio.gather(*tasks, return_exceptions=True)
        # Filter out None results and exceptions
        valid_results = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                print(f"❌ Error analyzing {addresses[i]}: {result}")
            elif result is not None:
                valid_results.append(result)
        return valid_results
    def print_analysis_results(self, stats_list: List[AccountStats]):
        """Print comprehensive analysis results with relative scoring"""
        if not stats_list:
            print("❌ No valid analysis results to display")
            return
        print("\n" + "="*100)
        print("📊 HYPERLIQUID ACCOUNT ANALYSIS RESULTS")
        print("="*100)

        # Calculate data ranges for relative scoring
        def get_data_ranges(stats_list):
            """Calculate min/max values for relative scoring"""
            if not stats_list:
                return {}
            # Separate copyable from non-copyable for different scoring
            copyable_accounts = [s for s in stats_list if s.is_copyable]
            all_accounts = stats_list
            ranges = {}
            # Profitability range (use all accounts)
            pnls = [s.total_pnl for s in all_accounts]
            ranges['pnl_min'] = min(pnls)
            ranges['pnl_max'] = max(pnls)
            ranges['pnl_range'] = ranges['pnl_max'] - ranges['pnl_min']
            # Separate positive and negative PnL for different scoring
            positive_pnls = [p for p in pnls if p > 0]
            negative_pnls = [p for p in pnls if p < 0]
            ranges['has_profitable'] = len(positive_pnls) > 0
            ranges['has_unprofitable'] = len(negative_pnls) > 0
            ranges['most_profitable'] = max(positive_pnls) if positive_pnls else 0
            ranges['most_unprofitable'] = min(negative_pnls) if negative_pnls else 0
            # Win rate range (use all accounts)
            win_rates = [s.win_rate for s in all_accounts]
            ranges['winrate_min'] = min(win_rates)
            ranges['winrate_max'] = max(win_rates)
            ranges['winrate_range'] = ranges['winrate_max'] - ranges['winrate_min']
            # Trading frequency range (use all accounts)
            frequencies = [s.trading_frequency_per_day for s in all_accounts]
            ranges['freq_min'] = min(frequencies)
            ranges['freq_max'] = max(frequencies)
            ranges['freq_range'] = ranges['freq_max'] - ranges['freq_min']
            # Trade duration range (use all accounts)
            durations = [s.avg_trade_duration_hours for s in all_accounts if s.avg_trade_duration_hours > 0]
            if durations:
                ranges['duration_min'] = min(durations)
                ranges['duration_max'] = max(durations)
                ranges['duration_range'] = ranges['duration_max'] - ranges['duration_min']
            else:
                ranges['duration_min'] = 0
                ranges['duration_max'] = 24
                ranges['duration_range'] = 24
            # Drawdown range (use all accounts) - ENHANCED
            drawdowns = [s.max_drawdown for s in all_accounts]
            ranges['drawdown_min'] = min(drawdowns)
            ranges['drawdown_max'] = max(drawdowns)
            ranges['drawdown_range'] = ranges['drawdown_max'] - ranges['drawdown_min']
            # Account age range (NEW)
            ages = [s.analysis_period_days for s in all_accounts]
            ranges['age_min'] = min(ages)
            ranges['age_max'] = max(ages)
            ranges['age_range'] = ranges['age_max'] - ranges['age_min']
            return ranges
        ranges = get_data_ranges(stats_list)

        # Relative scoring function (returns the score plus a per-metric breakdown)
        def calculate_relative_score(stats: AccountStats, ranges: dict) -> Tuple[float, Dict[str, str]]:
            score = 0.0
            score_breakdown = {}
            # 1. COPYABILITY FILTER (35% weight - most important)
            is_hft = stats.trading_frequency_per_day > 50
            is_too_slow = stats.trading_frequency_per_day < 1
            is_copyable = 1 <= stats.trading_frequency_per_day <= 20
            if is_hft:
                copyability_score = 0  # HFT bots get 0
                score_breakdown['copyability'] = f"❌ HFT Bot (0 points)"
            elif is_too_slow:
                copyability_score = 5  # Inactive accounts get very low points
                score_breakdown['copyability'] = f"⚠️ Inactive (5 points)"
            elif is_copyable:
                # For copyable accounts, score based on how close to ideal frequency (15 trades/day)
                ideal_freq = 15
                freq_distance = abs(stats.trading_frequency_per_day - ideal_freq)
                # Max score when exactly at ideal, decreases as distance increases
                copyability_score = max(0, 35 - (freq_distance * 1.5))  # Lose 1.5 points per trade away from ideal
                score_breakdown['copyability'] = f"✅ Copyable ({copyability_score:.1f} points - {stats.trading_frequency_per_day:.1f} trades/day)"
            else:
                copyability_score = 15  # Questionable frequency
                score_breakdown['copyability'] = f"⚠️ Questionable ({copyability_score} points)"
            score += copyability_score
            # 2. PROFITABILITY (30% weight) - HARSH PUNISHMENT for losses
            if stats.total_pnl < 0:
                # Severe punishment for unprofitable accounts
                if ranges['has_unprofitable'] and ranges['most_unprofitable'] < stats.total_pnl:
                    # Scale from -15 (worst) to 0 (break-even)
                    loss_severity = abs(stats.total_pnl) / abs(ranges['most_unprofitable'])
                    profitability_score = -15 * loss_severity  # Negative score for losses!
                else:
                    profitability_score = -15  # Maximum penalty
                score_breakdown['profitability'] = f"❌ LOSING ({profitability_score:.1f} points - ${stats.total_pnl:.0f} LOSS)"
            elif stats.total_pnl == 0:
                profitability_score = 0  # Breakeven gets no points
                score_breakdown['profitability'] = f"⚖️ Breakeven (0 points - ${stats.total_pnl:.0f})"
            else:
                # Positive PnL gets full scoring
                if ranges['has_profitable'] and ranges['most_profitable'] > 0:
                    profit_ratio = stats.total_pnl / ranges['most_profitable']
                    profitability_score = profit_ratio * 30
                else:
                    profitability_score = 15  # Average score if only one profitable account
                score_breakdown['profitability'] = f"✅ Profitable ({profitability_score:.1f} points - ${stats.total_pnl:.0f})"
            score += profitability_score
            # 3. RISK MANAGEMENT (20% weight) - HARSH PUNISHMENT for high drawdown
            if stats.max_drawdown > 0.5:  # 50%+ drawdown is disqualifying
                risk_score = -10  # Negative score for extreme risk!
                score_breakdown['risk'] = f"❌ EXTREME RISK ({risk_score} points - {stats.max_drawdown:.1%} drawdown)"
            elif stats.max_drawdown > 0.25:  # 25%+ drawdown is very bad
                risk_score = -5  # Negative score for high risk
                score_breakdown['risk'] = f"❌ HIGH RISK ({risk_score} points - {stats.max_drawdown:.1%} drawdown)"
            elif stats.max_drawdown > 0.15:  # 15%+ drawdown is concerning
                risk_score = 5  # Low positive score
                score_breakdown['risk'] = f"⚠️ Moderate Risk ({risk_score} points - {stats.max_drawdown:.1%} drawdown)"
            elif stats.max_drawdown > 0.05:  # 5-15% drawdown is acceptable
                risk_score = 15  # Good score
                score_breakdown['risk'] = f"✅ Good Risk Mgmt ({risk_score} points - {stats.max_drawdown:.1%} drawdown)"
            else:  # <5% drawdown is excellent
                risk_score = 20  # Full points
                score_breakdown['risk'] = f"✅ Excellent Risk ({risk_score} points - {stats.max_drawdown:.1%} drawdown)"
            score += risk_score
            # 4. ACCOUNT MATURITY (10% weight) - NEW FACTOR
            min_good_age = 30  # At least 30 days of history is preferred
            if stats.analysis_period_days < 7:
                age_score = 0  # Too new, no confidence
                score_breakdown['maturity'] = f"❌ Too New ({age_score} points - {stats.analysis_period_days} days)"
            elif stats.analysis_period_days < 14:
                age_score = 2  # Very new
                score_breakdown['maturity'] = f"⚠️ Very New ({age_score} points - {stats.analysis_period_days} days)"
            elif stats.analysis_period_days < min_good_age:
                age_score = 5  # Somewhat new
                score_breakdown['maturity'] = f"⚠️ Somewhat New ({age_score} points - {stats.analysis_period_days} days)"
            else:
                # Scale from 30 days (7 points) to max age (10 points)
                if ranges['age_range'] > 0:
                    age_ratio = min(1.0, (stats.analysis_period_days - min_good_age) / max(1, ranges['age_max'] - min_good_age))
                    age_score = 7 + (age_ratio * 3)  # 7-10 points
                else:
                    age_score = 8  # Average if all same age
                score_breakdown['maturity'] = f"✅ Mature ({age_score:.1f} points - {stats.analysis_period_days} days)"
            score += age_score
            # 5. WIN RATE (5% weight) - Reduced importance
            if ranges['winrate_range'] > 0:
                winrate_normalized = (stats.win_rate - ranges['winrate_min']) / ranges['winrate_range']
                winrate_score = winrate_normalized * 5
            else:
                winrate_score = 2.5  # If all same win rate, give average score
            score += winrate_score
            score_breakdown['winrate'] = f"📈 Win Rate ({winrate_score:.1f} points - {stats.win_rate:.1%})"
            return score, score_breakdown
        # Calculate scores for all accounts
        scored_accounts = []
        for stats in stats_list:
            score, breakdown = calculate_relative_score(stats, ranges)
            scored_accounts.append((stats, score, breakdown))
        # Sort by score
        scored_accounts.sort(key=lambda x: x[1], reverse=True)
        # Print data ranges for context
        print(f"\n📊 COHORT ANALYSIS (for relative scoring):")
        print(f" 💰 PnL Range: ${ranges['pnl_min']:.0f} to ${ranges['pnl_max']:.0f}")
        if ranges['has_unprofitable']:
            print(f" ❌ Worst Loss: ${ranges['most_unprofitable']:.0f}")
        if ranges['has_profitable']:
            print(f" ✅ Best Profit: ${ranges['most_profitable']:.0f}")
        print(f" 📈 Win Rate Range: {ranges['winrate_min']:.1%} to {ranges['winrate_max']:.1%}")
        print(f" 🔄 Frequency Range: {ranges['freq_min']:.1f} to {ranges['freq_max']:.1f} trades/day")
        print(f" 📉 Drawdown Range: {ranges['drawdown_min']:.1%} to {ranges['drawdown_max']:.1%}")
        print(f" 📅 Account Age Range: {ranges['age_min']} to {ranges['age_max']} days")
        print(f"\n⚠️ WARNING: Accounts with losses or high drawdown receive NEGATIVE scores!")
        # Print results
        for i, (stats, score, breakdown) in enumerate(scored_accounts, 1):
            print(f"\n{i}. 📋 ACCOUNT: {stats.address}")
            print(f" 🏆 RELATIVE SCORE: {score:.1f}/100")
            print(f" 📊 Score Breakdown:")
            for metric, description in breakdown.items():
                print(f" {description}")
            print(f" 💰 Total PnL: ${stats.total_pnl:.2f}")
            print(f" 📈 Win Rate: {stats.win_rate:.1%}")
            print(f" 🕒 Avg Trade Duration: {stats.avg_trade_duration_hours:.1f} hours")
            print(f" 📉 Max Drawdown: {stats.max_drawdown:.1%}")
            print(f" 🔄 Trading Frequency: {stats.trading_frequency_per_day:.1f} trades/day")
            print(f" 💵 Avg Position Size: ${stats.avg_position_size:.2f}")
            print(f" ⚡ Max Leverage: {stats.max_leverage_used:.1f}x")
            print(f" 📊 Total Trades: {stats.total_trades}")
            print(f" 📍 Active Positions: {stats.active_positions}")
            print(f" 📅 Analysis Period: {stats.analysis_period_days} days")
            # New token and trading type information
            print(f" 🪙 Unique Tokens: {stats.unique_tokens_traded}")
            # Trading type with emoji
            trading_type_display = {
                "perps": "🔄 Perpetuals",
                "spot": "💱 Spot Trading",
                "mixed": "🔀 Mixed (Spot + Perps)",
                "unknown": "❓ Unknown"
            }.get(stats.trading_type, f"❓ {stats.trading_type}")
            print(f" 📈 Trading Type: {trading_type_display}")
            # Short/Long patterns - KEY ADVANTAGE
            print(f" 📊 Trading Style: {stats.trading_style}")
            print(f" 📉 Short Trades: {stats.short_percentage:.1f}% (can profit from price drops)")
            # Format buy/sell ratio properly
            if stats.buy_sell_ratio == float('inf'):
                ratio_display = "∞ (only buys)"
            elif stats.buy_sell_ratio == 0:
                ratio_display = "0 (only sells)"
            else:
                ratio_display = f"{stats.buy_sell_ratio:.2f}"
            print(f" ⚖️ Buy/Sell Ratio: {ratio_display}")
            # Top tokens
            if stats.top_tokens:
                top_tokens_str = ", ".join(stats.top_tokens[:3])  # Show top 3
                if len(stats.top_tokens) > 3:
                    top_tokens_str += f" +{len(stats.top_tokens)-3} more"
                print(f" 🏆 Top Tokens: {top_tokens_str}")
            # Copy Trading Suitability Evaluation
            evaluation = []
            is_hft_pattern = stats.trading_frequency_per_day > 50
            is_copyable = 1 <= stats.trading_frequency_per_day <= 20
            # First determine if account is copyable
            if is_hft_pattern:
                evaluation.append("❌ NOT COPYABLE - HFT/Bot")
            elif stats.trading_frequency_per_day < 1:
                evaluation.append("❌ NOT COPYABLE - Inactive")
            elif is_copyable:
                evaluation.append("✅ COPYABLE - Human trader")
            else:
                evaluation.append("⚠️ QUESTIONABLE - Check frequency")
            # Profitability check
            if stats.total_pnl > 0:
                evaluation.append("✅ Profitable")
            else:
                evaluation.append("❌ Not profitable")
            # Trade duration evaluation for copyable accounts
            if is_copyable:
                if 2 <= stats.avg_trade_duration_hours <= 48:
                    evaluation.append("✅ Good trade duration")
                elif stats.avg_trade_duration_hours < 2:
                    evaluation.append("⚠️ Very short trades")
                else:
                    evaluation.append("⚠️ Long hold times")
                # Win rate for human traders
                if stats.win_rate > 0.6:
                    evaluation.append("✅ Excellent win rate")
                elif stats.win_rate > 0.4:
                    evaluation.append("✅ Good win rate")
                else:
                    evaluation.append("⚠️ Low win rate")
            else:
                # For non-copyable accounts, just note the pattern
                if is_hft_pattern:
                    evaluation.append("🤖 Algorithmic trading")
                else:
                    evaluation.append("💤 Low activity")
            # Risk management (universal)
            if stats.max_drawdown < 0.15:
                evaluation.append("✅ Good risk management")
            elif stats.max_drawdown < 0.25:
                evaluation.append("⚠️ Moderate risk")
            else:
                evaluation.append("❌ High drawdown risk")
            print(f" 🎯 Evaluation: {' | '.join(evaluation)}")
        # Recommendation section (rest remains the same)
        print("\n" + "="*100)
        print("🎯 COPY TRADING RECOMMENDATIONS")
        print("="*100)
        # Separate copyable from non-copyable accounts
        copyable_accounts = [(stats, score, breakdown) for stats, score, breakdown in scored_accounts if stats.is_copyable]
        non_copyable_accounts = [(stats, score, breakdown) for stats, score, breakdown in scored_accounts if not stats.is_copyable]
        if copyable_accounts:
            print(f"\n✅ FOUND {len(copyable_accounts)} COPYABLE ACCOUNTS:")
            best_stats, best_score, best_breakdown = copyable_accounts[0]
            print(f"\n🏆 TOP COPYABLE RECOMMENDATION: {best_stats.address}")
            print(f" 📊 Relative Score: {best_score:.1f}/100")
            print(f" 🎯 Status: {best_stats.copyability_reason}")
            if best_score >= 60:
                recommendation = "🟢 HIGHLY RECOMMENDED"
            elif best_score >= 40:
                recommendation = "🟡 MODERATELY RECOMMENDED"
            elif best_score >= 20:
                recommendation = "🟠 PROCEED WITH EXTREME CAUTION"
            elif best_score >= 0:
                recommendation = "🔴 NOT RECOMMENDED (Risky)"
            else:
                recommendation = "⛔ DANGEROUS (Negative Score)"
            print(f" {recommendation}")
            print(f"\n📋 Why this account scored highest:")
            for metric, description in best_breakdown.items():
                print(f" {description}")
            print(f"\n⚙️ Suggested copy trading settings:")
            if best_score >= 60:
                print(f" 📊 Portfolio allocation: 10-25% (confident allocation)")
                print(f" ⚡ Max leverage limit: 5-10x")
            elif best_score >= 40:
                print(f" 📊 Portfolio allocation: 5-15% (moderate allocation)")
                print(f" ⚡ Max leverage limit: 3-5x")
            elif best_score >= 20:
                print(f" 📊 Portfolio allocation: 2-5% (very small allocation)")
                print(f" ⚡ Max leverage limit: 2-3x")
            else:
                print(f" 📊 Portfolio allocation: DO NOT COPY")
                print(f" ⚡ ACCOUNT IS TOO RISKY FOR COPY TRADING")
            print(f" 💰 Min position size: $25-50")
            print(f" 🔄 Expected trades: {best_stats.trading_frequency_per_day:.1f} per day")
            print(f" 📅 Account age: {best_stats.analysis_period_days} days")
        else:
            print(f"\n❌ NO COPYABLE ACCOUNTS FOUND")
            print(f" All analyzed accounts are unsuitable for copy trading")
        if non_copyable_accounts:
            print(f"\n❌ {len(non_copyable_accounts)} UNSUITABLE ACCOUNTS (DO NOT COPY):")
            for i, (account, score, breakdown) in enumerate(non_copyable_accounts[:3], 1):  # Show top 3 unsuitable
                score_indicator = "⛔ DANGEROUS" if score < 0 else "🔴 Risky" if score < 20 else "⚠️ Poor"
                print(f" {i}. {account.address[:10]}... - {account.copyability_reason} ({score_indicator}: {score:.1f})")
            if len(non_copyable_accounts) > 3:
                print(f" ... and {len(non_copyable_accounts) - 3} more unsuitable accounts")
        print(f"\n⚠️ ENHANCED COPY TRADING GUIDELINES:")
        print(f" • ✅ ONLY copy accounts with 30+ days of history")
        print(f" • ✅ ONLY copy PROFITABLE accounts (positive PnL)")
        print(f" • ✅ AVOID accounts with >15% max drawdown")
        print(f" • ✅ Ideal frequency: 5-15 trades per day")
        print(f" • ❌ NEVER copy accounts with negative scores")
        print(f" • ❌ NEVER copy accounts losing money")
        print(f" • ⚠️ Start with 2-5% allocation even for good accounts")
        print(f" • 📊 Higher scores = more reliable performance")
        print(f" • 🔄 ADVANTAGE: Perpetual traders can profit in BOTH bull & bear markets!")
        print(f" • 📈📉 They go long (profit when price rises) AND short (profit when price falls)")
        print(f" • 💡 This means potential profits in any market condition")
        # Show directional trading summary
        if copyable_accounts:
            print(f"\n🎯 DIRECTIONAL TRADING ANALYSIS OF COPYABLE ACCOUNTS:")
            for i, (stats, score, breakdown) in enumerate(copyable_accounts, 1):
                short_capability = "✅ Excellent" if stats.short_percentage > 30 else "⚠️ Limited" if stats.short_percentage > 10 else "❌ Minimal"
                risk_indicator = "⛔ DANGEROUS" if score < 0 else "🔴 Risky" if score < 20 else "⚠️ Caution" if score < 40 else "✅ Good"
                print(f" {i}. {stats.address[:10]}... - {stats.short_percentage:.1f}% shorts ({short_capability} short capability)")
                print(f" Score: {score:.1f}/100 ({risk_indicator}) | Style: {stats.trading_style}")
                print(f" Age: {stats.analysis_period_days} days | PnL: ${stats.total_pnl:.0f} | Drawdown: {stats.max_drawdown:.1%}")
                print(f" Advantage: Can profit when {', '.join(stats.top_tokens[:2])} prices move in EITHER direction")
    async def get_leaderboard(self, window: str = "7d", limit: int = 20) -> Optional[List[str]]:
        """
        Get top accounts from Hyperliquid leaderboard

        Note: Hyperliquid's public API doesn't expose leaderboard data directly.
        This function serves as a template for when/if the API becomes available.

        Args:
            window: Time window for leaderboard ("1d", "7d", "30d", "allTime")
            limit: Number of top accounts to return

        Returns:
            List of account addresses from leaderboard (currently returns None)
        """
        print(f"⚠️ Hyperliquid leaderboard API not publicly accessible")
        print(f"💡 To analyze current top performers:")
        print(f" 1. Visit: https://app.hyperliquid.xyz/leaderboard")
        print(f" 2. Copy top performer addresses manually")
        print(f" 3. Run: python utils/hyperliquid_account_analyzer.py [address1] [address2] ...")
        print(f" 4. Or use --top10 for a curated list of known good traders")
        # Note: If Hyperliquid ever makes their leaderboard API public,
        # we can implement the actual fetching logic here
        return None
    async def _try_alternative_leaderboard(self, window: str, limit: int) -> Optional[List[str]]:
        """Try alternative methods to get leaderboard data"""
        try:
            # Try different payload formats
            alternative_payloads = [
                {
                    "type": "leaderBoard",
                    "timeWindow": window
                },
                {
                    "type": "userLeaderboard",
                    "window": window
                },
                {
                    "type": "spotLeaderboard",
                    "req": {"timeWindow": window}
                }
            ]
            for payload in alternative_payloads:
                try:
                    async with self.session.post(self.info_url, json=payload) as response:
                        if response.status == 200:
                            data = await response.json()
                            # Try to extract addresses from any structure
                            addresses = self._extract_addresses_from_data(data, limit)
                            if addresses:
                                print(f"📊 Successfully fetched {len(addresses)} addresses using alternative method")
                                return addresses
                except Exception as e:
                    continue
            print("⚠️ Could not fetch leaderboard data, using fallback top accounts")
            return None
        except Exception as e:
            print(f"⚠️ Alternative leaderboard fetch failed: {e}")
            return None
    def _extract_addresses_from_data(self, data: Any, limit: int) -> List[str]:
        """Extract addresses from any nested data structure"""
        addresses = []

        def recursive_search(obj, depth=0):
            if depth > 5:  # Prevent infinite recursion
                return
            if isinstance(obj, list):
                for item in obj:
                    recursive_search(item, depth + 1)
            elif isinstance(obj, dict):
                # Check if this dict has an address field
                for addr_field in ['user', 'address', 'account', 'trader', 'wallet']:
                    if addr_field in obj:
                        addr = obj[addr_field]
                        if isinstance(addr, str) and addr.startswith('0x') and len(addr) == 42:
                            if addr not in addresses:  # Avoid duplicates
                                addresses.append(addr)
                # Recurse into nested objects
                for value in obj.values():
                    recursive_search(value, depth + 1)

        recursive_search(data)
        return addresses[:limit]
    async def get_top_accounts_from_leaderboard(self, window: str = "7d", limit: int = 10) -> List[str]:
        """
        Get top performing accounts from Hyperliquid leaderboard

        Currently uses a curated list of high-performing accounts since
        the Hyperliquid leaderboard API is not publicly accessible.

        Args:
            window: Time window ("1d", "7d", "30d", "allTime")
            limit: Number of accounts to return

        Returns:
            List of top account addresses
        """
        print(f"🔍 Attempting to fetch top {limit} accounts from {window} leaderboard...")
        addresses = await self.get_leaderboard(window, limit)
        if not addresses:
            print("\n📋 Using curated list of high-performing accounts")
            print("💡 These accounts have been manually verified for good performance")
            # Curated list of known high-performing accounts
            # Updated based on our previous analysis
            curated_addresses = [
                "0x59a15c79a007cd6e9965b949fcf04125c2212524",  # Best performer from previous analysis
                "0xa10ec245b3483f83e350a9165a52ae23dbab01bc",
                "0x0487b5e806ac781508cb3272ebd83ad603ddcc0f",
                "0x72fad4e75748b65566a3ebb555b6f6ee18ce08d1",
                "0xa70434af5778038245d53da1b4d360a30307a827",
                "0xeaa400abec7c62d315fd760cbba817fa35e4e0e8",
                "0x3104b7668f9e46fb13ec0b141d2902e144d67efe",
                "0x74dcdc6df25bd7ba70336632ecd76a053d0f8dd4",
                "0xc62df97dcf96324adf4edd30a4a7bffd5402f4da",
                "0xd11f5de0189d52b3abe6b0960b8377c20988e17e"
            ]
            selected_addresses = curated_addresses[:limit]
            print(f"📊 Selected {len(selected_addresses)} accounts for analysis:")
            for i, addr in enumerate(selected_addresses, 1):
                print(f" {i}. {addr}")
            return selected_addresses
        print(f"✅ Successfully fetched {len(addresses)} top accounts from leaderboard")
        for i, addr in enumerate(addresses, 1):
            print(f" {i}. {addr}")
        return addresses
async def main():
    """Main function"""
    parser = argparse.ArgumentParser(description='Analyze Hyperliquid trading accounts')
    parser.add_argument('addresses', nargs='*', help='Account addresses to analyze')
    parser.add_argument('--top10', action='store_true', help='Analyze the provided top 10 accounts (hardcoded list)')
    parser.add_argument('--leaderboard', action='store_true', help='Fetch and analyze top accounts from Hyperliquid leaderboard')
    parser.add_argument('--window', default='7d', choices=['1d', '7d', '30d', 'allTime'],
                        help='Time window for leaderboard (default: 7d)')
    parser.add_argument('--limit', type=int, default=10, help='Number of top accounts to analyze (default: 10)')
    args = parser.parse_args()

    # Top 10 accounts from the user (fallback)
    top10_addresses = [
        "0xa10ec245b3483f83e350a9165a52ae23dbab01bc",
        "0x2aab3badd6a5daa388da47de4c72a6fa618a6265",
        "0xd11f5de0189d52b3abe6b0960b8377c20988e17e",
        "0xc62df97dcf96324adf4edd30a4a7bffd5402f4da",
        "0xa70434af5778038245d53da1b4d360a30307a827",
        "0x72fad4e75748b65566a3ebb555b6f6ee18ce08d1",
        "0x0487b5e806ac781508cb3272ebd83ad603ddcc0f",
        "0x59a15c79a007cd6e9965b949fcf04125c2212524",
        "0xeaa400abec7c62d315fd760cbba817fa35e4e0e8",
        "0x3104b7668f9e46fb13ec0b141d2902e144d67efe",
        "0x74dcdc6df25bd7ba70336632ecd76a053d0f8dd4",
        "0x101a2d2afc2f9b0b217637f53e3a3e859104a33d",
        "0x836f01e63bd0fcbe673dcd905f882a5a808dd36e",
        "0xae42743b5d6a3594b7f95b5cebce64cfedc69318",
        "0x944fdea9d4956ce673c7545862cefccad6ee1b04",
        "0x2a93e999816c9826ade0b51aaa2d83240d8f4596",
        "0x7d3ca5fa94383b22ee49fc14e89aa417f65b4d92",
        "0xfacb7404c1fad06444bda161d1304e4b7aa14e77",
        "0x654d8c01f308d670d6bed13d892ee7ee285028a6",
        "0xbbf3fc6f14e70eb451d1ecd2c20227702fc435c6",
        "0x41dd4becd2930c37e8c05bac4e82459489d47e32",
        "0xe97b3608b2c527b92400099b144b8868e8e02b14",
        "0x9d8769bf821cec63f5e5436ef194002377d917f1",
        "0x258855d09cf445835769f21370230652c4294a92",
        "0x69e07d092e3b4bd5bbc02aed7491916269426ad1"
    ]

    async with HyperliquidAccountAnalyzer() as analyzer:
        if args.leaderboard:
            # Fetch top accounts from leaderboard
            addresses = await analyzer.get_top_accounts_from_leaderboard(args.window, args.limit)
        elif args.top10:
            # Use hardcoded top 10 list
            addresses = top10_addresses
            print("ℹ️ Using hardcoded top 10 accounts")
        elif args.addresses:
            # Use provided addresses
            addresses = args.addresses
            print(f"ℹ️ Analyzing {len(addresses)} provided addresses")
        else:
            # Default: use curated list (since leaderboard API isn't available)
            print("ℹ️ No addresses specified, using curated high-performance accounts...")
            addresses = await analyzer.get_top_accounts_from_leaderboard(args.window, args.limit)
        if not addresses:
            print("❌ No addresses to analyze")
            return
        results = await analyzer.analyze_multiple_accounts(addresses)
        analyzer.print_analysis_results(results)


if __name__ == "__main__":
    asyncio.run(main())
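
# A minimal sketch (kept as comments so it never executes) of how the analyzer could be
# driven programmatically instead of via the CLI. The import path is an assumption and
# depends on where this file sits in your project; the address comes from the curated
# list above.
#
#   import asyncio
#   from hyperliquid_account_analyzer import HyperliquidAccountAnalyzer  # hypothetical import path
#
#   async def example():
#       async with HyperliquidAccountAnalyzer() as analyzer:
#           stats = await analyzer.analyze_account("0x59a15c79a007cd6e9965b949fcf04125c2212524")
#           if stats:
#               print(stats.total_pnl, stats.win_rate, stats.copyability_reason)
#
#   asyncio.run(example())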