```python
import krakenex
import pandas as pd
from datetime import datetime, timezone
import time
import os
from typing import Dict, Optional
import logging
from huggingface_hub import HfApi

# Log to a dated file and to the console
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(f'kraken_collection_{datetime.now().strftime("%Y%m%d")}.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

class KrakenHuggingFaceCollector:
    def __init__(self, kraken_key_path: str, repo_id: str):
        self.kraken_api = krakenex.API()
        try:
            self.kraken_api.load_key(kraken_key_path)
            logger.info("Successfully loaded Kraken API key")
        except Exception as e:
            logger.error(f"Failed to load Kraken API key: {e}")
            raise

        try:
            self.hf_api = HfApi()
            self.repo_id = repo_id
            logger.info("Successfully connected to Hugging Face")
        except Exception as e:
            logger.error(f"Failed to initialize Hugging Face API: {e}")
            raise

        # Kraken pair codes: XXBT = BTC, XDG = DOGE
        self.pairs = [
            "XXBTZUSD",
            "XETHZUSD",
            "XXRPZUSD",
            "ADAUSD",
            "XDGUSD",
            "SOLUSD",
            "DOTUSD",
            "MATICUSD",
            "LTCUSD"
        ]

        self.running = True
        self.data_points_collected = 0
        self.collection_start_time = None
        self.api_calls = 0
        self.last_api_reset = datetime.now()

    def check_api_rate(self) -> bool:
        """Return True while under a self-imposed limit of 15 calls per 30-second window."""
        current_time = datetime.now()
        if (current_time - self.last_api_reset).total_seconds() >= 30:
            self.api_calls = 0
            self.last_api_reset = current_time
        return self.api_calls < 15

    def fetch_ticker_data(self, pair: str) -> Optional[Dict]:
        """Fetch a ticker snapshot for one pair, with basic rate limiting."""
        if not self.check_api_rate():
            logger.warning("API rate limit approaching, waiting...")
            time.sleep(2)

        try:
            self.api_calls += 1
            response = self.kraken_api.query_public('Ticker', {'pair': pair})

            if response.get('error'):
                logger.error(f"Kraken API error for {pair}: {response['error']}")
                return None

            # The result is keyed by Kraken's canonical pair name, which can
            # differ from the requested alias, so take the first (only) entry
            pair_data = list(response['result'].values())[0]

            return {
                'timestamp': datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S.%f'),
                'pair': pair,
                'price': float(pair_data['c'][0]),   # last trade price
                'volume': float(pair_data['v'][0]),  # volume today
                'bid': float(pair_data['b'][0]),     # best bid
                'ask': float(pair_data['a'][0]),     # best ask
                'low': float(pair_data['l'][0]),     # low today
                'high': float(pair_data['h'][0]),    # high today
                'vwap': float(pair_data['p'][0]),    # VWAP today
                'trades': int(pair_data['t'][0])     # trade count today
            }

        except Exception as e:
            logger.error(f"Error fetching data for {pair}: {e}")
            return None

    def upload_to_huggingface(self, df: pd.DataFrame, timestamp: str) -> None:
        """Write the batch to a local CSV, then upload it to the dataset repo."""
        # Define the path up front so the except block can reference it
        local_path = f'data/continuous/kraken_trades_{timestamp}.csv'
        try:
            os.makedirs('data/continuous', exist_ok=True)
            df.to_csv(local_path, index=False)

            self.hf_api.upload_file(
                path_or_fileobj=local_path,
                path_in_repo=f"data/continuous/kraken_trades_{timestamp}.csv",
                repo_id=self.repo_id,
                repo_type="dataset"
            )

            logger.info("Successfully uploaded batch to Hugging Face")

        except Exception as e:
            logger.error(f"Error uploading to Hugging Face: {e}")
            logger.info(f"Data saved locally at: {local_path}")

    def collect_continuous(self, interval_minutes: int = 3, batch_size: int = 30):
        """
        Continuously collect ticker snapshots and upload them in batches.

        Args:
            interval_minutes: Minutes between the start of each batch (default: 3)
            batch_size: Number of snapshots per batch (default: 30)
        """
        self.collection_start_time = datetime.now()
        logger.info(f"Starting continuous collection at {self.collection_start_time}")
        logger.info(f"Collecting {batch_size} snapshots every {interval_minutes} minutes")
        logger.info(f"Total API calls per batch: ~{batch_size * len(self.pairs)}")
        logger.info(f"Estimated daily data points: {(24 * 60 // interval_minutes) * batch_size * len(self.pairs)}")
        logger.info("Press CTRL+C to stop collection")

        while self.running:
            try:
                batch_start_time = datetime.now()
                records = []

                for i in range(batch_size):
                    if not self.running:
                        break

                    snapshot_start = datetime.now()
                    logger.info(f"Collecting snapshot {i+1}/{batch_size}")

                    for pair in self.pairs:
                        # Back off briefly when the rate window is nearly full,
                        # then fetch so no pair is silently skipped
                        if not self.check_api_rate():
                            time.sleep(1)
                        record = self.fetch_ticker_data(pair)
                        if record:
                            records.append(record)

                    # Pace snapshots roughly 1.5 seconds apart
                    elapsed = (datetime.now() - snapshot_start).total_seconds()
                    sleep_time = max(0.5, 1.5 - elapsed)

                    if i < batch_size - 1 and self.running:
                        time.sleep(sleep_time)

                if records:
                    df = pd.DataFrame(records)
                    current_timestamp = datetime.now().strftime('%Y%m%d_%H%M')
                    self.upload_to_huggingface(df, current_timestamp)

                    self.data_points_collected += len(records)
                    collection_duration = datetime.now() - self.collection_start_time

                    logger.info("\nBatch Summary:")
                    logger.info(f"Records in batch: {len(records)}")
                    logger.info(f"Pairs collected: {df['pair'].nunique()}")
                    logger.info(f"Total data points: {self.data_points_collected}")
                    logger.info(f"Collection duration: {collection_duration}")
                    logger.info(f"Data points per hour: {self.data_points_collected / collection_duration.total_seconds() * 3600:.2f}")

                batch_duration = (datetime.now() - batch_start_time).total_seconds()
                sleep_time = max(0, interval_minutes * 60 - batch_duration)

                if self.running and sleep_time > 0:
                    logger.info(f"Waiting {sleep_time:.2f} seconds until next batch...")
                    time.sleep(sleep_time)

            except KeyboardInterrupt:
                # Stop cleanly so the final summary below still runs
                logger.info("Stopping collection (CTRL+C pressed)")
                self.running = False
            except Exception as e:
                logger.error(f"Error in continuous collection: {e}")
                logger.info("Waiting 30 seconds before retry...")
                time.sleep(30)

        logger.info("Data collection stopped")
        logger.info(f"Total data points collected: {self.data_points_collected}")
        logger.info(f"Total collection time: {datetime.now() - self.collection_start_time}")

def main():
    collector = None
    try:
        collector = KrakenHuggingFaceCollector(
            kraken_key_path="kraken.key",
            repo_id="GotThatData/kraken-trading-data"
        )

        collector.collect_continuous(
            interval_minutes=3,
            batch_size=30
        )

    except KeyboardInterrupt:
        # Covers an interrupt during setup; collect_continuous handles its own
        logger.info("Stopping collection (CTRL+C pressed)")
        if collector is not None:
            collector.running = False
    except Exception as e:
        logger.error(f"Fatal error: {e}")
        raise


if __name__ == "__main__":
    main()
```

To use this script:

1. Save it as `kraken_data_collector.py`

2. Make sure you have your `kraken.key` file with your API credentials (format shown below)
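
   `krakenex.API.load_key()` reads a plain-text file with the API key on the first line and the private key on the second; placeholder values shown:

   ```text
   YOUR_API_KEY
   YOUR_PRIVATE_KEY
   ```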

3. Install the required packages if you haven't already:

```bash
pip install krakenex pandas huggingface_hub
```
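
   Uploading also requires Hugging Face credentials with write access to the dataset repo. If this machine isn't authenticated yet, log in once (or set the `HF_TOKEN` environment variable, which `HfApi` also picks up):

   ```bash
   huggingface-cli login
   ```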

4. Run the script:

```bash
python kraken_data_collector.py
```
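
   Before leaving it running, you can sanity-check the Kraken side with a one-off fetch. A minimal sketch reusing the class from the script; it pulls a single snapshot and never uploads anything:

   ```python
   from kraken_data_collector import KrakenHuggingFaceCollector

   # repo_id is only used for uploads, so any value works for this check
   collector = KrakenHuggingFaceCollector(
       kraken_key_path="kraken.key",
       repo_id="GotThatData/kraken-trading-data"
   )

   # One BTC/USD snapshot; prints None if the request fails
   print(collector.fetch_ticker_data("XXBTZUSD"))
   ```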

This will:

- Collect 30 snapshots every 3 minutes
- Save data locally and to Hugging Face (see the loading sketch after this list)
- Provide detailed logging
- Handle errors gracefully
- Respect API rate limits
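
Once a few batches have accumulated, you can load them back for analysis. A minimal sketch over the local copies; the same files can also be pulled from the Hub with `hf_hub_download`:

```python
import glob

import pandas as pd

# Concatenate every batch CSV the collector has written
files = sorted(glob.glob('data/continuous/kraken_trades_*.csv'))
df = pd.concat((pd.read_csv(f) for f in files), ignore_index=True)

# Columns: timestamp, pair, price, volume, bid, ask, low, high, vwap, trades
df['timestamp'] = pd.to_datetime(df['timestamp'])
print(df.groupby('pair')['price'].describe())
```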