Datasets:
GotThatData
committed on
Update
kraken-data-collection-script
CHANGED (+212 -71)

```python
import krakenex
import pandas as pd
from datetime import datetime
import time
import os
from typing import Dict, List, Optional
import logging
from huggingface_hub import HfApi

# Set up logging to a dated file and to the console
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(f'kraken_collection_{datetime.now().strftime("%Y%m%d")}.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class KrakenHuggingFaceCollector:
    def __init__(self, kraken_key_path: str, repo_id: str):
        self.kraken_api = krakenex.API()
        try:
            self.kraken_api.load_key(kraken_key_path)
            logger.info("Successfully loaded Kraken API key")
        except Exception as e:
            logger.error(f"Failed to load Kraken API key: {e}")
            raise

        try:
            self.hf_api = HfApi()
            self.repo_id = repo_id
            logger.info("Successfully connected to Hugging Face")
        except Exception as e:
            logger.error(f"Failed to initialize Hugging Face API: {e}")
            raise

        self.pairs = [
            "XXBTZUSD",   # Bitcoin/USD
            "XETHZUSD",   # Ethereum/USD
            "XXRPZUSD",   # Ripple/USD
            "ADAUSD",     # Cardano/USD
            "XDGUSD",     # Dogecoin/USD
            "SOLUSD",     # Solana/USD
            "DOTUSD",     # Polkadot/USD
            "MATICUSD",   # Polygon/USD
            "LTCUSD"      # Litecoin/USD
        ]

        self.running = True
        self.data_points_collected = 0
        self.collection_start_time = None
        self.api_calls = 0
        self.last_api_reset = datetime.now()

    def check_api_rate(self) -> bool:
        """Monitor API call rate: allow at most 15 calls per 30-second window"""
        current_time = datetime.now()
        if (current_time - self.last_api_reset).total_seconds() >= 30:
            self.api_calls = 0
            self.last_api_reset = current_time
        return self.api_calls < 15

    def fetch_ticker_data(self, pair: str) -> Optional[Dict]:
        """Fetch ticker data with rate limiting"""
        if not self.check_api_rate():
            logger.warning("API rate limit approaching, waiting...")
            time.sleep(2)

        try:
            self.api_calls += 1
            response = self.kraken_api.query_public('Ticker', {'pair': pair})

            if 'error' in response and response['error']:
                logger.error(f"Kraken API error for {pair}: {response['error']}")
                return None

            data = response['result']
            pair_data = list(data.values())[0]

            return {
                'timestamp': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f'),
                'pair': pair,
                'price': float(pair_data['c'][0]),   # last trade price
                'volume': float(pair_data['v'][0]),  # volume today
                'bid': float(pair_data['b'][0]),
                'ask': float(pair_data['a'][0]),
                'low': float(pair_data['l'][0]),     # low today
                'high': float(pair_data['h'][0]),    # high today
                'vwap': float(pair_data['p'][0]),    # volume-weighted average price today
                'trades': int(pair_data['t'][0])     # trade count today
            }

        except Exception as e:
            logger.error(f"Error fetching data for {pair}: {e}")
            return None

    def upload_to_huggingface(self, df: pd.DataFrame, timestamp: str) -> None:
        """Upload DataFrame to Hugging Face as CSV"""
        local_path = None
        try:
            # Create local directories
            os.makedirs('data/continuous', exist_ok=True)

            # Save locally first
            local_path = f'data/continuous/kraken_trades_{timestamp}.csv'
            df.to_csv(local_path, index=False)

            # Upload to Hugging Face
            self.hf_api.upload_file(
                path_or_fileobj=local_path,
                path_in_repo=f"data/continuous/kraken_trades_{timestamp}.csv",
                repo_id=self.repo_id,
                repo_type="dataset"
            )

            logger.info("Successfully uploaded batch to Hugging Face")

        except Exception as e:
            logger.error(f"Error uploading to Hugging Face: {e}")
            logger.info(f"Data saved locally at: {local_path}")

    def collect_continuous(self, interval_minutes: int = 3, batch_size: int = 30):
        """
        Enhanced continuous data collection

        Args:
            interval_minutes: Minutes between each collection (default: 3)
            batch_size: Number of snapshots per batch (default: 30)
        """
        self.collection_start_time = datetime.now()
        logger.info(f"Starting enhanced continuous collection at {self.collection_start_time}")
        logger.info(f"Collecting {batch_size} snapshots every {interval_minutes} minutes")
        logger.info(f"Total API calls per batch: ~{batch_size * len(self.pairs)}")
        logger.info(f"Estimated daily data points: {(24 * 60 // interval_minutes) * batch_size * len(self.pairs)}")
        logger.info("Press CTRL+C to stop collection")

        while self.running:
            try:
                batch_start_time = datetime.now()
                records = []

                for i in range(batch_size):
                    if not self.running:
                        break

                    snapshot_start = datetime.now()
                    logger.info(f"Collecting snapshot {i+1}/{batch_size}")

                    for pair in self.pairs:
                        if self.check_api_rate():
                            record = self.fetch_ticker_data(pair)
                            if record:
                                records.append(record)
                        else:
                            time.sleep(1)  # Wait if approaching rate limit

                    # Dynamic sleep calculation
                    elapsed = (datetime.now() - snapshot_start).total_seconds()
                    sleep_time = max(0.5, 1.5 - elapsed)  # Ensure at least 0.5s between snapshots

                    if i < batch_size - 1 and self.running:
                        time.sleep(sleep_time)

                if records:
                    df = pd.DataFrame(records)
                    current_timestamp = datetime.now().strftime('%Y%m%d_%H%M')
                    self.upload_to_huggingface(df, current_timestamp)

                    self.data_points_collected += len(records)
                    collection_duration = (datetime.now() - self.collection_start_time)

                    logger.info("\nBatch Summary:")
                    logger.info(f"Records in batch: {len(records)}")
                    logger.info(f"Pairs collected: {len(df['pair'].unique())}")
                    logger.info(f"Total data points: {self.data_points_collected}")
                    logger.info(f"Collection duration: {collection_duration}")
                    logger.info(f"Data points per hour: {self.data_points_collected / collection_duration.total_seconds() * 3600:.2f}")

                # Adaptive interval timing
                batch_duration = (datetime.now() - batch_start_time).total_seconds()
                sleep_time = max(0, interval_minutes * 60 - batch_duration)

                if self.running and sleep_time > 0:
                    logger.info(f"Waiting {sleep_time:.2f} seconds until next batch...")
                    time.sleep(sleep_time)

            except Exception as e:
                logger.error(f"Error in continuous collection: {e}")
                logger.info("Waiting 30 seconds before retry...")
                time.sleep(30)

        logger.info("Data collection stopped")
        logger.info(f"Total data points collected: {self.data_points_collected}")
        logger.info(f"Total collection time: {datetime.now() - self.collection_start_time}")

def main():
    try:
        collector = KrakenHuggingFaceCollector(
            kraken_key_path="kraken.key",
            repo_id="GotThatData/kraken-trading-data"
        )

        # Start collection with enhanced parameters
        collector.collect_continuous(
            interval_minutes=3,  # Collect every 3 minutes
            batch_size=30        # 30 snapshots per batch
        )
    except KeyboardInterrupt:
        logger.info("Collection stopped by user")
    except Exception as e:
        logger.error(f"Fatal error: {e}")
        raise


if __name__ == "__main__":
    main()
```
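
The listing winds down via the `self.running` flag, but nothing in it ever sets that flag to `False`; stopping with CTRL+C instead raises `KeyboardInterrupt`, which can cut a batch off mid-snapshot and skip the end-of-collection summary. A minimal sketch of a cleaner shutdown, assuming the script was saved as `kraken_data_collector.py` per step 1 below; the `signal` wiring is an addition for illustration, not part of the committed script:

```python
import signal

from kraken_data_collector import KrakenHuggingFaceCollector

collector = KrakenHuggingFaceCollector(
    kraken_key_path="kraken.key",
    repo_id="GotThatData/kraken-trading-data",
)

def handle_sigint(signum, frame):
    # Flip the loop flag; collect_continuous() then exits after finishing
    # the current batch and still logs its end-of-collection summary.
    collector.running = False

signal.signal(signal.SIGINT, handle_sigint)
collector.collect_continuous(interval_minutes=3, batch_size=30)
```

Because Python retries sleeps interrupted by a handled signal, shutdown may take until the current wait expires rather than being instantaneous.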

To use this script:

1. Save it as `kraken_data_collector.py`

2. Make sure you have your `kraken.key` file with your API credentials (key-file format sketched after this list)

3. Install required packages if you haven't:
```bash
pip install krakenex pandas huggingface_hub
```

4. Run the script:
```bash
python kraken_data_collector.py
```
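
A note on step 2: krakenex's `load_key()` reads a plain-text file with the API key on the first line and the private key on the second, along these lines (placeholder values, not real credentials):

```
YOUR-KRAKEN-API-KEY
YOUR-KRAKEN-PRIVATE-KEY
```

The upload side needs Hugging Face credentials too; running `huggingface-cli login` once (or exporting a token via the `HF_TOKEN` environment variable in recent `huggingface_hub` releases) is the usual way to provide them.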

This will:

- Collect 30 snapshots every 3 minutes
- Save data locally and to Hugging Face
- Provide detailed logging
- Handle errors gracefully
- Respect API rate limits
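
With the defaults, that works out to (24 × 60 / 3) batches per day × 30 snapshots × 9 pairs = 129,600 rows per day, spread across 480 CSV files. To pull a batch back down later, a minimal sketch (the timestamp in the filename is a hypothetical example; real files follow the `kraken_trades_<YYYYmmdd_HHMM>.csv` pattern used by `upload_to_huggingface`):

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# Hypothetical filename; browse the repo's data/continuous/ folder for real ones
path = hf_hub_download(
    repo_id="GotThatData/kraken-trading-data",
    filename="data/continuous/kraken_trades_20240101_1200.csv",
    repo_type="dataset",
)

df = pd.read_csv(path)
print(df.groupby("pair")["price"].last())
```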