updated README.md
README.md
CHANGED
---
tags:
- model_hub_mixin
- pytorch_model_hub_mixin
pipeline_tag: tabular-regression
library_name: pytorch
datasets:
- gvlassis/california_housing
metrics:
- rmse
---
# wide-and-deep-net-california-housing-v3

A wide & deep neural network trained on the California Housing dataset.

It takes eight features: `'MedInc'`, `'HouseAge'`, `'AveRooms'`, `'AveBedrms'`, `'Population'`, `'AveOccup'`, `'Latitude'` and `'Longitude'`. It predicts `'MedHouseVal'`.

The first five features (`'MedInc'`, `'HouseAge'`, `'AveRooms'`, `'AveBedrms'` and `'Population'`) flow through the wide path.

The last six features (`'AveRooms'`, `'AveBedrms'`, `'Population'`, `'AveOccup'`, `'Latitude'` and `'Longitude'`) flow through the deep path.

Note: The features `'AveRooms'`, `'AveBedrms'` and `'Population'` flow through both the wide path and the deep path.
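
For concreteness, here is a minimal sketch of that column split, assuming the column order returned by `fetch_california_housing(as_frame=True)`; the `wide_cols` and `deep_cols` names are illustrative and not part of the model.

```
from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing(as_frame=True)
feature_names = list(housing['data'].columns)
# ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'Latitude', 'Longitude']

wide_cols = feature_names[:5]  # columns 0-4 feed the wide path
deep_cols = feature_names[2:]  # columns 2-7 feed the deep path
print(wide_cols)  # ['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population']
print(deep_cols)  # ['AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'Latitude', 'Longitude']
```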

This model is a PyTorch adaptation of the TensorFlow model in Chapter 10 of Aurélien Géron's book *Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow*.

![](https://raw.githubusercontent.com/sambitmukherjee/handson-ml3-pytorch/main/chapter10/Figure_10-15.png)

Code: https://github.com/sambitmukherjee/handson-ml3-pytorch/blob/main/chapter10/wide_and_deep_net_california_housing_v3.ipynb

Experiment tracking: https://wandb.ai/sadhaklal/wide-and-deep-net-california-housing

## Usage
```
from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing(as_frame=True)

from sklearn.model_selection import train_test_split

X_train_full, X_test, y_train_full, y_test = train_test_split(housing['data'], housing['target'], test_size=0.25, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, test_size=0.25, random_state=42)

# Standardize the features using statistics computed on the training set.
X_means, X_stds = X_train.mean(axis=0), X_train.std(axis=0)
X_train = (X_train - X_means) / X_stds
X_valid = (X_valid - X_means) / X_stds
X_test = (X_test - X_means) / X_stds

import torch

device = torch.device("cpu")

from dataclasses import dataclass
from typing import Optional

# Container for the network's outputs and (optionally) its losses.
@dataclass
class WideAndDeepNetOutput:
    main_output: torch.Tensor
    aux_output: torch.Tensor
    main_loss: Optional[torch.Tensor] = None
    aux_loss: Optional[torch.Tensor] = None
    loss: Optional[torch.Tensor] = None

import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin

# Wide & deep network: the main head sees the wide input concatenated with the
# deep path's activations; the auxiliary head sees the deep path's activations only.
class WideAndDeepNet(nn.Module, PyTorchModelHubMixin):
    def __init__(self):
        super().__init__()
        self.hidden1 = nn.Linear(6, 30)
        self.hidden2 = nn.Linear(30, 30)
        self.main_head = nn.Linear(35, 1)
        self.aux_head = nn.Linear(30, 1)
        self.main_loss_fn = nn.MSELoss(reduction='sum')
        self.aux_loss_fn = nn.MSELoss(reduction='sum')

    def forward(self, input_wide, input_deep, label=None):
        act = torch.relu(self.hidden1(input_deep))
        act = torch.relu(self.hidden2(act))
        concat = torch.cat([input_wide, act], dim=1)
        main_output = self.main_head(concat)
        aux_output = self.aux_head(act)
        if label is not None:
            main_loss = self.main_loss_fn(main_output.squeeze(), label)
            aux_loss = self.aux_loss_fn(aux_output.squeeze(), label)
            loss = 0.9 * main_loss + 0.1 * aux_loss
            return WideAndDeepNetOutput(main_output, aux_output, main_loss, aux_loss, loss)
        else:
            return WideAndDeepNetOutput(main_output, aux_output)

model = WideAndDeepNet.from_pretrained("sadhaklal/wide-and-deep-net-california-housing-v3")
model.to(device)
model.eval()

# Let's predict on 3 unseen examples from the test set:
print(f"Ground truth housing prices: {y_test.values[:3]}")
new = {
    'input_wide': torch.tensor(X_test.values[:3, :5], dtype=torch.float32),
    'input_deep': torch.tensor(X_test.values[:3, 2:], dtype=torch.float32)
}
new = {k: v.to(device) for k, v in new.items()}
with torch.no_grad():
    output = model(**new)
print(f"Predicted housing prices: {output.main_output.squeeze()}")
```
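
The snippet above only runs inference. For reference, here is a rough sketch of how the combined loss returned by `forward` (0.9 × main loss + 0.1 × auxiliary loss) could drive training; the optimizer, learning rate, batch size and epoch count are illustrative assumptions, not the settings used for the published checkpoint (see the linked notebook for those).

```
from torch.utils.data import DataLoader, TensorDataset

# Build tensors with the same column split used at inference time.
train_ds = TensorDataset(
    torch.tensor(X_train.values[:, :5], dtype=torch.float32),  # wide input
    torch.tensor(X_train.values[:, 2:], dtype=torch.float32),  # deep input
    torch.tensor(y_train.values, dtype=torch.float32)           # label
)
train_dl = DataLoader(train_ds, batch_size=32, shuffle=True)

model = WideAndDeepNet()
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

model.train()
for epoch in range(5):
    for input_wide, input_deep, label in train_dl:
        optimizer.zero_grad()
        output = model(input_wide, input_deep, label)
        output.loss.backward()  # weighted sum of main and auxiliary losses
        optimizer.step()
```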

## Metric

RMSE on the test set: 0.574 (the target `'MedHouseVal'` is expressed in units of $100,000).
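
For reference, a minimal sketch of how a test-set RMSE can be computed with the objects from the Usage snippet above; this is an illustration, not necessarily the exact evaluation code behind the reported number.

```
import math

X_test_wide = torch.tensor(X_test.values[:, :5], dtype=torch.float32).to(device)
X_test_deep = torch.tensor(X_test.values[:, 2:], dtype=torch.float32).to(device)
y_test_t = torch.tensor(y_test.values, dtype=torch.float32).to(device)

model.eval()
with torch.no_grad():
    output = model(X_test_wide, X_test_deep)

rmse = math.sqrt(((output.main_output.squeeze() - y_test_t) ** 2).mean().item())
print(f"RMSE on the test set: {rmse:.3f}")
```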

---

This model has been pushed to the Hub using the [PyTorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration.
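
Because `WideAndDeepNet` inherits from `PyTorchModelHubMixin`, the standard mixin methods (`save_pretrained`, `push_to_hub` and `from_pretrained`) are available on it. A brief sketch; the local directory and repo id below are placeholders:

```
# Save the weights and config to a local directory (placeholder path).
model.save_pretrained("wide-and-deep-net-california-housing-v3")

# Push to the Hub (placeholder repo id; requires authentication, e.g. via `huggingface-cli login`).
model.push_to_hub("your-username/wide-and-deep-net-california-housing-v3")
```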