reset
- Graph/ChebNet/code/Chebnet_Blog+Code.ipynb +1034 -0
- Graph/ChebNet/code/coarsening.py +310 -0
- Graph/ChebNet/code/grid_graph.py +69 -0
- Graph/DeepWalk/code/DeepWalk.py +164 -0
- Graph/DeepWalk/code/DeepWalk_Blog+Code.ipynb +612 -0
- Graph/GAT/code/GAT_Blog+Code.ipynb +475 -0
- Graph/GAT/code/GAT_PyG.py +78 -0
- Graph/GCN/code/GCN.py +129 -0
- Graph/GCN/code/GCN_Blog+Code.ipynb +0 -0
- Graph/GCN/code/GCN_PyG.ipynb +0 -0
- Graph/GCN/code/GCN_PyG.py +78 -0
- Graph/GraphSAGE/code/GraphSAGE.py +209 -0
- Graph/GraphSAGE/code/GraphSAGE_Code+Blog.ipynb +668 -0
- Image/utils/train_utils.py +15 -8
- count.py +0 -10
- feature_predict/AlexNet/code/layer_info.json +1 -0
- feature_predict/AlexNet/code/model.py +89 -0
- feature_predict/AlexNet/code/train.py +41 -0
- feature_predict/AlexNet/dataset/.gitkeep +0 -0
- feature_predict/AlexNet/model/.gitkeep +0 -0
- feature_predict/AlexNet/model/0/epoch_195/index.json +0 -0
- feature_predict/AlexNet/model/0/epoch_195/subject_model.pth +3 -0
- feature_predict/AlexNet/model/0/epoch_195/train_data.npy +3 -0
- feature_predict/AlexNet/model/0/epoch_200/index.json +0 -0
- feature_predict/AlexNet/model/0/epoch_200/subject_model.pth +3 -0
- feature_predict/AlexNet/model/0/epoch_200/train_data.npy +3 -0
- feature_predict/__init__.py +3 -0
- feature_predict/feature_predictor.py +410 -0
- feature_predict/test_feature.py +86 -0
- feature_predict/utils/dataset_utils.py +110 -0
- feature_predict/utils/parse_args.py +19 -0
- feature_predict/utils/train_utils.py +484 -0
Graph/ChebNet/code/Chebnet_Blog+Code.ipynb
ADDED
@@ -0,0 +1,1034 @@
# ChebNet: CNN on Graphs with Fast Localized Spectral Filtering

## Motivation
As part of this blog series, this time we'll be looking at the spectral convolution technique introduced in the paper "Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering" by M. Defferrard, X. Bresson, and P. Vandergheynst.

As mentioned in our previous blog on [A Review: Graph Convolutional Networks (GCN)](https://dsgiitr.com/blogs/gcn/), the spatial convolution and pooling operations are well-defined only for the Euclidean domain, so we cannot apply convolution directly to irregularly structured data such as graphs.

The technique proposed in this paper gives us a way to perform convolution on graph-like data by using the convolution theorem: convolution in the spatial domain is equivalent to multiplication in the Fourier domain. Hence, instead of performing the convolution explicitly in the spatial domain, we transform the graph signal and the filter into the Fourier domain, multiply them element-wise, and convert the result back to the spatial domain with an inverse Fourier transform. The following figure illustrates the proposed technique:

![title](img/fft.jpg)
## But How to Take This Fourier Transform?
As mentioned, we have to take a Fourier transform of the graph signal. In spectral graph theory, the key operator for Fourier analysis on graphs is the Laplacian. For a graph $G=(V,E)$ with a vertex set $V$ of size $n$ and an edge set $E$, the Laplacian is given by

$\Delta = D - A$

where $D$ denotes the diagonal degree matrix and $A$ denotes the adjacency matrix of the graph.

When we eigendecompose the Laplacian, we get orthonormal eigenvectors, since the Laplacian is a real symmetric positive semi-definite matrix (side note: a real symmetric matrix has real eigenvalues and orthogonal eigenvectors; positive semi-definiteness additionally makes the eigenvalues non-negative). These eigenvectors, denoted $\{\phi_l\}_{l=0}^{n-1}$, are also called the Fourier modes. The corresponding eigenvalues $\{\lambda_l\}_{l=0}^{n-1}$ act as the frequencies of the graph.

The Laplacian is diagonalized by the Fourier basis:

$\Delta = \Phi \Lambda \Phi^T$

where $\Phi = \{\phi_l\}_{l=0}^{n-1}$ is the matrix with the eigenvectors as columns and $\Lambda$ is the diagonal matrix of eigenvalues.

A graph signal can now be transformed to the Fourier domain simply by multiplying by the Fourier basis. The Fourier transform of a graph signal $x:V\to\mathbb{R}$, defined on the nodes of the graph ($x\in\mathbb{R}^n$), is

$\hat{x} = \Phi^T x$

so transforming a graph signal to the Fourier domain is nothing but a matrix-vector multiplication. Similarly, the inverse graph Fourier transform is $x = \Phi\hat{x}$. This formulation of the Fourier transform on graphs gives us the tools required to perform convolution on graphs.
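To make these definitions concrete, here is a minimal NumPy sketch (the toy graph, signal, and all variable names are illustrative, not part of the paper's code) that builds the Laplacian of a small path graph and runs a graph Fourier transform and its inverse:

```python
import numpy as np

# A toy 4-node path graph (illustrative only).
A = np.array([[0, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)
D = np.diag(A.sum(axis=1))           # degree matrix
Delta = D - A                        # combinatorial Laplacian Delta = D - A

# Eigendecomposition: columns of Phi are the Fourier modes,
# lam holds the graph "frequencies".
lam, Phi = np.linalg.eigh(Delta)

x = np.array([1.0, 2.0, 3.0, 4.0])   # a signal on the nodes
x_hat = Phi.T @ x                    # graph Fourier transform
x_rec = Phi @ x_hat                  # inverse graph Fourier transform
assert np.allclose(x, x_rec)
```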
## Filtering of signals on graphs
We now have the two ingredients needed to define convolution on a non-Euclidean domain:

1) a way to transform a graph signal to the Fourier domain, and

2) multiplication in the Fourier domain. The graph convolution of an input signal $x\in\mathbb{R}^n$ with a filter $g\in\mathbb{R}^n$ is defined as

$x *_G g = \mathcal{F}^{-1}(\mathcal{F}(x) \odot \mathcal{F}(g)) = \Phi(\Phi^T x \odot \Phi^T g)$

where $\odot$ denotes the element-wise product. If we denote the filter by $g_\theta = \mathrm{diag}(\Phi^T g)$, the spectral graph convolution simplifies to $x *_G g_\theta = \Phi g_\theta \Phi^T x$.
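Continuing the toy sketch above, spectral filtering is then just a diagonal scaling of the spectrum; the filter response `g_theta` below is an arbitrary low-pass choice, purely for illustration:

```python
# Filter the toy signal in the spectral domain: y = Phi g_theta(Lambda) Phi^T x.
g_theta = np.exp(-2.0 * lam)          # arbitrary low-pass response g(lambda)
y = Phi @ (g_theta * (Phi.T @ x))     # forward GFT, scale spectrum, inverse GFT
```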
## Why can't we go forward with this scheme only?
All spectral-based ConvGNNs follow this definition, but the method has three major problems:

1. The filter is non-parametric: the number of filter parameters to learn grows with the dimensionality of the input, which translates into $O(n)$ complexity.

2. The filters are not localized: a filter learnt this way considers the entire graph, unlike a traditional CNN, which uses only nearby pixels to compute a convolution.

3. The algorithm has to compute the eigendecomposition explicitly and multiply the signal with the Fourier basis, since there is no fast Fourier transform defined for graphs; the computation is therefore $O(n^2)$. (The FFT defined for Euclidean data has $O(n\log n)$ complexity.)
## Polynomial parametrization of filters
To overcome these problems, the authors use a polynomial approximation to parametrize the filter. The filter now takes the form

$g_\theta(\Lambda) = \sum_{k=0}^{K-1}\theta_k\Lambda^k$

where the parameter $\theta\in\mathbb{R}^K$ is a vector of polynomial coefficients. Spectral filters represented by $K$th-order polynomials of the Laplacian are exactly $K$-localized. Besides, their learning complexity is $O(K)$, the support size of the filter, and thus the same complexity as classical CNNs. The locality is illustrated in the sketch below.
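The $K$-localization is easy to see in code: a polynomial filter only ever applies powers of the Laplacian to the signal, and $\Delta^k x$ mixes values at most $k$ hops apart. A rough sketch, reusing the toy graph from the earlier snippet, with arbitrary coefficients:

```python
# Polynomial filter y = sum_k theta_k * Delta^k x on the toy graph above.
theta = [0.5, -0.3, 0.1]   # K = 3 arbitrary (untrained) coefficients
y = np.zeros_like(x)
xk = x.copy()              # Delta^0 x
for t in theta:
    y += t * xk
    xk = Delta @ xk        # next power Delta^{k+1} x: reaches one more hop
```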
## Is everything fixed now?
No. The cost of filtering a signal is still high, at $O(n^2)$ operations, because of the multiplication with the Fourier basis $\Phi$ (we would still have to compute the eigendecomposition explicitly and multiply the signal with the Fourier basis).

To bypass this problem, the authors parametrize $g_\theta(\Delta)$ as a polynomial function that can be computed recursively from $\Delta$. One such polynomial, traditionally used in graph signal processing to approximate kernels, is the **Chebyshev expansion**. The Chebyshev polynomial $T_k(x)$ of order $k$ can be computed by the stable recurrence $T_k(x) = 2xT_{k-1}(x) - T_{k-2}(x)$ with $T_0 = 1$ and $T_1 = x$.

The spectral filter is now given by a truncated Chebyshev polynomial:

$$g_\theta(\bar\Delta) = \Phi g_\theta(\bar\Lambda)\Phi^T = \sum_{k=0}^{K-1}\theta_k T_k(\bar\Delta)$$

where $\theta\in\mathbb{R}^K$ now represents a vector of Chebyshev coefficients and $\bar\Delta$ denotes the rescaled Laplacian, $\bar\Delta = 2\Delta/\lambda_{max} - I_n$. (This rescaling is necessary because the Chebyshev polynomials form an orthogonal basis on the interval $[-1,1]$, while the eigenvalues of the original Laplacian lie in $[0,\lambda_{max}]$.)

The filtering operation can now be written as $y = g_\theta(\Delta)x = \sum_{k=0}^{K-1}\theta_k T_k(\bar\Delta)x$, where $x$ is the input signal and the $\theta_k$ are the trainable parameters.
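The recurrence makes filtering matrix-free: only (sparse) matrix-vector products with the rescaled Laplacian are needed, never an eigendecomposition. A minimal dense sketch of the same idea, reusing the toy graph (the notebook's `graph_conv_cheby` below is the real, sparse, batched version):

```python
# Chebyshev filtering y = sum_k theta_k T_k(Delta_bar) x, via the recurrence.
lmax = lam.max()
Delta_bar = 2.0 * Delta / lmax - np.eye(len(x))   # eigenvalues now in [-1, 1]

theta = [0.5, -0.3, 0.1]                          # arbitrary coefficients
T_prev, T_curr = x, Delta_bar @ x                 # T_0 x and T_1 x
y = theta[0] * T_prev + theta[1] * T_curr
for t in theta[2:]:
    T_next = 2.0 * Delta_bar @ T_curr - T_prev    # T_k = 2x T_{k-1} - T_{k-2}
    y += t * T_next
    T_prev, T_curr = T_curr, T_next
```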
## Pooling Operation
For images, pooling takes a fixed-size patch of pixels, say 2x2, keeps only the pixel with the maximum value (assuming max pooling), and discards the other pixels from the patch. A similar concept of pooling can be applied to graphs.

Defferrard et al. address this by using the coarsening phase of the Graclus multilevel clustering algorithm. At each coarsening level, Graclus' greedy rule picks an unmarked vertex $i$ and matches it with one of its unmarked neighbours $j$ that maximizes the local normalized cut $W_{ij}(1/d_i + 1/d_j)$. The two matched vertices are then marked, and the coarsened weights are set to the sum of their weights. The matching is repeated until all nodes have been explored. This is a very fast coarsening scheme that roughly halves the number of nodes from one level to the next. After coarsening, the nodes of the input graph and its coarsened versions are rearranged into a balanced binary tree; aggregating the tree from bottom to top places similar nodes next to each other, so pooling such a rearranged signal is much more efficient than pooling the original. The following figure shows an example of graph coarsening and pooling.

![title](img/pool.png)
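After the binary-tree rearrangement, graph max-pooling reduces to ordinary 1D pooling over consecutive pairs of entries; fake nodes added during coarsening carry neutral values so they never win the max. A tiny sketch of the idea (the values are illustrative):

```python
import numpy as np

# Max-pool a rearranged graph signal of even length V by pairs of siblings.
# Fake (added) nodes carry -inf so they never win the max.
sig = np.array([3.0, 1.0, -np.inf, 4.0])   # node 2 is a fake node
pooled = sig.reshape(-1, 2).max(axis=1)    # -> array([3., 4.])
```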
# Implementing ChebNet in PyTorch

## Imports
```python
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import collections
import time
import numpy as np
# TF 1.x tutorial helper, used here only to download and load MNIST.
from tensorflow.examples.tutorials.mnist import input_data

import sys
import os
```
```python
if torch.cuda.is_available():
    print('cuda available')
    dtypeFloat = torch.cuda.FloatTensor
    dtypeLong = torch.cuda.LongTensor
    torch.cuda.manual_seed(1)
else:
    print('cuda not available')
    dtypeFloat = torch.FloatTensor
    dtypeLong = torch.LongTensor
    torch.manual_seed(1)
```

Output:

```
cuda available
```
## Data Preparation
```python
# load data into the folder 'datasets'
mnist = input_data.read_data_sets('datasets', one_hot=False)

train_data = mnist.train.images.astype(np.float32)
val_data = mnist.validation.images.astype(np.float32)
test_data = mnist.test.images.astype(np.float32)
train_labels = mnist.train.labels
val_labels = mnist.validation.labels
test_labels = mnist.test.labels
```
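Note that `tensorflow.examples.tutorials` was removed in TensorFlow 2.x, so the cell above runs only on TF 1.x. For readers without TF 1.x, a loader built on `torchvision` could substitute; this is an assumption on my part, not part of the original notebook, and the helper name and the 55000/5000 train/validation split are chosen to mimic the TF helper:

```python
from torchvision import datasets
import numpy as np

def load_mnist_flat(root='datasets'):
    """Return MNIST as flat float32 arrays in [0, 1], like the TF1 helper."""
    tr = datasets.MNIST(root, train=True, download=True)
    te = datasets.MNIST(root, train=False, download=True)
    x = tr.data.numpy().reshape(-1, 784).astype(np.float32) / 255.0
    y = tr.targets.numpy()
    x_te = te.data.numpy().reshape(-1, 784).astype(np.float32) / 255.0
    y_te = te.targets.numpy()
    # Carve out a 5000-sample validation split, as mnist.validation did.
    return (x[:55000], y[:55000]), (x[55000:], y[55000:]), (x_te, y_te)
```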
```python
from grid_graph import grid_graph
from coarsening import coarsen
from coarsening import lmax_L
from coarsening import perm_data
from coarsening import rescale_L

# Construct graph
t_start = time.time()
grid_side = 28
number_edges = 8
metric = 'euclidean'
A = grid_graph(grid_side, number_edges, metric)  # create graph of Euclidean grid
```

Output:

```
nb edges: 6396
```
```python
# Compute coarsened graphs
coarsening_levels = 4
L, perm = coarsen(A, coarsening_levels)
```

Output:

```
Heavy Edge Matching coarsening with Xavier version
Layer 0: M_0 = |V| = 976 nodes (192 added), |E| = 3198 edges
Layer 1: M_1 = |V| = 488 nodes (83 added), |E| = 1619 edges
Layer 2: M_2 = |V| = 244 nodes (29 added), |E| = 794 edges
Layer 3: M_3 = |V| = 122 nodes (7 added), |E| = 396 edges
Layer 4: M_4 = |V| = 61 nodes (0 added), |E| = 194 edges
```
```python
# Compute max eigenvalue of graph Laplacians
lmax = []
for i in range(coarsening_levels):
    lmax.append(lmax_L(L[i]))
print('lmax: ' + str([lmax[i] for i in range(coarsening_levels)]))
```

Output:

```
lmax: [1.3857538, 1.3440963, 1.1994357, 1.0239158]
```
```python
# Reindex nodes to satisfy the binary tree structure
train_data = perm_data(train_data, perm)
val_data = perm_data(val_data, perm)
test_data = perm_data(test_data, perm)

print(train_data.shape)
print(val_data.shape)
print(test_data.shape)

print('Execution time: {:.2f}s'.format(time.time() - t_start))
del perm
```

Output:

```
(55000, 976)
(5000, 976)
(10000, 976)
Execution time: 4.18s
```
## Model
```python
# class definitions

class my_sparse_mm(torch.autograd.Function):
    """
    Implementation of a new autograd function for sparse variables,
    called "my_sparse_mm", by subclassing torch.autograd.Function
    and implementing the forward and backward passes.
    Note: this uses the legacy (pre-0.4) autograd API, in which Function
    instances are instantiated and called directly.
    """

    def forward(self, W, x):  # W is SPARSE
        self.save_for_backward(W, x)
        y = torch.mm(W, x)
        return y

    def backward(self, grad_output):
        W, x = self.saved_tensors
        grad_input = grad_output.clone()
        grad_input_dL_dW = torch.mm(grad_input, x.t())
        grad_input_dL_dx = torch.mm(W.t(), grad_input)
        return grad_input_dL_dW, grad_input_dL_dx


class Graph_ConvNet_LeNet5(nn.Module):

    def __init__(self, net_parameters):

        print('Graph ConvNet: LeNet5')

        super(Graph_ConvNet_LeNet5, self).__init__()

        # parameters
        D, CL1_F, CL1_K, CL2_F, CL2_K, FC1_F, FC2_F = net_parameters
        FC1Fin = CL2_F * (D // 16)

        # graph CL1
        self.cl1 = nn.Linear(CL1_K, CL1_F)
        Fin = CL1_K; Fout = CL1_F
        scale = np.sqrt(2.0 / (Fin + Fout))
        self.cl1.weight.data.uniform_(-scale, scale)
        self.cl1.bias.data.fill_(0.0)
        self.CL1_K = CL1_K; self.CL1_F = CL1_F

        # graph CL2
        self.cl2 = nn.Linear(CL2_K * CL1_F, CL2_F)
        Fin = CL2_K * CL1_F; Fout = CL2_F
        scale = np.sqrt(2.0 / (Fin + Fout))
        self.cl2.weight.data.uniform_(-scale, scale)
        self.cl2.bias.data.fill_(0.0)
        self.CL2_K = CL2_K; self.CL2_F = CL2_F

        # FC1
        self.fc1 = nn.Linear(FC1Fin, FC1_F)
        Fin = FC1Fin; Fout = FC1_F
        scale = np.sqrt(2.0 / (Fin + Fout))
        self.fc1.weight.data.uniform_(-scale, scale)
        self.fc1.bias.data.fill_(0.0)
        self.FC1Fin = FC1Fin

        # FC2
        self.fc2 = nn.Linear(FC1_F, FC2_F)
        Fin = FC1_F; Fout = FC2_F
        scale = np.sqrt(2.0 / (Fin + Fout))
        self.fc2.weight.data.uniform_(-scale, scale)
        self.fc2.bias.data.fill_(0.0)

        # nb of parameters
        nb_param = CL1_K * CL1_F + CL1_F              # CL1
        nb_param += CL2_K * CL1_F * CL2_F + CL2_F     # CL2
        nb_param += FC1Fin * FC1_F + FC1_F            # FC1
        nb_param += FC1_F * FC2_F + FC2_F             # FC2
        print('nb of parameters=', nb_param, '\n')

    def init_weights(self, W, Fin, Fout):
        scale = np.sqrt(2.0 / (Fin + Fout))
        W.uniform_(-scale, scale)
        return W

    def graph_conv_cheby(self, x, cl, L, lmax, Fout, K):

        # parameters
        # B = batch size
        # V = nb vertices
        # Fin = nb input features
        # Fout = nb output features
        # K = Chebyshev order & support size
        B, V, Fin = x.size(); B, V, Fin = int(B), int(V), int(Fin)

        # rescale Laplacian
        lmax = lmax_L(L)
        L = rescale_L(L, lmax)

        # convert scipy sparse matrix L to pytorch
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col)).T
        indices = indices.astype(np.int64)
        indices = torch.from_numpy(indices)
        indices = indices.type(torch.LongTensor)
        L_data = L.data.astype(np.float32)
        L_data = torch.from_numpy(L_data)
        L_data = L_data.type(torch.FloatTensor)
        L = torch.sparse.FloatTensor(indices, L_data, torch.Size(L.shape))
        L = Variable(L, requires_grad=False)
        if torch.cuda.is_available():
            L = L.cuda()

        # transform to Chebyshev basis
        x0 = x.permute(1, 2, 0).contiguous()  # V x Fin x B
        x0 = x0.view([V, Fin * B])            # V x Fin*B
        x = x0.unsqueeze(0)                   # 1 x V x Fin*B

        def concat(x, x_):
            x_ = x_.unsqueeze(0)              # 1 x V x Fin*B
            return torch.cat((x, x_), 0)      # K x V x Fin*B

        if K > 1:
            x1 = my_sparse_mm()(L, x0)                  # V x Fin*B
            x = torch.cat((x, x1.unsqueeze(0)), 0)      # 2 x V x Fin*B
            for k in range(2, K):
                x2 = 2 * my_sparse_mm()(L, x1) - x0     # Chebyshev recurrence
                x = torch.cat((x, x2.unsqueeze(0)), 0)  # k+1 x V x Fin*B
                x0, x1 = x1, x2

        x = x.view([K, V, Fin, B])              # K x V x Fin x B
        x = x.permute(3, 1, 2, 0).contiguous()  # B x V x Fin x K
        x = x.view([B * V, Fin * K])            # B*V x Fin*K

        # Compose linearly Fin features to get Fout features
        x = cl(x)                               # B*V x Fout
        x = x.view([B, V, Fout])                # B x V x Fout

        return x

    # Max pooling of size p. Must be a power of 2.
    def graph_max_pool(self, x, p):
        if p > 1:
            x = x.permute(0, 2, 1).contiguous()  # x = B x F x V
            x = nn.MaxPool1d(p)(x)               # B x F x V/p
            x = x.permute(0, 2, 1).contiguous()  # x = B x V/p x F
            return x
        else:
            return x

    def forward(self, x, d, L, lmax):

        # graph CL1
        x = x.unsqueeze(2)  # B x V x Fin=1
        x = self.graph_conv_cheby(x, self.cl1, L[0], lmax[0], self.CL1_F, self.CL1_K)
        x = F.relu(x)
        x = self.graph_max_pool(x, 4)

        # graph CL2
        x = self.graph_conv_cheby(x, self.cl2, L[2], lmax[2], self.CL2_F, self.CL2_K)
        x = F.relu(x)
        x = self.graph_max_pool(x, 4)

        # FC1
        x = x.view(-1, self.FC1Fin)
        x = self.fc1(x)
        x = F.relu(x)
        x = nn.Dropout(d)(x)

        # FC2
        x = self.fc2(x)

        return x

    def loss(self, y, y_target, l2_regularization):

        loss = nn.CrossEntropyLoss()(y, y_target)

        l2_loss = 0.0
        for param in self.parameters():
            data = param * param
            l2_loss += data.sum()

        loss += 0.5 * l2_regularization * l2_loss

        return loss

    def update(self, lr):
        update = torch.optim.SGD(self.parameters(), lr=lr, momentum=0.9)
        return update

    def update_learning_rate(self, optimizer, lr):
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        return optimizer

    def evaluation(self, y_predicted, test_l):
        _, class_predicted = torch.max(y_predicted.data, 1)
        return 100.0 * (class_predicted == test_l).sum() / y_predicted.size(0)
```
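As noted in its docstring, `my_sparse_mm` uses the legacy autograd API. On modern PyTorch (0.4+) the same operator would be written with static methods and a `ctx` object; the sketch below is a porting suggestion under that assumption, not code from this repository:

```python
class SparseMM(torch.autograd.Function):
    """y = W @ x for sparse W, with gradients for both inputs (modern API)."""

    @staticmethod
    def forward(ctx, W, x):
        ctx.save_for_backward(W, x)
        return torch.mm(W, x)

    @staticmethod
    def backward(ctx, grad_output):
        W, x = ctx.saved_tensors
        # dL/dW = grad @ x^T, dL/dx = W^T @ grad
        return torch.mm(grad_output, x.t()), torch.mm(W.t(), grad_output)

# usage: y = SparseMM.apply(L, x0)   (instead of my_sparse_mm()(L, x0))
```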
```python
# network parameters
D = train_data.shape[1]
CL1_F = 32
CL1_K = 25
CL2_F = 64
CL2_K = 25
FC1_F = 512
FC2_F = 10
net_parameters = [D, CL1_F, CL1_K, CL2_F, CL2_K, FC1_F, FC2_F]
```
```python
# instantiate an object net of the class
net = Graph_ConvNet_LeNet5(net_parameters)
if torch.cuda.is_available():
    net.cuda()
print(net)
```

Output:

```
Graph ConvNet: LeNet5
nb of parameters= 2056586 

Graph_ConvNet_LeNet5(
  (cl1): Linear(in_features=25, out_features=32, bias=True)
  (cl2): Linear(in_features=800, out_features=64, bias=True)
  (fc1): Linear(in_features=3904, out_features=512, bias=True)
  (fc2): Linear(in_features=512, out_features=10, bias=True)
)
```
```python
# Weights
L_net = list(net.parameters())
```
## Hyperparameter settings
```python
# learning parameters
learning_rate = 0.05
dropout_value = 0.5
l2_regularization = 5e-4
batch_size = 100
num_epochs = 20
train_size = train_data.shape[0]
nb_iter = int(num_epochs * train_size) // batch_size
print('num_epochs=', num_epochs, ', train_size=', train_size, ', nb_iter=', nb_iter)
```

Output:

```
num_epochs= 20 , train_size= 55000 , nb_iter= 11000
```
## Training & Evaluation
```python
# Optimizer
global_lr = learning_rate
global_step = 0
decay = 0.95
decay_steps = train_size
lr = learning_rate
optimizer = net.update(lr)


# loop over epochs
indices = collections.deque()
for epoch in range(num_epochs):  # loop over the dataset multiple times

    # reshuffle
    indices.extend(np.random.permutation(train_size))  # rand permutation

    # reset time
    t_start = time.time()

    # extract batches
    running_loss = 0.0
    running_accuray = 0
    running_total = 0
    while len(indices) >= batch_size:

        # extract batches
        batch_idx = [indices.popleft() for i in range(batch_size)]
        train_x, train_y = train_data[batch_idx, :], train_labels[batch_idx]
        train_x = Variable(torch.FloatTensor(train_x).type(dtypeFloat), requires_grad=False)
        train_y = train_y.astype(np.int64)
        train_y = torch.LongTensor(train_y).type(dtypeLong)
        train_y = Variable(train_y, requires_grad=False)

        # Forward
        y = net.forward(train_x, dropout_value, L, lmax)
        loss = net.loss(y, train_y, l2_regularization)
        loss_train = loss.data

        # Accuracy
        acc_train = net.evaluation(y, train_y.data)

        # backward
        loss.backward()

        # Update
        global_step += batch_size  # to update learning rate
        optimizer.step()
        optimizer.zero_grad()

        # loss, accuracy
        running_loss += loss_train
        running_accuray += acc_train
        running_total += 1

        # print
        if not running_total % 100:  # print every x mini-batches
            print('epoch= %d, i= %4d, loss(batch)= %.4f, accuray(batch)= %.2f' % (epoch+1, running_total, loss_train, acc_train))


    # print
    t_stop = time.time() - t_start
    print('epoch= %d, loss(train)= %.3f, accuracy(train)= %.3f, time= %.3f, lr= %.5f' %
          (epoch+1, running_loss/running_total, running_accuray/running_total, t_stop, lr))


    # update learning rate
    lr = global_lr * pow(decay, float(global_step // decay_steps))
    optimizer = net.update_learning_rate(optimizer, lr)


    # Test set
    running_accuray_test = 0
    running_total_test = 0
    indices_test = collections.deque()
    indices_test.extend(range(test_data.shape[0]))
    t_start_test = time.time()
    while len(indices_test) >= batch_size:
        batch_idx_test = [indices_test.popleft() for i in range(batch_size)]
        test_x, test_y = test_data[batch_idx_test, :], test_labels[batch_idx_test]
        test_x = Variable(torch.FloatTensor(test_x).type(dtypeFloat), requires_grad=False)
        y = net.forward(test_x, 0.0, L, lmax)
        test_y = test_y.astype(np.int64)
        test_y = torch.LongTensor(test_y).type(dtypeLong)
        test_y = Variable(test_y, requires_grad=False)
        acc_test = net.evaluation(y, test_y.data)
        running_accuray_test += acc_test
        running_total_test += 1
    t_stop_test = time.time() - t_start_test
    print('  accuracy(test) = %.3f %%, time= %.3f' % (running_accuray_test / running_total_test, t_stop_test))
```

Output:

```
epoch= 1, i= 100, loss(batch)= 0.4181, accuray(batch)= 90.00
epoch= 1, i= 200, loss(batch)= 0.3011, accuray(batch)= 89.00
epoch= 1, i= 300, loss(batch)= 0.2579, accuray(batch)= 95.00
epoch= 1, i= 400, loss(batch)= 0.2399, accuray(batch)= 96.00
epoch= 1, i= 500, loss(batch)= 0.2154, accuray(batch)= 96.00
epoch= 1, loss(train)= 0.387, accuracy(train)= 90.976, time= 89.638, lr= 0.05000
  accuracy(test) = 97.560 %, time= 9.941
epoch= 2, i= 100, loss(batch)= 0.2784, accuray(batch)= 95.00
epoch= 2, i= 200, loss(batch)= 0.2130, accuray(batch)= 94.00
epoch= 2, i= 300, loss(batch)= 0.1589, accuray(batch)= 98.00
epoch= 2, i= 400, loss(batch)= 0.1755, accuray(batch)= 98.00
epoch= 2, i= 500, loss(batch)= 0.2534, accuray(batch)= 95.00
epoch= 2, loss(train)= 0.186, accuracy(train)= 97.556, time= 89.675, lr= 0.04750
  accuracy(test) = 98.530 %, time= 9.967
epoch= 3, i= 100, loss(batch)= 0.2390, accuray(batch)= 95.00
epoch= 3, i= 200, loss(batch)= 0.1573, accuray(batch)= 96.00
epoch= 3, i= 300, loss(batch)= 0.1216, accuray(batch)= 99.00
epoch= 3, i= 400, loss(batch)= 0.2020, accuray(batch)= 98.00
epoch= 3, i= 500, loss(batch)= 0.1684, accuray(batch)= 98.00
epoch= 3, loss(train)= 0.155, accuracy(train)= 98.264, time= 89.724, lr= 0.04512
  accuracy(test) = 98.570 %, time= 9.961
epoch= 4, i= 100, loss(batch)= 0.1359, accuray(batch)= 99.00
epoch= 4, i= 200, loss(batch)= 0.1273, accuray(batch)= 98.00
epoch= 4, i= 300, loss(batch)= 0.1234, accuray(batch)= 99.00
epoch= 4, i= 400, loss(batch)= 0.1352, accuray(batch)= 99.00
epoch= 4, i= 500, loss(batch)= 0.1034, accuray(batch)= 100.00
epoch= 4, loss(train)= 0.136, accuracy(train)= 98.540, time= 89.717, lr= 0.04287
  accuracy(test) = 98.840 %, time= 9.976
epoch= 5, i= 100, loss(batch)= 0.1229, accuray(batch)= 99.00
epoch= 5, i= 200, loss(batch)= 0.0896, accuray(batch)= 100.00
epoch= 5, i= 300, loss(batch)= 0.0974, accuray(batch)= 100.00
epoch= 5, i= 400, loss(batch)= 0.1413, accuray(batch)= 98.00
epoch= 5, i= 500, loss(batch)= 0.0997, accuray(batch)= 99.00
epoch= 5, loss(train)= 0.123, accuracy(train)= 98.824, time= 89.633, lr= 0.04073
  accuracy(test) = 98.770 %, time= 9.939
epoch= 6, i= 100, loss(batch)= 0.1051, accuray(batch)= 99.00
epoch= 6, i= 200, loss(batch)= 0.1060, accuray(batch)= 98.00
epoch= 6, i= 300, loss(batch)= 0.0966, accuray(batch)= 99.00
epoch= 6, i= 400, loss(batch)= 0.0942, accuray(batch)= 100.00
epoch= 6, i= 500, loss(batch)= 0.1439, accuray(batch)= 98.00
epoch= 6, loss(train)= 0.110, accuracy(train)= 98.998, time= 89.748, lr= 0.03869
  accuracy(test) = 98.860 %, time= 9.885
epoch= 7, i= 100, loss(batch)= 0.2120, accuray(batch)= 96.00
epoch= 7, i= 200, loss(batch)= 0.1200, accuray(batch)= 98.00
epoch= 7, i= 300, loss(batch)= 0.1138, accuray(batch)= 99.00
epoch= 7, i= 400, loss(batch)= 0.0879, accuray(batch)= 100.00
epoch= 7, i= 500, loss(batch)= 0.1056, accuray(batch)= 99.00
epoch= 7, loss(train)= 0.101, accuracy(train)= 99.138, time= 89.662, lr= 0.03675
  accuracy(test) = 98.950 %, time= 9.961
epoch= 8, i= 100, loss(batch)= 0.1075, accuray(batch)= 99.00
epoch= 8, i= 200, loss(batch)= 0.0909, accuray(batch)= 99.00
epoch= 8, i= 300, loss(batch)= 0.0770, accuray(batch)= 100.00
epoch= 8, i= 400, loss(batch)= 0.0718, accuray(batch)= 100.00
epoch= 8, i= 500, loss(batch)= 0.0801, accuray(batch)= 99.00
epoch= 8, loss(train)= 0.094, accuracy(train)= 99.145, time= 89.611, lr= 0.03492
  accuracy(test) = 99.080 %, time= 9.954
epoch= 9, i= 100, loss(batch)= 0.1403, accuray(batch)= 96.00
epoch= 9, i= 200, loss(batch)= 0.0803, accuray(batch)= 100.00
epoch= 9, i= 300, loss(batch)= 0.0778, accuray(batch)= 100.00
epoch= 9, i= 400, loss(batch)= 0.0727, accuray(batch)= 100.00
epoch= 9, i= 500, loss(batch)= 0.0680, accuray(batch)= 100.00
epoch= 9, loss(train)= 0.090, accuracy(train)= 99.169, time= 89.386, lr= 0.03317
  accuracy(test) = 99.020 %, time= 9.926
epoch= 10, i= 100, loss(batch)= 0.1055, accuray(batch)= 99.00
epoch= 10, i= 200, loss(batch)= 0.0800, accuray(batch)= 100.00
epoch= 10, i= 300, loss(batch)= 0.0802, accuray(batch)= 99.00
epoch= 10, i= 400, loss(batch)= 0.0751, accuray(batch)= 100.00
epoch= 10, i= 500, loss(batch)= 0.1007, accuray(batch)= 99.00
epoch= 10, loss(train)= 0.083, accuracy(train)= 99.333, time= 89.463, lr= 0.03151
  accuracy(test) = 99.190 %, time= 9.909
epoch= 11, i= 100, loss(batch)= 0.0904, accuray(batch)= 98.00
epoch= 11, i= 200, loss(batch)= 0.0698, accuray(batch)= 100.00
epoch= 11, i= 300, loss(batch)= 0.0759, accuray(batch)= 99.00
epoch= 11, i= 400, loss(batch)= 0.0873, accuray(batch)= 99.00
epoch= 11, i= 500, loss(batch)= 0.1021, accuray(batch)= 98.00
epoch= 11, loss(train)= 0.080, accuracy(train)= 99.340, time= 88.944, lr= 0.02994
  accuracy(test) = 98.910 %, time= 9.756
epoch= 12, i= 100, loss(batch)= 0.0617, accuray(batch)= 100.00
epoch= 12, i= 200, loss(batch)= 0.0923, accuray(batch)= 99.00
epoch= 12, i= 300, loss(batch)= 0.0951, accuray(batch)= 98.00
epoch= 12, i= 400, loss(batch)= 0.0960, accuray(batch)= 99.00
epoch= 12, i= 500, loss(batch)= 0.0774, accuray(batch)= 99.00
epoch= 12, loss(train)= 0.076, accuracy(train)= 99.431, time= 88.541, lr= 0.02844
  accuracy(test) = 99.110 %, time= 9.737
epoch= 13, i= 100, loss(batch)= 0.0574, accuray(batch)= 100.00
epoch= 13, i= 200, loss(batch)= 0.0579, accuray(batch)= 100.00
epoch= 13, i= 300, loss(batch)= 0.0695, accuray(batch)= 100.00
epoch= 13, i= 400, loss(batch)= 0.0741, accuray(batch)= 100.00
epoch= 13, i= 500, loss(batch)= 0.0762, accuray(batch)= 99.00
epoch= 13, loss(train)= 0.072, accuracy(train)= 99.455, time= 88.890, lr= 0.02702
  accuracy(test) = 99.070 %, time= 9.904
epoch= 14, i= 100, loss(batch)= 0.0727, accuray(batch)= 99.00
epoch= 14, i= 200, loss(batch)= 0.0621, accuray(batch)= 100.00
epoch= 14, i= 300, loss(batch)= 0.0973, accuray(batch)= 99.00
epoch= 14, i= 400, loss(batch)= 0.0736, accuray(batch)= 100.00
epoch= 14, i= 500, loss(batch)= 0.0742, accuray(batch)= 99.00
epoch= 14, loss(train)= 0.069, accuracy(train)= 99.482, time= 89.169, lr= 0.02567
  accuracy(test) = 99.090 %, time= 9.814
epoch= 15, i= 100, loss(batch)= 0.0727, accuray(batch)= 99.00
epoch= 15, i= 200, loss(batch)= 0.0880, accuray(batch)= 98.00
epoch= 15, i= 300, loss(batch)= 0.0589, accuray(batch)= 100.00
epoch= 15, i= 400, loss(batch)= 0.0529, accuray(batch)= 100.00
epoch= 15, i= 500, loss(batch)= 0.0529, accuray(batch)= 100.00
epoch= 15, loss(train)= 0.066, accuracy(train)= 99.567, time= 88.707, lr= 0.02438
  accuracy(test) = 99.120 %, time= 9.723
epoch= 16, i= 100, loss(batch)= 0.0523, accuray(batch)= 100.00
epoch= 16, i= 200, loss(batch)= 0.0550, accuray(batch)= 100.00
epoch= 16, i= 300, loss(batch)= 0.0558, accuray(batch)= 100.00
epoch= 16, i= 400, loss(batch)= 0.0682, accuray(batch)= 99.00
epoch= 16, i= 500, loss(batch)= 0.0549, accuray(batch)= 100.00
epoch= 16, loss(train)= 0.065, accuracy(train)= 99.573, time= 88.703, lr= 0.02316
  accuracy(test) = 99.040 %, time= 9.811
epoch= 17, i= 100, loss(batch)= 0.0621, accuray(batch)= 100.00
epoch= 17, i= 200, loss(batch)= 0.0651, accuray(batch)= 99.00
epoch= 17, i= 300, loss(batch)= 0.0539, accuray(batch)= 100.00
epoch= 17, i= 400, loss(batch)= 0.0705, accuray(batch)= 99.00
epoch= 17, i= 500, loss(batch)= 0.0695, accuray(batch)= 99.00
epoch= 17, loss(train)= 0.062, accuracy(train)= 99.604, time= 88.800, lr= 0.02201
  accuracy(test) = 99.130 %, time= 9.832
epoch= 18, i= 100, loss(batch)= 0.0560, accuray(batch)= 100.00
epoch= 18, i= 200, loss(batch)= 0.0697, accuray(batch)= 99.00
epoch= 18, i= 300, loss(batch)= 0.0637, accuray(batch)= 100.00
epoch= 18, i= 400, loss(batch)= 0.0576, accuray(batch)= 99.00
epoch= 18, i= 500, loss(batch)= 0.0584, accuray(batch)= 100.00
epoch= 18, loss(train)= 0.061, accuracy(train)= 99.653, time= 88.983, lr= 0.02091
  accuracy(test) = 99.150 %, time= 9.936
epoch= 19, i= 100, loss(batch)= 0.0710, accuray(batch)= 100.00
epoch= 19, i= 200, loss(batch)= 0.0473, accuray(batch)= 100.00
epoch= 19, i= 300, loss(batch)= 0.0551, accuray(batch)= 100.00
epoch= 19, i= 400, loss(batch)= 0.0506, accuray(batch)= 100.00
epoch= 19, i= 500, loss(batch)= 0.0491, accuray(batch)= 100.00
epoch= 19, loss(train)= 0.059, accuracy(train)= 99.669, time= 89.217, lr= 0.01986
  accuracy(test) = 99.160 %, time= 9.873
epoch= 20, i= 100, loss(batch)= 0.0525, accuray(batch)= 100.00
epoch= 20, i= 200, loss(batch)= 0.0482, accuray(batch)= 100.00
epoch= 20, i= 300, loss(batch)= 0.0642, accuray(batch)= 100.00
epoch= 20, i= 400, loss(batch)= 0.0515, accuray(batch)= 100.00
epoch= 20, i= 500, loss(batch)= 0.0504, accuray(batch)= 100.00
epoch= 20, loss(train)= 0.057, accuracy(train)= 99.698, time= 89.425, lr= 0.01887
  accuracy(test) = 99.030 %, time= 9.874
```
## References

- [Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering](https://arxiv.org/abs/1606.09375)
- [Xavier Bresson: "Convolutional Neural Networks on Graphs"](https://www.youtube.com/watch?v=v3jZRkvIOIM)
Graph/ChebNet/code/coarsening.py
ADDED
@@ -0,0 +1,310 @@
1 |
+
import numpy as np
|
2 |
+
import scipy.sparse
|
3 |
+
import sklearn.metrics
|
4 |
+
|
5 |
+
|
6 |
+
def laplacian(W, normalized=True):
|
7 |
+
"""Return graph Laplacian"""
|
8 |
+
|
9 |
+
# Degree matrix.
|
10 |
+
d = W.sum(axis=0)
|
11 |
+
|
12 |
+
# Laplacian matrix.
|
13 |
+
if not normalized:
|
14 |
+
D = scipy.sparse.diags(d.A.squeeze(), 0)
|
15 |
+
L = D - W
|
16 |
+
else:
|
17 |
+
d += np.spacing(np.array(0, W.dtype))
|
18 |
+
d = 1 / np.sqrt(d)
|
19 |
+
D = scipy.sparse.diags(d.A.squeeze(), 0)
|
20 |
+
I = scipy.sparse.identity(d.size, dtype=W.dtype)
|
21 |
+
L = I - D * W * D
|
22 |
+
|
23 |
+
assert np.abs(L - L.T).mean() < 1e-9
|
24 |
+
assert type(L) is scipy.sparse.csr.csr_matrix
|
25 |
+
return L
|
26 |
+
|
27 |
+
|
28 |
+
|
29 |
+
def rescale_L(L, lmax=2):
|
30 |
+
"""Rescale Laplacian eigenvalues to [-1,1]"""
|
31 |
+
M, M = L.shape
|
32 |
+
I = scipy.sparse.identity(M, format='csr', dtype=L.dtype)
|
33 |
+
L /= lmax * 2
|
34 |
+
L -= I
|
35 |
+
return L
|
36 |
+
|
37 |
+
|
38 |
+
def lmax_L(L):
|
39 |
+
"""Compute largest Laplacian eigenvalue"""
|
40 |
+
return scipy.sparse.linalg.eigsh(L, k=1, which='LM', return_eigenvectors=False)[0]
|
41 |
+
|
42 |
+
|
43 |
+
# graph coarsening with Heavy Edge Matching
|
44 |
+
def coarsen(A, levels):
|
45 |
+
|
46 |
+
graphs, parents = HEM(A, levels)
|
47 |
+
perms = compute_perm(parents)
|
48 |
+
|
49 |
+
laplacians = []
|
50 |
+
for i,A in enumerate(graphs):
|
51 |
+
M, M = A.shape
|
52 |
+
|
53 |
+
if i < levels:
|
54 |
+
A = perm_adjacency(A, perms[i])
|
55 |
+
|
56 |
+
A = A.tocsr()
|
57 |
+
A.eliminate_zeros()
|
58 |
+
Mnew, Mnew = A.shape
|
59 |
+
print('Layer {0}: M_{0} = |V| = {1} nodes ({2} added), |E| = {3} edges'.format(i, Mnew, Mnew-M, A.nnz//2))
|
60 |
+
|
61 |
+
L = laplacian(A, normalized=True)
|
62 |
+
laplacians.append(L)
|
63 |
+
|
64 |
+
return laplacians, perms[0] if len(perms) > 0 else None
|
65 |
+
|
66 |
+
|
67 |
+
def HEM(W, levels, rid=None):
    """
    Coarsen a graph multiple times using Heavy Edge Matching (HEM).

    Input
    W: symmetric sparse weight (adjacency) matrix
    levels: the number of coarsened graphs

    Output
    graph[0]: original graph of size N_1
    graph[1]: coarser graph of size N_2 < N_1
    graph[levels]: coarsest graph of size N_levels < ... < N_2 < N_1
    parents[i] is a vector of size N_i with entries ranging from 0 to N_{i+1}-1
        which indicate the parents in the coarser graph[i+1]
    nd_sz{i} is a vector of size N_i that contains the size of the supernode in the graph{i}

    Note
    if "graph" is a list of length k, then "parents" will be a list of length k-1
    """

    N, N = W.shape

    if rid is None:
        rid = np.random.permutation(range(N))

    # Visit vertices in order of increasing degree (this supersedes the
    # random permutation above).
    ss = np.array(W.sum(axis=0)).squeeze()
    rid = np.argsort(ss)

    parents = []
    degree = W.sum(axis=0) - W.diagonal()
    graphs = []
    graphs.append(W)

    print('Heavy Edge Matching coarsening with Xavier version')

    for _ in range(levels):

        weights = degree  # graclus weights
        weights = np.array(weights).squeeze()

        # PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR
        idx_row, idx_col, val = scipy.sparse.find(W)
        cc = idx_row
        rr = idx_col
        vv = val

        # Ensure the first index array is sorted, as HEM_one_level assumes.
        if not (list(cc) == list(np.sort(cc))):
            tmp = cc
            cc = rr
            rr = tmp

        cluster_id = HEM_one_level(cc, rr, vv, rid, weights)
        parents.append(cluster_id)

        # COMPUTE THE EDGE WEIGHTS FOR THE NEW GRAPH
        nrr = cluster_id[rr]
        ncc = cluster_id[cc]
        nvv = vv
        Nnew = cluster_id.max() + 1
        # CSR is more appropriate: row,val pairs appear multiple times
        W = scipy.sparse.csr_matrix((nvv, (nrr, ncc)), shape=(Nnew, Nnew))
        W.eliminate_zeros()

        # Add new graph to the list of all coarsened graphs
        graphs.append(W)
        N, N = W.shape

        # COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS)
        degree = W.sum(axis=0)

        # CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISITED AT THE NEXT PASS
        ss = np.array(W.sum(axis=0)).squeeze()
        rid = np.argsort(ss)

    return graphs, parents

# Coarsen a graph given by rr,cc,vv. rr is assumed to be ordered
def HEM_one_level(rr, cc, vv, rid, weights):

    nnz = rr.shape[0]
    N = rr[nnz-1] + 1

    marked = np.zeros(N, bool)
    rowstart = np.zeros(N, np.int32)
    rowlength = np.zeros(N, np.int32)
    cluster_id = np.zeros(N, np.int32)

    oldval = rr[0]
    count = 0
    clustercount = 0

    # Build per-row start offsets and lengths from the sorted row indices.
    for ii in range(nnz):
        rowlength[count] = rowlength[count] + 1
        if rr[ii] > oldval:
            oldval = rr[ii]
            rowstart[count+1] = ii
            count = count + 1

    # Visit vertices in the order given by rid and match each unmarked
    # vertex with its best-scoring unmarked neighbor.
    for ii in range(N):
        tid = rid[ii]
        if not marked[tid]:
            wmax = 0.0
            rs = rowstart[tid]
            marked[tid] = True
            bestneighbor = -1
            for jj in range(rowlength[tid]):
                nid = cc[rs+jj]
                if marked[nid]:
                    tval = 0.0
                else:
                    # Normalized-cut style matching score. (A Graclus-style
                    # alternative would be
                    # vv[rs+jj] * (1.0/weights[tid] + 1.0/weights[nid]).)
                    Wij = vv[rs+jj]
                    Wii = vv[rowstart[tid]]
                    Wjj = vv[rowstart[nid]]
                    di = weights[tid]
                    dj = weights[nid]
                    tval = (2.*Wij + Wii + Wjj) * 1./(di+dj+1e-9)

                if tval > wmax:
                    wmax = tval
                    bestneighbor = nid

            cluster_id[tid] = clustercount

            if bestneighbor > -1:
                cluster_id[bestneighbor] = clustercount
                marked[bestneighbor] = True

            clustercount += 1

    return cluster_id

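# Score intuition (an illustrative note): for a roughly unweighted graph the
# numerator above is roughly constant across neighbors, so tval ~ 1/(d_i + d_j)
# and HEM prefers to merge low-degree neighbors first, which keeps the
# coarsened graph balanced.
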
def compute_perm(parents):
    """
    Return a list of indices to reorder the adjacency and data matrices so
    that the union of two neighbors from layer to layer forms a binary tree.
    """

    # Order of last layer is random (chosen by the clustering algorithm).
    indices = []
    if len(parents) > 0:
        M_last = max(parents[-1]) + 1
        indices.append(list(range(M_last)))

    for parent in parents[::-1]:

        # Fake nodes go after real ones.
        pool_singletons = len(parent)

        indices_layer = []
        for i in indices[-1]:
            indices_node = list(np.where(parent == i)[0])
            assert 0 <= len(indices_node) <= 2

            # Add a node to go with a singleton.
            if len(indices_node) == 1:
                indices_node.append(pool_singletons)
                pool_singletons += 1

            # Add two nodes as children of a singleton in the parent.
            elif len(indices_node) == 0:
                indices_node.append(pool_singletons+0)
                indices_node.append(pool_singletons+1)
                pool_singletons += 2

            indices_layer.extend(indices_node)
        indices.append(indices_layer)

    # Sanity checks.
    for i, indices_layer in enumerate(indices):
        M = M_last*2**i
        # Reduction by 2 at each layer (binary tree).
        assert len(indices_layer) == M
        # The new ordering does not omit an index.
        assert sorted(indices_layer) == list(range(M))

    return indices[::-1]

assert (compute_perm([np.array([4,1,1,2,2,3,0,0,3]), np.array([2,1,0,1,0])])
        == [[3,4,0,9,1,2,5,8,6,7,10,11], [2,4,1,3,0,5], [0,1,2]])

def perm_adjacency(A, indices):
    """
    Permute adjacency matrix, i.e. exchange node ids,
    so that binary unions form the clustering tree.
    """
    if indices is None:
        return A

    M, M = A.shape
    Mnew = len(indices)
    A = A.tocoo()

    # Add Mnew - M isolated vertices.
    rows = scipy.sparse.coo_matrix((Mnew-M, M), dtype=np.float32)
    cols = scipy.sparse.coo_matrix((Mnew, Mnew-M), dtype=np.float32)
    A = scipy.sparse.vstack([A, rows])
    A = scipy.sparse.hstack([A, cols])

    # Permute the rows and the columns.
    perm = np.argsort(indices)
    A.row = np.array(perm)[A.row]
    A.col = np.array(perm)[A.col]

    assert np.abs(A - A.T).mean() < 1e-8
    assert type(A) is scipy.sparse.coo_matrix
    return A

def perm_data(x, indices):
    """
    Permute data matrix, i.e. exchange node ids,
    so that binary unions form the clustering tree.
    """
    if indices is None:
        return x

    N, M = x.shape
    Mnew = len(indices)
    assert Mnew >= M
    xnew = np.empty((N, Mnew))
    for i, j in enumerate(indices):
        # Existing vertex, i.e. real data.
        if j < M:
            xnew[:, i] = x[:, j]
        # Fake vertex because of singletons.
        # They will stay 0 so that max pooling chooses the singleton.
        # (One could also use -infty.)
        else:
            xnew[:, i] = np.zeros(N)
    return xnew
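
# A runnable end-to-end sketch (the ring graph and levels=2 are illustrative
# choices, not from the original pipeline):
if __name__ == '__main__':
    A = scipy.sparse.csr_matrix(np.array(
        [[0, 1, 0, 1],
         [1, 0, 1, 0],
         [0, 1, 0, 1],
         [1, 0, 1, 0]], dtype=np.float32))
    laplacians, perm = coarsen(A, levels=2)
    x = np.random.rand(5, A.shape[0])  # 5 signals, one value per vertex
    x = perm_data(x, perm)             # pad/reorder to match the pooling tree
    print([L.shape for L in laplacians], x.shape)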
Graph/ChebNet/code/grid_graph.py
ADDED
@@ -0,0 +1,69 @@
import sklearn
import sklearn.metrics
import scipy.sparse, scipy.sparse.linalg  # scipy.spatial.distance
import numpy as np


def grid_graph(grid_side, number_edges, metric):
    """Generate graph of a grid"""
    z = grid(grid_side)
    dist, idx = distance_sklearn_metrics(z, k=number_edges, metric=metric)
    A = adjacency(dist, idx)
    print("nb edges:", A.nnz)
    return A


def grid(m, dtype=np.float32):
    """Return coordinates of grid points"""
    M = m**2
    x = np.linspace(0, 1, m, dtype=dtype)
    y = np.linspace(0, 1, m, dtype=dtype)
    xx, yy = np.meshgrid(x, y)
    z = np.empty((M, 2), dtype)
    z[:, 0] = xx.reshape(M)
    z[:, 1] = yy.reshape(M)
    return z


def distance_sklearn_metrics(z, k=4, metric='euclidean'):
    """Compute pairwise distances"""
    d = sklearn.metrics.pairwise.pairwise_distances(z, metric=metric, n_jobs=1)
    # k-NN: drop column 0, which is each point's zero distance to itself.
    idx = np.argsort(d)[:, 1:k+1]
    d.sort()
    d = d[:, 1:k+1]
    return d, idx


def adjacency(dist, idx):
    """Return adjacency matrix of a kNN graph"""
    M, k = dist.shape
    assert (M, k) == idx.shape
    assert dist.min() >= 0
    assert dist.max() <= 1

    # Gaussian kernel on the pairwise distances.
    sigma2 = np.mean(dist[:, -1])**2
    dist = np.exp(- dist**2 / sigma2)

    # Weight matrix
    I = np.arange(0, M).repeat(k)
    J = idx.reshape(M*k)
    V = dist.reshape(M*k)
    W = scipy.sparse.coo_matrix((V, (I, J)), shape=(M, M))

    # No self-connections
    W.setdiag(0)

    # Undirected graph: symmetrize by keeping the larger of W and W^T.
    bigger = W.T > W
    W = W - W.multiply(bigger) + W.T.multiply(bigger)

    assert W.nnz % 2 == 0
    assert np.abs(W - W.T).mean() < 1e-10
    assert type(W) is scipy.sparse.csr_matrix
    return W
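
# A small driver sketch (grid side 28 with 8 nearest neighbours is the kind of
# setup used for MNIST-style grids; both values are just parameters here):
if __name__ == '__main__':
    A = grid_graph(28, number_edges=8, metric='euclidean')
    print(A.shape)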
Graph/DeepWalk/code/DeepWalk.py
ADDED
@@ -0,0 +1,164 @@
#### Imports ####

import torch
import torch.nn as nn
import random


adj_list = [[1, 2, 3], [0, 2, 3], [0, 1, 3], [0, 1, 2], [5, 6], [4, 6], [4, 5], [1, 3]]
size_vertex = len(adj_list)  # number of vertices

#### Hyperparameters ####

w = 3       # window size
d = 2       # embedding size
y = 200     # walks per vertex
t = 6       # walk length
lr = 0.025  # learning rate

v = [0, 1, 2, 3, 4, 5, 6, 7]  # labels of available vertices


#### Random Walk ####

def RandomWalk(node, t):
    walk = [node]  # Walk starts from this node

    for i in range(t-1):
        node = adj_list[node][random.randint(0, len(adj_list[node])-1)]
        walk.append(node)

    return walk


class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.phi = nn.Parameter(torch.rand((size_vertex, d), requires_grad=True))
        self.phi2 = nn.Parameter(torch.rand((d, size_vertex), requires_grad=True))

    def forward(self, one_hot):
        hidden = torch.matmul(one_hot, self.phi)
        out = torch.matmul(hidden, self.phi2)
        return out

model = Model()


def skip_gram(wvi, w):
    for j in range(len(wvi)):
        for k in range(max(0, j-w), min(j+w, len(wvi))):

            # generate one-hot vector for the center node
            one_hot = torch.zeros(size_vertex)
            one_hot[wvi[j]] = 1

            out = model(one_hot)
            loss = torch.log(torch.sum(torch.exp(out))) - out[wvi[k]]
            loss.backward()

            for param in model.parameters():
                param.data.sub_(lr*param.grad)
                param.grad.data.zero_()


for i in range(y):
    random.shuffle(v)
    for vi in v:
        wvi = RandomWalk(vi, t)
        skip_gram(wvi, w)


print(model.phi)


#### Hierarchical Softmax ####

def func_L(w):
    """
    Parameters
    ----------
    w: Leaf node.

    Returns
    -------
    count: The length of path from the root node to the given vertex.
    """
    count = 1
    while(w != 1):
        count += 1
        w //= 2

    return count


# func_n returns the nth node in the path from the root node to the given vertex
def func_n(w, j):
    li = [w]
    while(w != 1):
        w = w//2
        li.append(w)

    li.reverse()

    return li[j]


def sigmoid(x):
    out = 1/(1+torch.exp(-x))
    return out


class HierarchicalModel(torch.nn.Module):

    def __init__(self):
        super(HierarchicalModel, self).__init__()
        self.phi = nn.Parameter(torch.rand((size_vertex, d), requires_grad=True))
        self.prob_tensor = nn.Parameter(torch.rand((2*size_vertex, d), requires_grad=True))

    def forward(self, wi, wo):
        one_hot = torch.zeros(size_vertex)
        one_hot[wi] = 1
        w = size_vertex + wo  # leaf index of the output node in the tree
        h = torch.matmul(one_hot, self.phi)
        p = torch.tensor([1.0])
        for j in range(1, func_L(w)-1):
            mult = -1
            if(func_n(w, j+1) == 2*func_n(w, j)):  # Left child
                mult = 1

            p = p*sigmoid(mult*torch.matmul(self.prob_tensor[func_n(w, j)], h))

        return p


hierarchicalModel = HierarchicalModel()


def HierarchicalSkipGram(wvi, w):

    for j in range(len(wvi)):
        for k in range(max(0, j-w), min(j+w, len(wvi))):

            prob = hierarchicalModel(wvi[j], wvi[k])
            loss = - torch.log(prob)
            loss.backward()
            for param in hierarchicalModel.parameters():
                param.data.sub_(lr*param.grad)
                param.grad.data.zero_()


for i in range(y):
    random.shuffle(v)
    for vi in v:
        wvi = RandomWalk(vi, t)
        HierarchicalSkipGram(wvi, w)


for i in range(8):
    for j in range(8):
        print((hierarchicalModel(i, j).item()*100)//1, end=' ')
    print(end='\n')
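
# A quick sanity check on the learned embeddings (an illustrative addition,
# assuming the training loops above have run): nodes 0-3 and 7 share one
# component and 4-6 the other, so the corresponding rows of model.phi
# should form two tight clusters.
with torch.no_grad():
    print(torch.cdist(model.phi, model.phi))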
Graph/DeepWalk/code/DeepWalk_Blog+Code.ipynb
ADDED
@@ -0,0 +1,612 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# DeepWalk"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a part of this blog series, and continuing with the tradition of extracting useful graph features by considering the topology of the network graph using machine learning, this blog deals with DeepWalk. This is a simple unsupervised online learning approach, very similar to the language modelling used in NLP, where the goal is to generate word embeddings. In this case, generalizing the same concept, it simply tries to learn latent representations of the nodes/vertices of a given graph. These graph embeddings, which capture neighborhood similarity and community membership, can then be used for learning downstream tasks on the graph.\n",
    "\n",
    "\n",
    "![Input Graph to Embeddings](img/karate_to_embedding.jpg)\n",
    "\n",
    "\n",
    "## Motivation\n",
    "\n",
    "Assume a setting: given a graph G, you wish to convert the nodes into embedding vectors, and the only information about a node is the indices of the nodes to which it is connected (the adjacency matrix). Since there is no initial feature matrix corresponding to the data, we will construct a feature matrix which will have all the randomly selected nodes. There can be multiple methods to select these, but here we will be assuming that they are normally sampled (though it won't make much of a difference even if they are taken from some other distribution).\n",
    "\n",
    "\n",
    "## Random Walks\n",
    "\n",
    "Denote a random walk rooted at vertex $v_i$ by $W_{v_i}$. It is a stochastic process with random variables ${W^1}_{v_i}$, ${W^2}_{v_i}$, $...$, ${W^k}_{v_i}$ such that ${W^{k+1}}_{v_i}$ is a vertex chosen at random from the\n",
    "neighbors of vertex ${W^k}_{v_i}$. Random walk distances are good features for many problems. We'll be discussing how these short random walks are analogous to sentences in the language modelling setting and how we can carry the concept of context windows over to graphs as well.\n",
    "\n",
    "\n",
    "## What is Power Law?\n",
    "\n",
    "A scale-free network is a network whose degree distribution follows a power law, at least asymptotically. That is, the fraction $P(k)$ of nodes in the network having $k$ connections to other nodes goes for large values of $k$ as\n",
    "$P(k) \\sim k^{-\\gamma}$ where $k=2,3$ etc.\n",
    "\n",
    "![Power Law Graph](./Power_Law_Graph.gif)\n",
    "\n",
    "The network of global banking activity, with nodes representing the absolute size of assets booked in the respective jurisdiction and the edges between them the exchange of financial assets (data taken from the IMF), is a scale-free network and follows a power law. We can then see clearly how a very few core nodes dominate this network: there are approximately 200 countries in the world, but the 19 largest jurisdictions in terms of capital together are responsible for over 90% of the assets.\n",
    "\n",
    "<img src=\"img/Power_Law_Example.jpg\" alt=\"Power Law Example\" width=\"600\"/>\n",
    "\n",
    "These highly centralized networks are more formally called scale-free or power-law networks; the terms describe a power or exponential relationship between the degree of connectivity a node has and the frequency of its occurrence. [More](https://www.youtube.com/watch?v=qmCrtuS9vtU) about centralized networks and power law.\n",
    "\n",
    "### Why is it important here?\n",
    "\n",
    "Social networks, including collaboration networks, computer networks, financial networks and protein-protein interaction networks, are some examples of networks claimed to be scale-free.\n",
    "\n",
    "According to the authors, \"If the degree distribution of a connected graph follows a power law (i.e. scale-free), we observe that the frequency which vertices appear in the short random walks will also follow a power-law distribution. Word frequency in natural language follows a similar distribution, and techniques from language modeling account for this distributional behavior.\"\n",
    "\n",
    "![NLP vs Graph Random Walks Power Law D](img/NLP_vs_Graph.jpg)\n",
    "*$(a)$ comes from a series of short random walks on a scale-free graph, and $(b)$ comes from the text of 100,000 articles from the English Wikipedia.*\n",
    "\n",
    "\n",
    "## Intuition with SkipGram\n",
    "\n",
    "Think about the below unrelated problem for now:\n",
    "\n",
    "Given some English sentences (could be any other language, doesn't matter), you need to find a vector corresponding to each word appearing at least once in them, such that words having similar meaning appear close to each other in their vector space, and the opposite holds for words which are dissimilar.\n",
    "\n",
    "Suppose the sentences are\n",
    "1. Hi, I am Bert.\n",
    "2. Hello, this is Bert.\n",
    "\n",
    "From the above sentences you can see that 1 and 2 are related to each other, so even if someone doesn't know the language, one can make out that the words 'Hi' and 'Hello' have roughly the same meaning. We will be using a technique similar to what a human uses while trying to find out related words. Yes! We'll be guessing the meaning based on the words which are common between the sentences. Mathematically, learning a representation in word2vec means learning a mapping function from the word co-occurrences, and that is exactly what we are heading for.\n",
    "\n",
    "#### But, How?\n",
    "\n",
    "First let's get rid of the punctuation and assign a random vector to each word. Since these vectors are assigned randomly, the current representation is useless. We'll use our good old friend, *probability*, to convert these into meaningful representations. The idea is to maximize the probability of the appearance of a word, given the words that appear around it. Let's assume the probability is given by $P(x|y)$ where $y$ is the set of words that appear in the same sentence in which $x$ occurs. Remember we are only taking one sentence at a time, so first we'll maximize the probability of 'Hi' given {'I', 'am', 'Bert'}, then we'll maximize the probability of 'I' given {'Hi', 'am', 'Bert'}. We will do it for each word in the first sentence, and then for the second sentence. Repeat this procedure for all the sentences over and over again until the feature vectors have converged.\n",
    "\n",
    "One question that may arise now is, 'How do these feature vectors relate to the probability?'. The answer is that in the probability function we'll utilize the word vectors assigned to them. But, aren't those vectors random? Ahh, they are at the start, but we promise you that by the end of the blog they will have converged to values which really give some meaning to those seemingly random numbers.\n",
    "\n",
    "#### So, what exactly does the probability function help us with?\n",
    "\n",
    "What does it mean to find the probability of a vector given other vectors? This actually is a simple question with a pretty simple answer: take it as a fill-in-the-blank problem that you may have dealt with in primary school,\n",
    "\n",
    "Roses ____ red.\n",
    "\n",
    "What is the most likely guess? Most people will fill it with an 'are' (unless you are pretending to be oversmart in an attempt to prove how cool you are). You were able to fill that because you've seen some examples of the word 'are' previously in life which help you with the context. The probability function is trying to do the same: it is finding out the word which is most likely to occur given the words that are surrounding it.\n",
    "\n",
    "\n",
    "#### But this still doesn't explain how it's gonna do that.\n",
    "\n",
    "In case you guessed 'Neural Network', you are correct. In this blog we'll be using neural nets (feeling sleepy now, so let's wrap this up).\n",
    "\n",
    "It is not necessary to use neural nets to estimate the probability function, but it works and looks cool :P. Frankly, the authors used it, so we'll follow them.\n",
    "\n",
    "The input layer will have |V| neurons, where |V| is the number of words that are interesting to us. We will be using only one hidden layer for simplicity. It can have as many neurons as you want, but it is suggested to keep a number that is less than the number of words in the vocabulary. The output layer will also have |V| neurons.\n",
    "\n",
    "Now let's move on to the interpretation of the input and output layers (don't care about the hidden layer).\n",
    "Let's suppose the words in the vocabulary are $V_1$, $V_2$, $...$, $V_i$, $...$, $V_n$, and that out of these $V_4$, $V_7$ and $V_9$ appear along with the word whose probability we are trying to maximise. Then the input layer will have the 4th, 7th and 9th neurons set to 1 and all others set to 0. The hidden layer will then have some function of these values; it has no non-linear activation. Each of the |V| neurons in the output layer will have a score; the higher it is, the higher the chances of that word appearing along with the surrounding words. Apply sigmoid, boom! We got the probabilities.\n",
    "\n",
    "So a simple neural network will help us solve the fill-in-the-blank problem.\n",
    "\n",
    "\n",
    "## DeepWalk = SkipGram Analogy + Random Walks\n",
    "\n",
    "These random walks can be thought of as short sentences and phrases in a special language; the direct analog is to estimate the likelihood of observing vertex $v_i$ given all the previous vertices visited so far in the random walk. Our goal is to learn a latent representation, not only a probability distribution of node co-occurrences, and so we introduce a mapping function $ Φ: v ∈ V→R^{|V|×d} $. This mapping $Φ$ represents the latent social representation associated with each vertex $v$ in the graph. (In practice, we represent $Φ$ by a $|V|×d$ matrix of free parameters, which will serve later on as our $X_E$.)\n",
    "\n",
    "The problem then is to estimate the likelihood: $ Pr ({v}_{i} | Φ(v_1), Φ(v_2), · · · , Φ(v_{i−1})) $\n",
    "\n",
    "In simple words, the *DeepWalk* algorithm uses the notion of random walks to get the surrounding nodes (words) and ultimately calculate the probability given the context nodes. A random walk starts at a node, finds all the nodes which have an edge connecting with this start node, randomly selects one of them, then treats this new node as the start node and repeats the procedure; after $n$ steps you will have traversed $n$ nodes (some of them might repeat, but it does not matter, just as words in a sentence may repeat as well). We will take the traversed nodes as the surrounding nodes for the original node and will try to maximize the probability with respect to those using the probability function estimate.\n",
    "\n",
    "*So, that is for you, Ladies and Gentlemen, the <b>'DeepWalk'</b> model.*\n",
    "\n",
    "Mathematically the DeepWalk algorithm is defined as follows,\n",
    "\n",
    "![Deep Walk Algorithm](img/DeepWalk_Algo.jpg)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## PyTorch Implementation of DeepWalk"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here we will use the following graph as an example to implement DeepWalk on,\n",
    "![Example Graph](./graph.png)\n",
    "\n",
    "As you can see there are two connected components, so we can expect that when we create the vectors for each node, the vectors of [1, 2, 3, 7] should be close and similarly those of [4, 5, 6] should be close. Also, if any two nodes come from different components, their vectors should be far apart."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here we will be representing the graph using the adjacency list representation. Make sure that you are able to understand that the given graph and this adjacency list are equivalent."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "adj_list = [[1,2,3], [0,2,3], [0, 1, 3], [0, 1, 2], [5, 6], [4,6], [4, 5], [1, 3]]\n",
    "size_vertex = len(adj_list)  # number of vertices"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import random"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Hyperparameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "w=3       # window size\n",
    "d=2       # embedding size\n",
    "y=200     # walks per vertex\n",
    "t=6       # walk length\n",
    "lr=0.025  # learning rate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "v=[0,1,2,3,4,5,6,7]  # labels of available vertices"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Random Walk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def RandomWalk(node,t):\n",
    "    walk = [node]  # Walk starts from this node\n",
    "    \n",
    "    for i in range(t-1):\n",
    "        node = adj_list[node][random.randint(0,len(adj_list[node])-1)]\n",
    "        walk.append(node)\n",
    "\n",
    "    return walk"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Skipgram\n",
    "\n",
    "The skipgram model is closely related to the CBOW model that we just covered. In the CBOW model we have to maximise the probability of a word given its surrounding words using a neural network, and when the probability is maximised, the weights learnt from the input to the hidden layer are the word vectors of the given words. In the skipgram model we use a single word to maximise the probability of the surrounding words. This can be done by using a neural network that looks like the mirror image of the network that we used for the CBOW. And in the end, the weights of the input to the hidden layer are the corresponding word vectors.\n",
    "\n",
    "Now let's analyze the complexity.\n",
    "There are |V| words in the vocabulary, so for each iteration we will be modifying a total of |V| vectors. This is very expensive: the vocabulary size is usually in the millions, and since we usually need millions of iterations before convergence, this can take a long, long time to run.\n",
    "\n",
    "We will soon be discussing some methods like hierarchical softmax or negative sampling to reduce this complexity. But first, we'll code a simple skipgram model. The class defines the model, whereas the function 'skip_gram' takes care of the training loop."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Model(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Model, self).__init__()\n",
    "        self.phi = nn.Parameter(torch.rand((size_vertex, d), requires_grad=True))\n",
    "        self.phi2 = nn.Parameter(torch.rand((d, size_vertex), requires_grad=True))\n",
    "        \n",
    "    def forward(self, one_hot):\n",
    "        hidden = torch.matmul(one_hot, self.phi)\n",
    "        out = torch.matmul(hidden, self.phi2)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = Model()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def skip_gram(wvi, w):\n",
    "    for j in range(len(wvi)):\n",
    "        for k in range(max(0,j-w), min(j+w, len(wvi))):\n",
    "            \n",
    "            # generate one-hot vector for the center node\n",
    "            one_hot = torch.zeros(size_vertex)\n",
    "            one_hot[wvi[j]] = 1\n",
    "            \n",
    "            out = model(one_hot)\n",
    "            loss = torch.log(torch.sum(torch.exp(out))) - out[wvi[k]]\n",
    "            loss.backward()\n",
    "            \n",
    "            for param in model.parameters():\n",
    "                param.data.sub_(lr*param.grad)\n",
    "                param.grad.data.zero_()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "for i in range(y):\n",
    "    random.shuffle(v)\n",
    "    for vi in v:\n",
    "        wvi=RandomWalk(vi,t)\n",
    "        skip_gram(wvi, w)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The i'th row of model.phi corresponds to the vector of the i'th node. As you can see, the vectors of [0, 1, 2, 3, 7] are close to one another, while all of them differ markedly from the vectors corresponding to [4, 5, 6]."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Parameter containing:\n",
      "tensor([[ 1.2371,  0.3519],\n",
      "        [ 1.0416, -0.1595],\n",
      "        [ 1.4024, -0.2323],\n",
      "        [ 1.2611, -0.5249],\n",
      "        [-1.1221,  0.8553],\n",
      "        [-0.9691,  1.1747],\n",
      "        [-1.3842,  0.4503],\n",
      "        [ 0.2370, -1.2395]], requires_grad=True)\n"
     ]
    }
   ],
   "source": [
    "print(model.phi)"
   ]
  },
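  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick way to make the clustering visible is to look at pairwise Euclidean distances between the rows of `model.phi` (a small sketch that only inspects the tensor printed above; `torch.cdist` computes all pairwise distances):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Distances within {0,1,2,3,7} and within {4,5,6} should be small,\n",
    "# distances across the two groups large.\n",
    "with torch.no_grad():\n",
    "    print(torch.cdist(model.phi, model.phi))"
   ]
  },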
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we will be discussing a variant of the above using hierarchical softmax."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Hierarchical Softmax"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As we have seen in the skip-gram model, the probability of any outcome depends on the total number of outcomes of our model. If you haven't noticed this yet, let us explain how!\n",
    "\n",
    "When we calculate the probability of an outcome using softmax, this probability depends on the number of model parameters via the normalisation constant (denominator term) in the softmax.\n",
    "\n",
    "$\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}$\n",
    "\n",
    "And the number of such parameters is linear in the total number of outcomes. This means that if we are dealing with a very large graphical structure, it can be computationally very expensive and take a lot of time.\n",
    "\n",
    "### Can we somehow overcome this challenge?\n",
    "Obviously, yes! (Because we're asking at this stage.)\n",
    "\n",
    "\\*Drum roll please\\*\n",
    "\n",
    "<b>Enter \"Hierarchical Softmax (hs)\"</b>.\n",
    "\n",
    "Basically, hs is an alternative approximation to the softmax in which the probability of any one outcome depends on a number of model parameters that is only logarithmic in the total number of outcomes.\n",
    "\n",
    "Hierarchical softmax uses a binary tree to represent all the words (nodes) in the vocabulary. Each leaf of the tree is a node of our graph, and there is a unique path from the root to each leaf. Each intermediate node of the tree explicitly represents the relative probabilities of its child nodes. So these nodes are associated with different vectors which our model is going to learn."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The idea behind decomposing the output layer into a binary tree is to reduce the time complexity of obtaining the\n",
    "probability distribution from $O(V)$ to $O(log(V))$."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let us understand the process with an example.\n",
    "\n",
    "![binary tree](img/tree.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "In this example, leaf nodes represent the original nodes of our graph. The highlighted nodes and edges make a path from the root to an example leaf node $w_2$.\n",
    "\n",
    "Here, the length of the path $L(w_{2}) = 4$.\n",
    "\n",
    "$n(w, j)$ means the $j^{th}$ node on the path from the root to a leaf node $w$."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now, view this tree as a decision process, or a random walk, that begins at the root of the tree and descends towards the leaf nodes at each step. It turns out that the probability of each outcome in the original distribution uniquely determines the transition probabilities of this random walk. If you want to go from the root node to $w_2$ (say), first you have to take a left turn, again a left turn and then a right turn.\n",
    "\n",
    "Let's denote the probability of going left at an intermediate node $n$ as $p(n,left)$ and the probability of going right as $p(n,right)$. So we can define the probability of reaching $w_2$ as follows.\n",
    "\n",
    "<b> $P(w_2|w_i) = p(n(w_{2}, 1), left) . p(n(w_{2}, 2), left) . p(n(w_{2}, 3), right)$ </b>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The above process implies that the cost of computing the loss function and its gradient is proportional to the number of nodes on the path between the root node and the output node, which on average is no greater than $log(V)$. That's nice! Isn't it? In the case where we deal with a large number of outcomes, there will be a huge difference in the computational cost of 'vanilla' softmax and hierarchical softmax.\n",
    "\n",
    "The implementation remains similar to the vanilla version, except that we replace the Model class with the HierarchicalModel class, which is defined below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "def func_L(w):\n",
    "    \"\"\"\n",
    "    Parameters\n",
    "    ----------\n",
    "    w: Leaf node.\n",
    "    \n",
    "    Returns\n",
    "    -------\n",
    "    count: The length of path from the root node to the given vertex.\n",
    "    \"\"\"\n",
    "    count=1\n",
    "    while(w!=1):\n",
    "        count+=1\n",
    "        w//=2\n",
    "\n",
    "    return count"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# func_n returns the nth node in the path from the root node to the given vertex\n",
    "def func_n(w, j):\n",
    "    li=[w]\n",
    "    while(w!=1):\n",
    "        w = w//2\n",
    "        li.append(w)\n",
    "\n",
    "    li.reverse()\n",
    "    \n",
    "    return li[j]"
   ]
  },
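  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For instance, with heap-style indexing (root $= 1$, children of node $i$ at $2i$ and $2i+1$), the leaf $w = 8$ sits at the end of the path $1 \\rightarrow 2 \\rightarrow 4 \\rightarrow 8$, so `func_L(8)` is 4 and `func_n(8, j)` walks that path:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(func_L(8))                                 # 4\n",
    "print([func_n(8, j) for j in range(func_L(8))])  # [1, 2, 4, 8]"
   ]
  },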
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def sigmoid(x):\n",
    "    out = 1/(1+torch.exp(-x))\n",
    "    return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "class HierarchicalModel(torch.nn.Module):\n",
    "    \n",
    "    def __init__(self):\n",
    "        super(HierarchicalModel, self).__init__()\n",
    "        self.phi = nn.Parameter(torch.rand((size_vertex, d), requires_grad=True))\n",
    "        self.prob_tensor = nn.Parameter(torch.rand((2*size_vertex, d), requires_grad=True))\n",
    "    \n",
    "    def forward(self, wi, wo):\n",
    "        one_hot = torch.zeros(size_vertex)\n",
    "        one_hot[wi] = 1\n",
    "        w = size_vertex + wo  # leaf index of the output node in the tree\n",
    "        h = torch.matmul(one_hot,self.phi)\n",
    "        p = torch.tensor([1.0])\n",
    "        for j in range(1, func_L(w)-1):\n",
    "            mult = -1\n",
    "            if(func_n(w, j+1)==2*func_n(w, j)):  # Left child\n",
    "                mult = 1\n",
    "        \n",
    "            p = p*sigmoid(mult*torch.matmul(self.prob_tensor[func_n(w,j)], h))\n",
    "        \n",
    "        return p"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The input-to-hidden weights no longer directly represent the embedding of each node, so reading them off will not provide any valuable insight; a better option is to evaluate the model's probabilities for different node pairs to figure out the likelihood of co-occurrence of the nodes."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "hierarchicalModel = HierarchicalModel()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def HierarchicalSkipGram(wvi, w):\n",
    "    \n",
    "    for j in range(len(wvi)):\n",
    "        for k in range(max(0,j-w), min(j+w, len(wvi))):\n",
    "            \n",
    "            prob = hierarchicalModel(wvi[j], wvi[k])\n",
    "            loss = - torch.log(prob)\n",
    "            loss.backward()\n",
    "            for param in hierarchicalModel.parameters():\n",
    "                param.data.sub_(lr*param.grad)\n",
    "                param.grad.data.zero_()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "for i in range(y):\n",
    "    random.shuffle(v)\n",
    "    for vi in v:\n",
    "        wvi = RandomWalk(vi,t)\n",
    "        HierarchicalSkipGram(wvi, w)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "24.0 28.0 23.0 22.0 14.0 8.0 5.0 70.0 \n",
      "24.0 31.0 23.0 21.0 8.0 3.0 1.0 86.0 \n",
      "22.0 25.0 25.0 26.0 15.0 11.0 2.0 69.0 \n",
      "19.0 23.0 26.0 31.0 10.0 7.0 0.0 81.0 \n",
      "36.0 33.0 18.0 12.0 39.0 29.0 31.0 0.0 \n",
      "31.0 28.0 22.0 18.0 34.0 34.0 30.0 0.0 \n",
      "33.0 30.0 20.0 15.0 35.0 28.0 35.0 0.0 \n",
      "20.0 26.0 25.0 27.0 6.0 3.0 0.0 90.0 \n"
     ]
    }
   ],
   "source": [
    "for i in range(8):\n",
    "    for j in range(8):\n",
    "        print((hierarchicalModel(i,j).item()*100)//1, end=' ')\n",
    "    print(end='\\n')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h3>References</h3>\n",
    "\n",
    "- [DeepWalk: Online Learning of Social Representations](http://www.perozzi.net/publications/14_kdd_deepwalk.pdf)\n",
    "\n",
    "- [An Illustrated Explanation of Using SkipGram To Encode The Structure of A Graph (DeepWalk)](https://medium.com/@_init_/an-illustrated-explanation-of-using-skipgram-to-encode-the-structure-of-a-graph-deepwalk-6220e304d71b?source=---------13------------------)\n",
    "\n",
    "- [Word Embedding](https://medium.com/data-science-group-iitr/word-embedding-2d05d270b285)\n",
    "\n",
    "- [Centralized & Scale Free Networks](https://www.youtube.com/watch?v=qmCrtuS9vtU)\n",
    "\n",
    "\n",
    "- Beautiful explanations by Chris McCormick:\n",
    "    - [Hierarchical Softmax](https://youtu.be/pzyIWCelt_E)\n",
    "    - [word2vec](http://mccormickml.com/2019/03/12/the-inner-workings-of-word2vec/)\n",
    "    - [Negative Sampling](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/)\n",
    "    - [skip-gram](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
Graph/GAT/code/GAT_Blog+Code.ipynb
ADDED
@@ -0,0 +1,475 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<!-- # Understanding Graph Attention Networks (GAT) -->\n",
    "<h1><center>Understanding Graph Attention Networks (GAT)</center></h1>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<!-- ![GAT Cover](GAT_Cover.jpg) -->\n",
    "<img src=\"img/GAT_Cover.jpg\" width=700x/>\n",
    "\n",
    "This is the 4th in the series of blogs <font color=\"green\">*Explained: Graph Representation Learning*</font>. Let's dive right in, assuming you have read the first three. GAT (Graph Attention Network) is a novel neural network architecture that operates on graph-structured data, leveraging masked self-attentional layers to address the shortcomings of prior methods based on graph convolutions or their approximations. By stacking layers in which nodes are able to attend over their neighborhoods' features, the method enables (implicitly) specifying different weights to different nodes in a neighborhood, without requiring any kind of costly matrix operation (such as inversion) or depending on knowing the graph structure upfront. In this way, GAT addresses several key challenges of spectral-based graph neural networks simultaneously, making the model readily applicable to inductive as well as transductive problems.\n",
    "\n",
    "Analyzing and visualizing the learned attentional weights also leads to a more interpretable model in terms of the importance of neighbors."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "But before getting into the meat of this method, I want you to be familiar with the Attention Mechanism, because we'll be building GATs on the concepts of <b>Self Attention</b> and <b>Multi-Head Attention</b> introduced by <b><i>Vaswani et al.</i></b>\n",
    "If not, you may read this blog, [The Illustrated Transformer](http://jalammar.github.io/illustrated-transformer/) by Jay Alammar.\n",
    "\n",
    "<hr/>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h2><center>Can we do better than GCNs?</center></h2>\n",
    "<!-- ## Can we do better than GCNs? -->\n",
    "\n",
    "From the Graph Convolutional Network (GCN), we learnt that combining local graph structure and node-level features yields good performance on the node classification task. However, the way GCN aggregates messages is <b>structure-dependent</b>, which may hurt its generalizability.\n",
    "\n",
    "The fundamental novelty that GAT brings to the table is how the information from the one-hop neighborhood is aggregated. For GCN, a graph convolution operation produces the normalized sum of neighbors' node features as follows:\n",
    "\n",
    "$$h_i^{(l+1)}=\\sigma\\left(\\sum_{j\\in \\mathcal{N}(i)} {\\frac{1}{c_{ij}} W^{(l)}h^{(l)}_j}\\right)$$\n",
    "\n",
    "where $\\mathcal{N}(i)$ is the set of its one-hop neighbors (to include $v_{i}$ in the set, we simply add a self-loop to each node), $c_{ij}=\\sqrt{|\\mathcal{N}(i)|}\\sqrt{|\\mathcal{N}(j)|}$ is a normalization constant based on graph structure, $\\sigma$ is an activation function (GCN uses ReLU), and $W^{l}$ is a shared weight matrix for node-wise feature transformation.\n",
    "\n",
    "GAT introduces the attention mechanism as a substitute for the statically normalized convolution operation. The figure below clearly illustrates the key difference.\n",
    "\n",
    "<img src=\"img/GCN_vs_GAT.jpg\" width=800x/>\n",
    "\n",
    "<hr/>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<!-- ## How does the attention work in GAT layer? -->\n",
    "<h1><center>How does the GAT layer work?</center></h1>\n",
    "\n",
    "The particular attentional setup utilized by GAT closely follows the work of `Bahdanau et al. (2015)`, i.e. *Additive Attention*, but the framework is agnostic to the particular choice of attention mechanism.\n",
    "\n",
    "The input to the layer is a set of node features, $\\mathbf{h} = \\{\\vec{h}_1,\\vec{h}_2,...,\\vec{h}_N\\}, \\vec{h}_i ∈ \\mathbb{R}^{F}$, where $N$ is the\n",
    "number of nodes, and $F$ is the number of features in each node. The layer produces a new set of node\n",
    "features (of potentially different cardinality $F'$ ), $\\mathbf{h'} = \\{\\vec{h'}_1,\\vec{h'}_2,...,\\vec{h'}_N\\}, \\vec{h'}_i ∈ \\mathbb{R}^{F'}$, as its output.\n",
    "\n",
    "\n",
    "<h3><font color=\"black\" >The Attentional Layer broken into 4 separate parts:</font></h3>\n",
    "\n",
    "<hr/>\n",
    "\n",
    "**1)** <font color=\"red\">**Simple linear transformation:**</font> In order to obtain sufficient expressive power to transform the input features into higher-level features, at least one learnable linear transformation is required. To that end, as an initial step, a shared linear transformation, parametrized by a weight matrix, $W ∈ \\mathbb{R}^{F′×F}$, is applied to every node.\n",
    "\n",
    "$$\\begin{split}\\begin{align}\n",
    "z_i^{(l)}&=W^{(l)}h_i^{(l)},&(1) \\\\\n",
    "\\end{align}\\end{split}$$\n",
    "\n",
    "<div style=\"float: right\">\n",
    "    <img src=\"img/Attentional_Layer.jpg\" width=400x/>\n",
    "</div>\n",
    "\n",
    "\n",
    "<hr/>\n",
    "\n",
    "**2)** <font color=\"red\">**Attention Coefficients:**</font> We then compute a pair-wise <font color=\"blue\">**un-normalized**</font> attention score between two neighbors. Here, it first concatenates the $z$ embeddings of the two nodes, where $||$ denotes concatenation, then takes a dot product of it with a learnable weight vector $\\vec a^{(l)}$, and applies a LeakyReLU in the end. This form of attention is usually called additive attention, in contrast with the dot-product attention used for the Transformer model. We then perform self-attention on the nodes, a shared attentional mechanism $a$ : $\\mathbb{R}^{F′} × \\mathbb{R}^{F′} → \\mathbb{R}$, to compute attention coefficients\n",
    "$$\\begin{split}\\begin{align}\n",
    "e_{ij}^{(l)}&=\\text{LeakyReLU}(\\vec a^{(l)^T}(z_i^{(l)}||z_j^{(l)})),&(2)\\\\\n",
    "\\end{align}\\end{split}$$\n",
    "\n",
    "**Q. Is this step the most important step?**\n",
    "\n",
    "**Ans.** Yes! This indicates the importance of node $j$'s features to node $i$. This step allows every node to attend to every other node, dropping all structural information.\n",
    "\n",
    "**NOTE:** The graph structure is injected into the mechanism by performing <b>*masked attention*</b>: we only compute $e_{ij}$ for nodes $j$ ∈ $N_{i}$, where $N_{i}$ is some neighborhood of node $i$ in the graph. In all the experiments, these will be exactly the first-order neighbors of $i$ (including $i$).\n",
    "\n",
    "\n",
    "<hr/>\n",
    "\n",
    "**3)** <font color=\"red\">**Softmax:**</font> To make the coefficients easily comparable across different nodes, we normalize them across all choices of $j$ using the softmax function\n",
    "\n",
    "$$\\begin{split}\\begin{align}\n",
    "\\alpha_{ij}^{(l)}&=\\frac{\\exp(e_{ij}^{(l)})}{\\sum_{k\\in \\mathcal{N}(i)}^{}\\exp(e_{ik}^{(l)})},&(3)\\\\\n",
    "\\end{align}\\end{split}$$\n",
    "\n",
    "\n",
    "<hr/>\n",
    "\n",
    "**4)** <font color=\"red\">**Aggregation:**</font> This step is similar to GCN. The embeddings from neighbors are aggregated together, scaled by the attention scores.\n",
    "\n",
    "$$\\begin{split}\\begin{align}\n",
    "h_i^{(l+1)}&=\\sigma\\left(\\sum_{j\\in \\mathcal{N}(i)} {\\alpha^{(l)}_{ij} z^{(l)}_j }\\right),&(4)\n",
    "\\end{align}\\end{split}$$\n",
    "\n",
    "<hr/>"
   ]
  },
|
123 |
+
{
|
124 |
+
"cell_type": "markdown",
|
125 |
+
"metadata": {},
|
126 |
+
"source": [
|
127 |
+
"<!-- ### Multi-head Attention -->\n",
|
128 |
+
"<h2><center>Multi-head Attention</center></h2>\n",
|
129 |
+
"\n",
|
130 |
+
"<p>\n",
|
131 |
+
"<img src=\"img/MultiHead_Attention.jpeg\" width=500x/>\n",
|
132 |
+
"*An illustration of multi-head attention (with K = 3 heads) by node 1 on its neighborhood. Different arrow styles and colors denote independent attention computations. The aggregated features from each head are concatenated or averaged to obtain $\\vec{h'}_{1}$.*\n",
|
133 |
+
"</p>\n",
|
134 |
+
"\n",
|
135 |
+
"Analogous to multiple channels in ConvNet, GAT introduces multi-head attention to enrich the model capacity and to stabilize the learning process. Specifically, K independent attention mechanisms execute the transformation of Equation 4, and then their outputs can be combined in 2 ways depending on the use:\n",
|
136 |
+
"\n",
|
137 |
+
"* <b>Concatenation</b>\n",
|
138 |
+
" \n",
|
139 |
+
" $$\\textbf{Concatenation}: h^{(l+1)}_{i} =||_{k=1}^{K}\\sigma\\left(\\sum_{j\\in \\mathcal{N}(i)}\\alpha_{ij}^{k}W^{k}h^{(l)}_{j}\\right)$$\n",
|
140 |
+
" \n",
|
141 |
+
" * As can be seen in this settingthe final returned output, $h′$, will consist of $KF′$ features (rather than F′) for each node.\n",
|
142 |
+
"\n",
|
143 |
+
"\n",
|
144 |
+
"* <b>Averaging</b>\n",
|
145 |
+
" * If we perform multi-head attention on the final (prediction) layer of the network, concatenation is no longer sensible and instead, averaging is employed, and delay applying the final nonlinearity (usually a softmax or logistic sigmoid for classification problems) until then:\n",
|
146 |
+
" \n",
|
147 |
+
" $$\\textbf{Average}: h_{i}^{(l+1)}=\\sigma\\left(\\frac{1}{K}\\sum_{k=1}^{K}\\sum_{j\\in\\mathcal{N}(i)}\\alpha_{ij}^{k}W^{k}h^{(l)}_{j}\\right)$$\n",
|
148 |
+
" \n",
|
149 |
+
"Thus concatenation for intermediary layers and average for the final layer are used."
|
150 |
+
]
|
151 |
+
},
|
152 |
+
{
|
153 |
+
"cell_type": "markdown",
|
154 |
+
"metadata": {},
|
155 |
+
"source": [
|
156 |
+
"<!-- ## Implementing GAT Layer in PyTorch -->\n",
|
157 |
+
"<h1><center>Implementing GAT Layer in PyTorch</center></h1>\n",
|
158 |
+
"\n",
|
159 |
+
"## Imports"
|
160 |
+
]
|
161 |
+
},
|
162 |
+
{
|
163 |
+
"cell_type": "code",
|
164 |
+
"execution_count": 1,
|
165 |
+
"metadata": {},
|
166 |
+
"outputs": [
|
167 |
+
{
|
168 |
+
"data": {
|
169 |
+
"text/plain": [
|
170 |
+
"<torch._C.Generator at 0x1108d6810>"
|
171 |
+
]
|
172 |
+
},
|
173 |
+
"execution_count": 1,
|
174 |
+
"metadata": {},
|
175 |
+
"output_type": "execute_result"
|
176 |
+
}
|
177 |
+
],
|
178 |
+
"source": [
|
179 |
+
"import numpy as np\n",
|
180 |
+
"import torch\n",
|
181 |
+
"import torch.nn as nn\n",
|
182 |
+
"import torch.nn.functional as F\n",
|
183 |
+
"\n",
|
184 |
+
"torch.manual_seed(2020) # seed for reproducible numbers"
|
185 |
+
]
|
186 |
+
},
|
187 |
+
{
|
188 |
+
"cell_type": "code",
|
189 |
+
"execution_count": 2,
|
190 |
+
"metadata": {},
|
191 |
+
"outputs": [],
|
192 |
+
"source": [
|
193 |
+
"class GATLayer(nn.Module):\n",
|
194 |
+
" \"\"\"\n",
|
195 |
+
" Simple PyTorch Implementation of the Graph Attention layer.\n",
|
196 |
+
" \"\"\"\n",
|
197 |
+
"\n",
|
198 |
+
" def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n",
|
199 |
+
" super(GATLayer, self).__init__()\n",
|
200 |
+
" self.dropout = dropout # drop prob = 0.6\n",
|
201 |
+
" self.in_features = in_features # \n",
|
202 |
+
" self.out_features = out_features # \n",
|
203 |
+
" self.alpha = alpha # LeakyReLU with negative input slope, alpha = 0.2\n",
|
204 |
+
" self.concat = concat # conacat = True for all layers except the output layer.\n",
|
205 |
+
"\n",
|
206 |
+
" # Xavier Initialization of Weights\n",
|
207 |
+
" # Alternatively use weights_init to apply weights of choice \n",
|
208 |
+
" self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))\n",
|
209 |
+
" nn.init.xavier_uniform_(self.W.data, gain=1.414)\n",
|
210 |
+
" self.a = nn.Parameter(torch.zeros(size=(2*out_features, 1)))\n",
|
211 |
+
" nn.init.xavier_uniform_(self.a.data, gain=1.414)\n",
|
212 |
+
" \n",
|
213 |
+
" # LeakyReLU\n",
|
214 |
+
" self.leakyrelu = nn.LeakyReLU(self.alpha)\n",
|
215 |
+
"\n",
|
216 |
+
" def forward(self, input, adj):\n",
|
217 |
+
" # Linear Transformation\n",
|
218 |
+
" h = torch.mm(input, self.W)\n",
|
219 |
+
" N = h.size()[0]\n",
|
220 |
+
"\n",
|
221 |
+
" # Attention Mechanism\n",
|
222 |
+
" a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)\n",
|
223 |
+
" e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))\n",
|
224 |
+
"\n",
|
225 |
+
" # Masked Attention\n",
|
226 |
+
" zero_vec = -9e15*torch.ones_like(e)\n",
|
227 |
+
" attention = torch.where(adj > 0, e, zero_vec)\n",
|
228 |
+
" \n",
|
229 |
+
" attention = F.softmax(attention, dim=1)\n",
|
230 |
+
" attention = F.dropout(attention, self.dropout, training=self.training)\n",
|
231 |
+
" h_prime = torch.matmul(attention, h)\n",
|
232 |
+
"\n",
|
233 |
+
" if self.concat:\n",
|
234 |
+
" return F.elu(h_prime)\n",
|
235 |
+
" else:\n",
|
236 |
+
" return h_prime"
|
237 |
+
]
|
238 |
+
},
|
239 |
+
{
|
240 |
+
"cell_type": "code",
|
241 |
+
"execution_count": 3,
|
242 |
+
"metadata": {},
|
243 |
+
"outputs": [],
|
244 |
+
"source": [
|
245 |
+
"# # Alternate approach to applying weights of choice using weights_init()\n",
|
246 |
+
"# def weights_init(m):\n",
|
247 |
+
"# if isinstance(m, nn.Linear):\n",
|
248 |
+
"# torch.nn.init.xavier_uniform_(m.weight)\n",
|
249 |
+
"\n",
|
250 |
+
"# # Applying just after calling the model class\n",
|
251 |
+
"# model.apply(weights_init)"
|
252 |
+
]
|
253 |
+
},
|
254 |
+
{
|
255 |
+
"cell_type": "markdown",
|
256 |
+
"metadata": {},
|
257 |
+
"source": [
|
258 |
+
"<!-- ## Implementing GAT on Citation Datasets using PyTorch Geometric -->\n",
|
259 |
+
"<h1><center>Implementing GAT on Citation Datasets using PyTorch Geometric</center></h1>"
|
260 |
+
]
|
261 |
+
},
|
262 |
+
{
|
263 |
+
"cell_type": "markdown",
|
264 |
+
"metadata": {},
|
265 |
+
"source": [
|
266 |
+
"### PyG Imports"
|
267 |
+
]
|
268 |
+
},
|
269 |
+
{
|
270 |
+
"cell_type": "code",
|
271 |
+
"execution_count": 4,
|
272 |
+
"metadata": {},
|
273 |
+
"outputs": [],
|
274 |
+
"source": [
|
275 |
+
"from torch_geometric.data import Data\n",
|
276 |
+
"from torch_geometric.nn import GATConv\n",
|
277 |
+
"from torch_geometric.datasets import Planetoid\n",
|
278 |
+
"import torch_geometric.transforms as T\n",
|
279 |
+
"\n",
|
280 |
+
"import matplotlib.pyplot as plt\n",
|
281 |
+
"%matplotlib notebook\n",
|
282 |
+
"\n",
|
283 |
+
"import warnings\n",
|
284 |
+
"warnings.filterwarnings(\"ignore\")"
|
285 |
+
]
|
286 |
+
},
|
287 |
+
{
|
288 |
+
"cell_type": "markdown",
|
289 |
+
"metadata": {},
|
290 |
+
"source": [
|
291 |
+
"### Dataset"
|
292 |
+
]
|
293 |
+
},
|
294 |
+
{
|
295 |
+
"cell_type": "code",
|
296 |
+
"execution_count": 5,
|
297 |
+
"metadata": {},
|
298 |
+
"outputs": [
|
299 |
+
{
|
300 |
+
"name": "stdout",
|
301 |
+
"output_type": "stream",
|
302 |
+
"text": [
|
303 |
+
"Number of Classes in Cora: 7\n",
|
304 |
+
"Number of Node Features in Cora: 1433\n"
|
305 |
+
]
|
306 |
+
}
|
307 |
+
],
|
308 |
+
"source": [
|
309 |
+
"name_data = 'Cora'\n",
|
310 |
+
"dataset = Planetoid(root= '/tmp/' + name_data, name = name_data)\n",
|
311 |
+
"dataset.transform = T.NormalizeFeatures()\n",
|
312 |
+
"\n",
|
313 |
+
"print(f\"Number of Classes in {name_data}:\", dataset.num_classes)\n",
|
314 |
+
"print(f\"Number of Node Features in {name_data}:\", dataset.num_node_features)"
|
315 |
+
]
|
316 |
+
},
|
317 |
+
{
|
318 |
+
"cell_type": "markdown",
|
319 |
+
"metadata": {},
|
320 |
+
"source": [
|
321 |
+
"### Model"
|
322 |
+
]
|
323 |
+
},
|
324 |
+
{
|
325 |
+
"cell_type": "code",
|
326 |
+
"execution_count": 6,
|
327 |
+
"metadata": {},
|
328 |
+
"outputs": [],
|
329 |
+
"source": [
|
330 |
+
"class GAT(torch.nn.Module):\n",
|
331 |
+
" def __init__(self):\n",
|
332 |
+
" super(GAT, self).__init__()\n",
|
333 |
+
" self.hid = 8\n",
|
334 |
+
" self.in_head = 8\n",
|
335 |
+
" self.out_head = 1\n",
|
336 |
+
" \n",
|
337 |
+
" self.conv1 = GATConv(dataset.num_features, self.hid, heads=self.in_head, dropout=0.6)\n",
|
338 |
+
" self.conv2 = GATConv(self.hid*self.in_head, dataset.num_classes, concat=False,\n",
|
339 |
+
" heads=self.out_head, dropout=0.6)\n",
|
340 |
+
"\n",
|
341 |
+
" def forward(self, data):\n",
|
342 |
+
" x, edge_index = data.x, data.edge_index\n",
|
343 |
+
" \n",
|
344 |
+
" # Dropout before the GAT layer is used to avoid overfitting in small datasets like Cora.\n",
|
345 |
+
" # One can skip them if the dataset is sufficiently large.\n",
|
346 |
+
" \n",
|
347 |
+
" x = F.dropout(x, p=0.6, training=self.training)\n",
|
348 |
+
" x = self.conv1(x, edge_index)\n",
|
349 |
+
" x = F.elu(x)\n",
|
350 |
+
" x = F.dropout(x, p=0.6, training=self.training)\n",
|
351 |
+
" x = self.conv2(x, edge_index)\n",
|
352 |
+
" \n",
|
353 |
+
" return F.log_softmax(x, dim=1)"
|
354 |
+
]
|
355 |
+
},
|
356 |
+
{
|
357 |
+
"cell_type": "markdown",
|
358 |
+
"metadata": {},
|
359 |
+
"source": [
|
360 |
+
"### Train"
|
361 |
+
]
|
362 |
+
},
|
363 |
+
{
|
364 |
+
"cell_type": "code",
|
365 |
+
"execution_count": 7,
|
366 |
+
"metadata": {
|
367 |
+
"scrolled": true
|
368 |
+
},
|
369 |
+
"outputs": [
|
370 |
+
{
|
371 |
+
"name": "stdout",
|
372 |
+
"output_type": "stream",
|
373 |
+
"text": [
|
374 |
+
"tensor(1.9467, grad_fn=<NllLossBackward>)\n",
|
375 |
+
"tensor(0.6551, grad_fn=<NllLossBackward>)\n",
|
376 |
+
"tensor(0.5155, grad_fn=<NllLossBackward>)\n",
|
377 |
+
"tensor(0.6176, grad_fn=<NllLossBackward>)\n",
|
378 |
+
"tensor(0.6120, grad_fn=<NllLossBackward>)\n"
|
379 |
+
]
|
380 |
+
}
|
381 |
+
],
|
382 |
+
"source": [
|
383 |
+
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
|
384 |
+
"\n",
|
385 |
+
"model = GAT().to(device)\n",
|
386 |
+
"\n",
|
387 |
+
"data = dataset[0].to(device)\n",
|
388 |
+
"optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)\n",
|
389 |
+
"\n",
|
390 |
+
"model.train()\n",
|
391 |
+
"for epoch in range(1000):\n",
|
392 |
+
" model.train()\n",
|
393 |
+
" optimizer.zero_grad()\n",
|
394 |
+
" out = model(data)\n",
|
395 |
+
" loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])\n",
|
396 |
+
" \n",
|
397 |
+
" if epoch%200 == 0:\n",
|
398 |
+
" print(loss)\n",
|
399 |
+
" \n",
|
400 |
+
" loss.backward()\n",
|
401 |
+
" optimizer.step()"
|
402 |
+
]
|
403 |
+
},
|
404 |
+
{
|
405 |
+
"cell_type": "markdown",
|
406 |
+
"metadata": {},
|
407 |
+
"source": [
|
408 |
+
"### Evaluate"
|
409 |
+
]
|
410 |
+
},
|
411 |
+
{
|
412 |
+
"cell_type": "code",
|
413 |
+
"execution_count": 8,
|
414 |
+
"metadata": {
|
415 |
+
"scrolled": false
|
416 |
+
},
|
417 |
+
"outputs": [
|
418 |
+
{
|
419 |
+
"name": "stdout",
|
420 |
+
"output_type": "stream",
|
421 |
+
"text": [
|
422 |
+
"Accuracy: 0.8200\n"
|
423 |
+
]
|
424 |
+
}
|
425 |
+
],
|
426 |
+
"source": [
|
427 |
+
"model.eval()\n",
|
428 |
+
"_, pred = model(data).max(dim=1)\n",
|
429 |
+
"correct = float(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())\n",
|
430 |
+
"acc = correct / data.test_mask.sum().item()\n",
|
431 |
+
"print('Accuracy: {:.4f}'.format(acc))"
|
432 |
+
]
|
433 |
+
},
|
434 |
+
{
|
435 |
+
"cell_type": "markdown",
|
436 |
+
"metadata": {},
|
437 |
+
"source": [
|
438 |
+
"## References\n",
|
439 |
+
"\n",
|
440 |
+
"[Graph Attention Networks](https://arxiv.org/abs/1710.10903)\n",
|
441 |
+
"\n",
|
442 |
+
"[Graph attention network, DGL by Zhang et al.](https://docs.dgl.ai/tutorials/models/1_gnn/9_gat.html)\n",
|
443 |
+
"\n",
|
444 |
+
"[Attention Is All You Need](https://arxiv.org/pdf/1706.03762.pdf)\n",
|
445 |
+
"\n",
|
446 |
+
"[The Illustrated Transformer](http://jalammar.github.io/illustrated-transformer/)\n",
|
447 |
+
"\n",
|
448 |
+
"[Mechanics of Seq2seq Models With Attention](https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/)\n",
|
449 |
+
"\n",
|
450 |
+
"[Attention? Attention!](https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html)"
|
451 |
+
]
|
452 |
+
}
|
453 |
+
],
|
454 |
+
"metadata": {
|
455 |
+
"kernelspec": {
|
456 |
+
"display_name": "Python 3",
|
457 |
+
"language": "python",
|
458 |
+
"name": "python3"
|
459 |
+
},
|
460 |
+
"language_info": {
|
461 |
+
"codemirror_mode": {
|
462 |
+
"name": "ipython",
|
463 |
+
"version": 3
|
464 |
+
},
|
465 |
+
"file_extension": ".py",
|
466 |
+
"mimetype": "text/x-python",
|
467 |
+
"name": "python",
|
468 |
+
"nbconvert_exporter": "python",
|
469 |
+
"pygments_lexer": "ipython3",
|
470 |
+
"version": "3.7.5"
|
471 |
+
}
|
472 |
+
},
|
473 |
+
"nbformat": 4,
|
474 |
+
"nbformat_minor": 2
|
475 |
+
}
|
Graph/GAT/code/GAT_PyG.py
ADDED
@@ -0,0 +1,78 @@
1 |
+
import numpy as np
|
2 |
+
import torch
|
3 |
+
import torch.nn as nn
|
4 |
+
import torch.nn.functional as F
|
5 |
+
|
6 |
+
|
7 |
+
from torch_geometric.data import Data
|
8 |
+
from torch_geometric.nn import GATConv
|
9 |
+
from torch_geometric.datasets import Planetoid
|
10 |
+
import torch_geometric.transforms as T
|
11 |
+
|
12 |
+
import warnings
|
13 |
+
warnings.filterwarnings("ignore")
|
14 |
+
|
15 |
+
# Seed for reproducible numbers
|
16 |
+
torch.manual_seed(2020)
|
17 |
+
|
18 |
+
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
19 |
+
|
20 |
+
# Dataset used Cora
|
21 |
+
name_data = 'Cora'
|
22 |
+
dataset = Planetoid(root= '/tmp/' + name_data, name = name_data)
|
23 |
+
dataset.transform = T.NormalizeFeatures()
|
24 |
+
|
25 |
+
print(f"Number of Classes in {name_data}:", dataset.num_classes)
|
26 |
+
print(f"Number of Node Features in {name_data}:", dataset.num_node_features)
|
27 |
+
|
28 |
+
|
29 |
+
# Model Definition
|
30 |
+
class GAT(torch.nn.Module):
|
31 |
+
def __init__(self):
|
32 |
+
super(GAT, self).__init__()
|
33 |
+
self.hid = 8
|
34 |
+
self.in_head = 8
|
35 |
+
self.out_head = 1
|
36 |
+
|
37 |
+
self.conv1 = GATConv(dataset.num_features, self.hid, heads=self.in_head, dropout=0.6)
|
38 |
+
self.conv2 = GATConv(self.hid*self.in_head, dataset.num_classes, concat=False, heads=self.out_head, dropout=0.6)
|
39 |
+
|
40 |
+
def forward(self, data):
|
41 |
+
x, edge_index = data.x, data.edge_index
|
42 |
+
|
43 |
+
x = F.dropout(x, p=0.6, training=self.training)
|
44 |
+
x = self.conv1(x, edge_index)
|
45 |
+
x = F.elu(x)
|
46 |
+
x = F.dropout(x, p=0.6, training=self.training)
|
47 |
+
x = self.conv2(x, edge_index)
|
48 |
+
|
49 |
+
return F.log_softmax(x, dim=1)
|
50 |
+
|
51 |
+
|
52 |
+
# Train
|
53 |
+
model = GAT().to(device)
|
54 |
+
data = dataset[0].to(device)
|
55 |
+
|
56 |
+
# Adam Optimizer
|
57 |
+
optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)
|
58 |
+
|
59 |
+
# Training Loop
|
60 |
+
model.train()
|
61 |
+
for epoch in range(1000):
|
62 |
+
model.train()
|
63 |
+
optimizer.zero_grad()
|
64 |
+
out = model(data)
|
65 |
+
loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
|
66 |
+
|
67 |
+
if epoch%200 == 0:
|
68 |
+
print(loss)
|
69 |
+
|
70 |
+
loss.backward()
|
71 |
+
optimizer.step()
|
72 |
+
|
73 |
+
# Evaluation
|
74 |
+
model.eval()
|
75 |
+
_, pred = model(data).max(dim=1)
|
76 |
+
correct = float(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
|
77 |
+
acc = correct / data.test_mask.sum().item()
|
78 |
+
print('Accuracy: {:.4f}'.format(acc))
|
Graph/GCN/code/GCN.py
ADDED
@@ -0,0 +1,129 @@
1 |
+
#### Loading Required Libraries ####
|
2 |
+
|
3 |
+
import torch
|
4 |
+
import torch.nn as nn
|
5 |
+
import torch.optim as optim
|
6 |
+
import matplotlib.pyplot as plt
|
7 |
+
# get_ipython().run_line_magic('matplotlib', 'notebook')
|
8 |
+
|
9 |
+
import imageio
|
10 |
+
from celluloid import Camera
|
11 |
+
from IPython.display import HTML
|
12 |
+
|
13 |
+
plt.rcParams['animation.ffmpeg_path'] = '/usr/local/bin/ffmpeg'
|
14 |
+
|
15 |
+
|
16 |
+
#### The Convolutional Layer ####
|
17 |
+
# First we will be creating the GCNConv class, which will serve as the Layer creation class.
|
18 |
+
# Every instance of this class takes the adjacency matrix at construction and its forward pass outputs
|
19 |
+
# 'RELU(A_hat * X * W)', which the Net class will use.
|
20 |
+
|
21 |
+
class GCNConv(nn.Module):
|
22 |
+
def __init__(self, A, in_channels, out_channels):
|
23 |
+
super(GCNConv, self).__init__()
|
24 |
+
self.A_hat = A+torch.eye(A.size(0))
|
25 |
+
self.D = torch.diag(torch.sum(self.A_hat,1))
|
26 |
+
self.D = self.D.inverse().sqrt()
|
27 |
+
self.A_hat = torch.mm(torch.mm(self.D, self.A_hat), self.D)
|
28 |
+
self.W = nn.Parameter(torch.rand(in_channels,out_channels, requires_grad=True))
|
29 |
+
|
30 |
+
def forward(self, X):
|
31 |
+
out = torch.relu(torch.mm(torch.mm(self.A_hat, X), self.W))
|
32 |
+
return out
|
33 |
+
|
34 |
+
class Net(torch.nn.Module):
|
35 |
+
def __init__(self,A, nfeat, nhid, nout):
|
36 |
+
super(Net, self).__init__()
|
37 |
+
self.conv1 = GCNConv(A,nfeat, nhid)
|
38 |
+
self.conv2 = GCNConv(A,nhid, nout)
|
39 |
+
|
40 |
+
def forward(self,X):
|
41 |
+
H = self.conv1(X)
|
42 |
+
H2 = self.conv2(H)
|
43 |
+
return H2
|
44 |
+
|
45 |
+
|
46 |
+
# 'A' is the adjacency matrix: it contains 1 at position (i,j)
|
47 |
+
# if there is an edge between node i and node j.
|
48 |
+
A=torch.Tensor([[0,1,1,1,1,1,1,1,1,0,1,1,1,1,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0],
|
49 |
+
[1,0,1,1,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,1,0,0,0],
|
50 |
+
[1,1,0,1,0,0,0,1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0],
|
51 |
+
[1,1,1,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
52 |
+
[1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
53 |
+
[1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
54 |
+
[1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
55 |
+
[1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
56 |
+
[1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1],
|
57 |
+
[0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
|
58 |
+
[1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
59 |
+
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
60 |
+
[1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
61 |
+
[1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
|
62 |
+
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
|
63 |
+
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
|
64 |
+
[0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
65 |
+
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
66 |
+
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
|
67 |
+
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
|
68 |
+
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
|
69 |
+
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
|
70 |
+
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
|
71 |
+
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,1,1],
|
72 |
+
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0],
|
73 |
+
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,0,0],
|
74 |
+
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1],
|
75 |
+
[0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,1],
|
76 |
+
[0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1],
|
77 |
+
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1],
|
78 |
+
[0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
|
79 |
+
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,1,1],
|
80 |
+
[0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,1,0,1,1,0,0,0,0,0,1,1,1,0,1],
|
81 |
+
[0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,1,0,0,1,1,1,0,1,1,0,0,1,1,1,1,1,1,1,0]
|
82 |
+
])
|
83 |
+
|
84 |
+
|
85 |
+
# label for admin(node 1) and instructor(node 34) so only these two contain the class label(0 and 1)
|
86 |
+
# all other are set to -1, meaning predicted value of these nodes is ignored in the loss function.
|
87 |
+
target=torch.tensor([0,-1,-1,-1, -1, -1, -1, -1,-1,-1,-1,-1, -1, -1, -1, -1,-1,-1,-1,-1, -1, -1, -1, -1,-1,-1,-1,-1, -1, -1, -1, -1,-1,1])
|
88 |
+
|
89 |
+
|
90 |
+
# X is the feature matrix.
|
91 |
+
# Using the one-hot encoding corresponding to the index of the node.
|
92 |
+
X=torch.eye(A.size(0))
|
93 |
+
|
94 |
+
|
95 |
+
# Network with 10 features in the hidden layer and 2 in output layer.
|
96 |
+
T=Net(A,X.size(0), 10, 2)
|
97 |
+
|
98 |
+
|
99 |
+
#### Training ####
|
100 |
+
|
101 |
+
criterion = torch.nn.CrossEntropyLoss(ignore_index=-1)
|
102 |
+
optimizer = optim.SGD(T.parameters(), lr=0.01, momentum=0.9)
|
103 |
+
|
104 |
+
loss=criterion(T(X),target)
|
105 |
+
|
106 |
+
|
107 |
+
#### Plot animation using celluloid ####
|
108 |
+
fig = plt.figure()
|
109 |
+
camera = Camera(fig)
|
110 |
+
|
111 |
+
for i in range(200):
|
112 |
+
optimizer.zero_grad()
|
113 |
+
loss=criterion(T(X), target)
|
114 |
+
loss.backward()
|
115 |
+
optimizer.step()
|
116 |
+
l = T(X)
|
117 |
+
|
118 |
+
plt.scatter(l.detach().numpy()[:,0],l.detach().numpy()[:,1],c=[0, 0, 0, 0 ,0 ,0 ,0, 0, 1, 1, 0 ,0, 0, 0, 1 ,1 ,0 ,0 ,1, 0, 1, 0 ,1 ,1, 1, 1, 1 ,1 ,1, 1, 1, 1, 1, 1 ])
|
119 |
+
for j in range(l.shape[0]):
|
120 |
+
text_plot = plt.text(l[j,0], l[j,1], str(j+1))
|
121 |
+
|
122 |
+
camera.snap()
|
123 |
+
|
124 |
+
if i%20==0:
|
125 |
+
print("Cross Entropy Loss: =", loss.item())
|
126 |
+
|
127 |
+
animation = camera.animate(blit=False, interval=150)
|
128 |
+
animation.save('./train_karate_animation.mp4', writer='ffmpeg', fps=60)
|
129 |
+
HTML(animation.to_html5_video())
|
Graph/GCN/code/GCN_Blog+Code.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
Graph/GCN/code/GCN_PyG.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
Graph/GCN/code/GCN_PyG.py
ADDED
@@ -0,0 +1,78 @@
1 |
+
#### Imports ####
|
2 |
+
from torch_geometric.datasets import Planetoid
|
3 |
+
import torch
|
4 |
+
import torch.nn.functional as F
|
5 |
+
from torch_geometric.nn import MessagePassing
|
6 |
+
from torch_geometric.utils import add_self_loops, degree
|
7 |
+
|
8 |
+
|
9 |
+
#### Loading the Dataset ####
|
10 |
+
dataset = Planetoid(root='/tmp/Cora', name='Cora')
|
11 |
+
|
12 |
+
|
13 |
+
#### The Graph Convolution Layer ####
|
14 |
+
class GraphConvolution(MessagePassing):
|
15 |
+
def __init__(self, in_channels, out_channels,bias=True, **kwargs):
|
16 |
+
super(GraphConvolution, self).__init__(aggr='add', **kwargs)
|
17 |
+
self.lin = torch.nn.Linear(in_channels, out_channels,bias=bias)
|
18 |
+
|
19 |
+
def forward(self, x, edge_index):
|
20 |
+
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
|
21 |
+
x = self.lin(x)
|
22 |
+
return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)
|
23 |
+
|
24 |
+
def message(self, x_j, edge_index, size):
|
25 |
+
row, col = edge_index
|
26 |
+
deg = degree(row, size[0], dtype=x_j.dtype)
|
27 |
+
deg_inv_sqrt = deg.pow(-0.5)
|
28 |
+
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
|
29 |
+
return norm.view(-1, 1) * x_j
|
30 |
+
|
31 |
+
def update(self, aggr_out):
|
32 |
+
return aggr_out
|
33 |
+
|
34 |
+
|
35 |
+
class Net(torch.nn.Module):
|
36 |
+
def __init__(self,nfeat, nhid, nclass, dropout):
|
37 |
+
super(Net, self).__init__()
|
38 |
+
self.conv1 = GraphConvolution(nfeat, nhid)
|
39 |
+
self.conv2 = GraphConvolution(nhid, nclass)
|
40 |
+
self.dropout=dropout
|
41 |
+
|
42 |
+
def forward(self, data):
|
43 |
+
x, edge_index = data.x, data.edge_index
|
44 |
+
|
45 |
+
x = self.conv1(x, edge_index)
|
46 |
+
x = F.relu(x)
|
47 |
+
x = F.dropout(x, self.dropout, training=self.training)
|
48 |
+
x = self.conv2(x, edge_index)
|
49 |
+
|
50 |
+
return F.log_softmax(x, dim=1)
|
51 |
+
|
52 |
+
|
53 |
+
nfeat=dataset.num_node_features
|
54 |
+
nhid=16
|
55 |
+
nclass=dataset.num_classes
|
56 |
+
dropout=0.5
|
57 |
+
|
58 |
+
|
59 |
+
#### Training ####
|
60 |
+
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
61 |
+
model = Net(nfeat, nhid, nclass, dropout).to(device)
|
62 |
+
data = dataset[0].to(device)
|
63 |
+
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
|
64 |
+
|
65 |
+
model.train()
|
66 |
+
for epoch in range(200):
|
67 |
+
optimizer.zero_grad()
|
68 |
+
out = model(data)
|
69 |
+
loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
|
70 |
+
loss.backward()
|
71 |
+
optimizer.step()
|
72 |
+
|
73 |
+
|
74 |
+
model.eval()
|
75 |
+
_, pred = model(data).max(dim=1)
|
76 |
+
correct = float(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
|
77 |
+
acc = correct / data.test_mask.sum().item()
|
78 |
+
print('Accuracy: {:.4f}'.format(acc))
|
Graph/GraphSAGE/code/GraphSAGE.py
ADDED
@@ -0,0 +1,209 @@
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
from torch.nn import init
|
4 |
+
from torch.autograd import Variable
import torch.nn.functional as F  # F.relu is used in Encoder.forward
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
import time
|
8 |
+
import random
|
9 |
+
from sklearn.metrics import f1_score
|
10 |
+
from collections import defaultdict
|
11 |
+
|
12 |
+
#from graphsage.encoders import Encoder
|
13 |
+
#from graphsage.aggregators import MeanAggregator
|
14 |
+
|
15 |
+
"""
|
16 |
+
Simple supervised GraphSAGE model as well as examples running the model
|
17 |
+
on the Cora and Pubmed datasets.
|
18 |
+
"""
|
19 |
+
|
20 |
+
class MeanAggregator(nn.Module):
|
21 |
+
"""
|
22 |
+
Aggregates a node's embeddings using mean of neighbors' embeddings
|
23 |
+
"""
|
24 |
+
def __init__(self, features, cuda=False, gcn=False):
|
25 |
+
"""
|
26 |
+
Initializes the aggregator for a specific graph.
|
27 |
+
features -- function mapping LongTensor of node ids to FloatTensor of feature values.
|
28 |
+
cuda -- whether to use GPU
|
29 |
+
gcn --- whether to perform concatenation GraphSAGE-style, or add self-loops GCN-style
|
30 |
+
"""
|
31 |
+
|
32 |
+
super(MeanAggregator, self).__init__()
|
33 |
+
|
34 |
+
self.features = features
|
35 |
+
self.cuda = cuda
|
36 |
+
self.gcn = gcn
|
37 |
+
|
38 |
+
def forward(self, nodes, to_neighs, num_sample=10):
|
39 |
+
"""
|
40 |
+
nodes --- list of nodes in a batch
|
41 |
+
to_neighs --- list of sets, each set is the set of neighbors for node in batch
|
42 |
+
num_sample --- number of neighbors to sample. No sampling if None.
|
43 |
+
"""
|
44 |
+
# Local pointers to functions (speed hack)
|
45 |
+
_set = set
|
46 |
+
if not num_sample is None:
|
47 |
+
_sample = random.sample
|
48 |
+
samp_neighs = [_set(_sample(to_neigh,
|
49 |
+
num_sample,
|
50 |
+
)) if len(to_neigh) >= num_sample else to_neigh for to_neigh in to_neighs]
|
51 |
+
else:
|
52 |
+
samp_neighs = to_neighs
|
53 |
+
|
54 |
+
if self.gcn:
|
55 |
+
samp_neighs = [samp_neigh | set([nodes[i]]) for i, samp_neigh in enumerate(samp_neighs)]  # include the node itself (set union)
|
56 |
+
unique_nodes_list = list(set.union(*samp_neighs))
|
57 |
+
# print ("\n unl's size=",len(unique_nodes_list))
|
58 |
+
unique_nodes = {n:i for i,n in enumerate(unique_nodes_list)}
|
59 |
+
mask = Variable(torch.zeros(len(samp_neighs), len(unique_nodes)))
|
60 |
+
column_indices = [unique_nodes[n] for samp_neigh in samp_neighs for n in samp_neigh]
|
61 |
+
row_indices = [i for i in range(len(samp_neighs)) for j in range(len(samp_neighs[i]))]
|
62 |
+
mask[row_indices, column_indices] = 1
|
63 |
+
if self.cuda:
|
64 |
+
mask = mask.cuda()
|
65 |
+
num_neigh = mask.sum(1, keepdim=True)
|
66 |
+
mask = mask.div(num_neigh)
|
67 |
+
if self.cuda:
|
68 |
+
embed_matrix = self.features(torch.LongTensor(unique_nodes_list).cuda())
|
69 |
+
else:
|
70 |
+
embed_matrix = self.features(torch.LongTensor(unique_nodes_list))
|
71 |
+
to_feats = mask.mm(embed_matrix)
|
72 |
+
return to_feats
|
73 |
+
|
74 |
+
class Encoder(nn.Module):
|
75 |
+
"""
|
76 |
+
Encodes a node using the 'convolutional' GraphSAGE approach
|
77 |
+
"""
|
78 |
+
def __init__(self, features, feature_dim,
|
79 |
+
embed_dim, adj_lists, aggregator,
|
80 |
+
num_sample=10,
|
81 |
+
base_model=None, gcn=False, cuda=False,
|
82 |
+
feature_transform=False):
|
83 |
+
super(Encoder, self).__init__()
|
84 |
+
|
85 |
+
self.features = features
|
86 |
+
self.feat_dim = feature_dim
|
87 |
+
self.adj_lists = adj_lists
|
88 |
+
self.aggregator = aggregator
|
89 |
+
self.num_sample = num_sample
|
90 |
+
if base_model != None:
|
91 |
+
self.base_model = base_model
|
92 |
+
|
93 |
+
self.gcn = gcn
|
94 |
+
self.embed_dim = embed_dim
|
95 |
+
self.cuda = cuda
|
96 |
+
self.aggregator.cuda = cuda
|
97 |
+
self.weight = nn.Parameter(
|
98 |
+
torch.FloatTensor(embed_dim, self.feat_dim if self.gcn else 2 * self.feat_dim))
|
99 |
+
init.xavier_uniform_(self.weight)
|
100 |
+
|
101 |
+
def forward(self, nodes):
|
102 |
+
"""
|
103 |
+
Generates embeddings for a batch of nodes.
|
104 |
+
nodes -- list of nodes
|
105 |
+
"""
|
106 |
+
neigh_feats = self.aggregator.forward(nodes, [self.adj_lists[int(node)] for node in nodes],
|
107 |
+
self.num_sample)
|
108 |
+
if not self.gcn:
|
109 |
+
if self.cuda:
|
110 |
+
self_feats = self.features(torch.LongTensor(nodes).cuda())
|
111 |
+
else:
|
112 |
+
self_feats = self.features(torch.LongTensor(nodes))
|
113 |
+
combined = torch.cat([self_feats, neigh_feats], dim=1)
|
114 |
+
else:
|
115 |
+
combined = neigh_feats
|
116 |
+
combined = F.relu(self.weight.mm(combined.t()))
|
117 |
+
return combined
|
118 |
+
|
119 |
+
|
120 |
+
class SupervisedGraphSage(nn.Module):
|
121 |
+
|
122 |
+
def __init__(self, num_classes, enc):
|
123 |
+
super(SupervisedGraphSage, self).__init__()
|
124 |
+
self.enc = enc
|
125 |
+
self.xent = nn.CrossEntropyLoss()
|
126 |
+
|
127 |
+
self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
|
128 |
+
init.xavier_uniform_(self.weight)
|
129 |
+
|
130 |
+
def forward(self, nodes):
|
131 |
+
embeds = self.enc(nodes)
|
132 |
+
scores = self.weight.mm(embeds)
|
133 |
+
return scores.t()
|
134 |
+
|
135 |
+
def loss(self, nodes, labels):
|
136 |
+
scores = self.forward(nodes)
|
137 |
+
return self.xent(scores, labels.squeeze())
|
138 |
+
|
139 |
+
def load_cora():
|
140 |
+
num_nodes = 2708
|
141 |
+
num_feats = 1433
|
142 |
+
feat_data = np.zeros((num_nodes, num_feats))
|
143 |
+
labels = np.empty((num_nodes,1), dtype=np.int64)
|
144 |
+
node_map = {}
|
145 |
+
label_map = {}
|
146 |
+
with open("../cora/cora.content") as fp:
|
147 |
+
for i,line in enumerate(fp):
|
148 |
+
info = line.strip().split()
|
149 |
+
feat_data[i,:] = [float(x) for x in info[1:-1]]
|
150 |
+
node_map[info[0]] = i
|
151 |
+
if not info[-1] in label_map:
|
152 |
+
label_map[info[-1]] = len(label_map)
|
153 |
+
labels[i] = label_map[info[-1]]
|
154 |
+
|
155 |
+
adj_lists = defaultdict(set)
|
156 |
+
with open("../cora/cora.cites") as fp:
|
157 |
+
for i,line in enumerate(fp):
|
158 |
+
info = line.strip().split()
|
159 |
+
paper1 = node_map[info[0]]
|
160 |
+
paper2 = node_map[info[1]]
|
161 |
+
adj_lists[paper1].add(paper2)
|
162 |
+
adj_lists[paper2].add(paper1)
|
163 |
+
return feat_data, labels, adj_lists
|
164 |
+
|
165 |
+
def run_cora():
|
166 |
+
np.random.seed(1)
|
167 |
+
random.seed(1)
|
168 |
+
num_nodes = 2708
|
169 |
+
feat_data, labels, adj_lists = load_cora()
|
170 |
+
features = nn.Embedding(2708, 1433)
|
171 |
+
features.weight = nn.Parameter(torch.FloatTensor(feat_data), requires_grad=False)
|
172 |
+
# features.cuda()
|
173 |
+
|
174 |
+
agg1 = MeanAggregator(features, cuda=True)
|
175 |
+
enc1 = Encoder(features, 1433, 128, adj_lists, agg1, gcn=True, cuda=False)
|
176 |
+
agg2 = MeanAggregator(lambda nodes : enc1(nodes).t(), cuda=False)
|
177 |
+
enc2 = Encoder(lambda nodes : enc1(nodes).t(), enc1.embed_dim, 128, adj_lists, agg2,
|
178 |
+
base_model=enc1, gcn=True, cuda=False)
|
179 |
+
enc1.num_sample = 5
|
180 |
+
enc2.num_sample = 5
|
181 |
+
|
182 |
+
graphsage = SupervisedGraphSage(7, enc2)
|
183 |
+
# graphsage.cuda()
|
184 |
+
rand_indices = np.random.permutation(num_nodes)
|
185 |
+
test = rand_indices[:1000]
|
186 |
+
val = rand_indices[1000:1500]
|
187 |
+
train = list(rand_indices[1500:])
|
188 |
+
|
189 |
+
optimizer = torch.optim.SGD(filter(lambda p : p.requires_grad, graphsage.parameters()), lr=0.7)
|
190 |
+
times = []
|
191 |
+
for batch in range(100):
|
192 |
+
batch_nodes = train[:256]
|
193 |
+
random.shuffle(train)
|
194 |
+
start_time = time.time()
|
195 |
+
optimizer.zero_grad()
|
196 |
+
loss = graphsage.loss(batch_nodes,
|
197 |
+
Variable(torch.LongTensor(labels[np.array(batch_nodes)])))
|
198 |
+
loss.backward()
|
199 |
+
optimizer.step()
|
200 |
+
end_time = time.time()
|
201 |
+
times.append(end_time-start_time)
|
202 |
+
print (batch, loss.item())
|
203 |
+
|
204 |
+
val_output = graphsage.forward(val)
|
205 |
+
print ("Validation F1:", f1_score(labels[val], val_output.data.numpy().argmax(axis=1), average="micro"))
|
206 |
+
print ("Average batch time:", np.mean(times))
|
207 |
+
|
208 |
+
if __name__ == "__main__":
|
209 |
+
run_cora()
|
Graph/GraphSAGE/code/GraphSAGE_Code+Blog.ipynb
ADDED
@@ -0,0 +1,668 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "markdown",
|
5 |
+
"metadata": {},
|
6 |
+
"source": [
|
7 |
+
"## GraphSAGE (SAmple and aggreGatE) : Inductive Learning on Graphs"
|
8 |
+
]
|
9 |
+
},
|
10 |
+
{
|
11 |
+
"cell_type": "markdown",
|
12 |
+
"metadata": {},
|
13 |
+
"source": [
|
14 |
+
"### Introduction\n",
|
15 |
+
"\n",
|
16 |
+
"In the previous blogs, we covered GCN and DeepWalk, which are methods to generate node embeddings. The basic\n",
|
17 |
+
"idea behind node embedding approaches is to use dimensionality reduction techniques to distill the\n",
|
18 |
+
"high-dimensional information about a node’s neighborhood into a dense vector embedding. These\n",
|
19 |
+
"node embeddings can then be fed to downstream machine learning systems and aid in tasks such as\n",
|
20 |
+
"node classification, clustering, and link prediction. Let us move on to a slightly different problem. Now, we need the embeddings for each node of a graph where new nodes are continuously being added. A possible way to do this would be to rerun the entire model (GCN or DeepWalk) on the new graph, but it is computationally expensive. Today we will be covering GraphSAGE, a method that will allow us to get embeddings for such graphs is a much easier way. Unlike embedding approaches that are based on matrix factorization, GraphSAGE leverage node features (e.g., text attributes, node profile information, node degrees) in order to learn an embedding function that generalizes to unseen nodes.\n",
|
21 |
+
"\n",
|
22 |
+
"\n",
|
23 |
+
"GraphSAGE is capable of learning\n",
|
24 |
+
"structural information about a node’s role in a graph, despite the fact that it is inherently based on\n",
|
25 |
+
"features"
|
26 |
+
]
|
27 |
+
},
|
28 |
+
{
|
29 |
+
"cell_type": "markdown",
|
30 |
+
"metadata": {},
|
31 |
+
"source": [
|
32 |
+
"### The start\n",
|
33 |
+
"In the (GCN or DeepWalk) model, the graph was fixed beforehand, let's say the 'Zachary karate club', some model was trained on it, and then we could make predictions about a person X, if he/she went to a particular part of the club after separation.\n",
|
34 |
+
"\n",
|
35 |
+
"![Zachary Karate Club](img/karate_club.png \"Karate Club\")\n",
|
36 |
+
"\n",
|
37 |
+
"\n",
|
38 |
+
"In this problem, the nodes in this graph were fixed from the beginning, and all the predictions were also to be made on these fixed nodes. In contrast to this, take an example where 'Youtube' videos are the nodes and assume there is an edge between related videos, and say we need to classify these videos depending on the content. If we take the same model as in the previous dataset, we can classify all these videos, but whenever a new video is added to 'YouTube', we will have to retrain the model on the entire new dataset again to classify it. This is not feasible as there will be too many videos or nodes being added everytime for us to retrain.\n",
|
39 |
+
"\n",
|
40 |
+
"To solve this issue, what we can do is not to learn embeddings for each node but to learn a function which, given the features and edges joining this node, will give the embeddings for the node. "
|
41 |
+
]
|
42 |
+
},
|
43 |
+
{
|
44 |
+
"cell_type": "markdown",
|
45 |
+
"metadata": {},
|
46 |
+
"source": [
|
47 |
+
"## Aggregating Neighbours\n",
|
48 |
+
"\n",
|
49 |
+
"The idea is to generate embeddings, based on the neighbourhood of a given node. In other words, the embedding of a node will depend upon the embedding of the nodes it is connected to. Like in the graph below, the node 1 and 2 are likely to be more similar than node 1 and 5.\n",
|
50 |
+
"![Simple_graph](img/example_graph_1.png \"Simple Graph\")\n",
|
51 |
+
"\n",
|
52 |
+
"How can this idea be formulated?\n",
|
53 |
+
"\n",
|
54 |
+
"First, we assign random values to the embeddings, and on each step, we will set the value of the embedding as the average of embeddings for all the nodes it is directly connected. The following example shows the working on a simple linear graph.\n",
|
55 |
+
"\n",
|
56 |
+
"![Mean_Embeddings](img/animation.gif \"Mean Embeddings\")\n",
|
57 |
+
"\n",
|
58 |
+
"This is a straightforward idea, which can be generalized by representing it in the following way,\n",
|
59 |
+
"![Simple_Neighbours](img/simple_neighbours.png \"Simple Neighbours\")\n",
|
60 |
+
"\n",
|
61 |
+
"Here The Black Box joining A with B, C, D represents some function of the A, B, C, D. ( In the above animation, it was the mean function). We can replace this box by any function like say sum or max. This function is known as the aggregator function.\n",
|
62 |
+
"\n",
|
63 |
+
"Now let's try to make it more general by using not only the neighbours of a node but also the neighbours of the neighbours. The first question is how to make use of neighbours of neighbours. The way which we will be using here is to first generate each node's embedding in the first step by using only its neighbours just like we did above, and then in the second step, we will use these embeddings to generate the new embeddings. Take a look at the following \n",
|
64 |
+
"\n",
|
65 |
+
"![One_Layer_Aggregation](img/aggregation_1.png \"Aggregation Demo\")\n",
|
66 |
+
"\n",
|
67 |
+
"The numbers written along with the nodes are the value of embedding at the time, T=0.\n",
|
68 |
+
"\n",
|
69 |
+
"Values of embedding after one step are as follows:\n",
|
70 |
+
"\n",
|
71 |
+
"![Animation_aggregation_layer_1](img/animation_2_bw.gif \"Aggregation Layer 1\")\n",
|
72 |
+
"\n",
|
73 |
+
"So after one iteration, the values are as follows:\n",
|
74 |
+
"\n",
|
75 |
+
"![Second_Layer_Aggregation](img/aggregation_2.png \"Aggregation After One Layer\")\n",
|
76 |
+
"\n",
|
77 |
+
"Repeating the same procedure on this new graph, we get (try verifying yourself)\n",
|
78 |
+
"\n",
|
79 |
+
"![Third_Layer_Aggregation](img/aggregation_3.png \"Aggregation After Two Layer\")\n",
|
80 |
+
"\n",
|
81 |
+
"Lets try to do some analysis of the aggregation. Represent by $A^{(0)}$ the initial value of embedding of A(i.e. 0.1), by $A^{(1)}$ the value after one layer(i.e. 0.25) similarly $A^{(2)}$, $B^{(0)}$, $B^{(1)}$ and all other values.\n",
|
82 |
+
"\n",
|
83 |
+
"Clearly \n",
|
84 |
+
"\n",
|
85 |
+
"$$A^{(1)} = \\frac{(A^{(0)} + B^{(0)} + C^{(0)} + D^{(0)})}{4}$$\n",
|
86 |
+
"\n",
|
87 |
+
"Similarly\n",
|
88 |
+
"\n",
|
89 |
+
"$$A^{(2)} = \\frac{(A^{(1)} + B^{(1)} + C^{(1)} + D^{(1)})}{4}$$\n",
|
90 |
+
"\n",
|
91 |
+
"Writing all the value in the RHS in terms of initial values of embeddings we get\n",
|
92 |
+
"\n",
|
93 |
+
"$$A^{(2)} = \\frac{\\frac{(A^{(0)} + B^{(0)} + C^{(0)} + D^{(0)})}{4} + \\frac{A^{(0)}+B^{(0)}+C^{(0)}}{3} + \\frac{A^{(0)}+B^{(0)}+C^{(0)}+E^{(0)} +F^{(0)}}{5} + \\frac{A^{(0)}+D^{(0)}}{2}}{4}$$\n",
|
94 |
+
"\n",
|
95 |
+
"If you look closely, you will see that all the nodes that were either neighbour of A or neighbour of some neighbour of A are present in this term. It is equivalent to saying that all nodes that have a distance of less than or equal to 2 edges from A are influencing this term. Had there been a node G connected only to node F. then it is clearly at a distance of 3 from A and hence won't be influencing this term.\n",
|
96 |
+
"\n",
|
97 |
+
"Generalizing this we can say that if we repeat this produce N times, then all the nodes ( and only those nodes) that are at a within a distance N from the node will be influencing the value of the terms.\n",
|
98 |
+
"\n",
|
99 |
+
"If we replace the mean function, with some other function, lets say $F$, then, in this case, we can write,\n",
|
100 |
+
"\n",
|
101 |
+
"$$A^{(1)} = F(A^{(0)} , B^{(0)} , C^{(0)} , D^{(0)})$$\n",
|
102 |
+
"\n",
|
103 |
+
"Or more generally\n",
|
104 |
+
"\n",
|
105 |
+
"$$A^{(k)} = F(A^{(k-1)} , B^{(k-1)} , C^{(k-1)} , D^{(k-1)})$$\n",
|
106 |
+
"\n",
|
107 |
+
"If we denote by $N(v)$ the set of neighbours of $v$, so $N(A)=\\{B, C, D\\}$ and $N(A)^{(k)}=\\{B^{(k)}, C^{(k)}, D^{(k)}\\}$, the above equation can be simplified as\n",
|
108 |
+
"\n",
|
109 |
+
"$$A^{(k)} = F(A^{(k-1)}, N(A)^{(k-1)} )$$\n",
|
110 |
+
"\n",
|
111 |
+
"This process can be visualized as:\n",
|
112 |
+
"\n",
|
113 |
+
"![Sampling](img/showing_1.png \"Showing one\")\n",
|
114 |
+
"\n",
|
115 |
+
"This method is quite effective in generating node embeddings. But there is an issue if a new node is added to the graph how can get its embeddings? This is an issue that cannot be tackled with this type of model. Clearly, something new is needed, but what? "
|
116 |
+
]
|
117 |
+
},
|
118 |
+
{
|
119 |
+
"cell_type": "markdown",
|
120 |
+
"metadata": {},
|
121 |
+
"source": [
|
122 |
+
"One alternative that we can try is to replace the function F by multiple functions such that in the first layer it is \n",
|
123 |
+
"F1, in second layer F2 and so on, and then fixing the number of layers that we want, let's say k.\n",
|
124 |
+
"\n",
|
125 |
+
"So our embedding generator would be like this,\n",
|
126 |
+
"![Sampling_2](img/showing_2.png \"Showing one\")"
|
127 |
+
]
|
128 |
+
},
|
129 |
+
{
|
130 |
+
"cell_type": "markdown",
|
131 |
+
"metadata": {},
|
132 |
+
"source": [
|
133 |
+
"Let's formalize our notation a bit now so that it is easy to understand things.\n",
|
134 |
+
"\n",
|
135 |
+
"1. Instead of writing $A^{(k)}$ we will be writing $h_{A}^{k}$\n",
|
136 |
+
"2. Rename the functions $F1$, $F2$ and so on as, $AGGREGATE_{1}$, $AGGREGATE_{2}$ and so on. i.e, $Fk$ becomes $AGGREGATE_{k}$.\n",
|
137 |
+
"3. There are a total of $K$ aggregation functions.\n",
|
138 |
+
"3. Let our graph be represented by $G(V, E)$ where $V$ is the set of vertices and $E$ is the set of edges."
|
139 |
+
]
|
140 |
+
},
|
141 |
+
{
|
142 |
+
"cell_type": "markdown",
|
143 |
+
"metadata": {},
|
144 |
+
"source": [
|
145 |
+
"## What GraphSAGE proposes?\n",
|
146 |
+
"\n",
|
147 |
+
"What we have been doing by now can be written as \n",
|
148 |
+
"\n",
|
149 |
+
"Initialise($h_{v}^{0}$) $\\forall v \\in V$ <br>\n",
|
150 |
+
"for $k=1..K$ do <br>\n",
|
151 |
+
" for $v\\in V$ do<br>\n",
|
152 |
+
" $h_{v}^{k}=AGGREGATE_{k}(h_{v}^{k-1}, \\{h_{u}^{k-1} \\forall u \\in N(v)\\})$\n",
|
153 |
+
"\n",
|
154 |
+
"$h_{v}^{k}$ will now be containing the embeddings\n",
|
155 |
+
"\n",
|
156 |
+
"### Some issues with this\n",
|
157 |
+
"\n",
|
158 |
+
"Please take a look at the sample graph that we discussed above, in this graph even though the initial embeddings for $E$ and $F$ were different, but because their neighbours were same they ended with the same embedding, this is not a good thing as there must be at least some difference between their embeddings. \n",
|
159 |
+
"\n",
|
160 |
+
"GraphSAGE proposes an interesting idea to deal with it. Rather than passing both of them into the same aggregating function, what we will do is to pass into aggregating function only the neighbours and then concatenating this vector with the vector of that node. This can be written as:\n",
|
161 |
+
"\n",
|
162 |
+
"$h_{v}^{k}=CONCAT(h_{v}^{k-1},AGGREGATE_{k}( \\{h_{u}^{k-1} \\forall u \\in N(v)\\}))$\n",
|
163 |
+
"\n",
|
164 |
+
"In this way, we can prevent two vectors from attaining exactly the same embedding.\n",
|
165 |
+
"\n",
|
166 |
+
"Lets now add some non-linearity to make it more expressive. So it becomes\n",
|
167 |
+
"\n",
|
168 |
+
"$h_{v}^{k}=\\sigma[W^{(k)}.CONCAT(h_{v}^{k-1},AGGREGATE_{k}( \\{h_{u}^{k-1} \\forall u \\in N(v)\\}))]$\n",
|
169 |
+
"\n",
|
170 |
+
"Where \\sigma is some non-linear function (e.g. RELU, sigmoid, etc.) and $W^{(k)}$ is the weight matrix, each layer will have one such matrix. If you looked closely, you would have seen that there no trainable parameters till now in our model. The $W$ matrix has been added to have something that the model can learn.\n",
|
171 |
+
"\n",
|
172 |
+
"One more thing we will add is to normalize the value of h after each iteration, i.e., divide them by their L2 norm, and hence our complete algorithm becomes.\n",
|
173 |
+
"\n",
|
174 |
+
"![GraphSAGE_Algorithm](img/graphsage_algorithm.png \"GraphSAGE\")\n",
|
175 |
+
"\n"
|
176 |
+
]
|
177 |
+
},
|
178 |
+
{
|
179 |
+
"cell_type": "markdown",
|
180 |
+
"metadata": {},
|
181 |
+
"source": [
|
182 |
+
"To get the model learning, we need the loss function. For the general unsupervised learning problem, the following loss problem serves pretty well.\n",
|
183 |
+
"\n",
|
184 |
+
"![Loss](img/Loss_function.png \"Loss\")\n",
|
185 |
+
"\n",
|
186 |
+
"This graph-based loss function encourages nearby nodes to have similar representations, while enforcing\n",
|
187 |
+
"that the representations of disparate nodes are highly distinct.\n",
|
188 |
+
"\n",
|
189 |
+
"\n",
|
190 |
+
"For supervised learning, either we can learn the embeddings first and then use those embeddings for the downstream task or combine both the part of learning embeddings and the part of applying these embeddings in the task into a single end to end models and then use the loss for the final part, and backpropagate to learn the embeddings while solving the task simultaneously."
|
191 |
+
]
|
192 |
+
},
|
193 |
+
{
|
194 |
+
"cell_type": "markdown",
|
195 |
+
"metadata": {},
|
196 |
+
"source": [
|
197 |
+
"# Aggregator Architectures\n",
|
198 |
+
"One of the critical difference between GCN and Graphsage is the generalisation of the aggregation function, which was the mean aggregator in GCN. So rather than only taking the average, we use generalised aggregation function in GraphSAGE. GraphSAGE owes its inductivity to its aggregator functions.\n",
|
199 |
+
"\n",
|
200 |
+
"## Mean aggregator \n",
|
201 |
+
"Mean aggregator is as simple as you thought it would be. In mean aggregator we simply\n",
|
202 |
+
"take the elementwise mean of the vectors in **{h<sub>u</sub><sup>k-1</sup> ∀u ∈ N (v)}**.\n",
|
203 |
+
"In other words, we can average embeddings of all nodes in the neighbourhood to construct the neighbourhood embedding.\n",
|
204 |
+
"![mean aggregator](img/ma.png)\n",
|
205 |
+
"\n",
|
206 |
+
"## Pool aggregator\n",
|
207 |
+
"Until now, we were using a weighted average type of approach. But we could also use pooling type of approach; for example, we can do elementwise min or max pooling. So this would be another option where we are taking the messages from our neighbours, transforming them and applying some pooling technique(max-pooling or min pooling).\n",
|
208 |
+
"![pool aggregator](img/pa.png)\n",
|
209 |
+
"\n",
|
210 |
+
"In the above equation, max denotes the elementwise max operator, and σ is a nonlinear activation function (yes you are right it can be ReLU). Please note that the function applied before the max-pooling can be an arbitrarily deep multi-layer perceptron, but in the original paper, simple single-layer architectures are preferred.\n",
|
211 |
+
"\n",
|
212 |
+
"## LSTM aggregator\n",
|
213 |
+
"We could also use a deep neural network like LSTM to learn how to aggregate the neighbours. Order invariance is important in the aggregator function, but since LSTM is not order invariant, we would have to train the LSTM over several random orderings or permutation of neighbours to make sure that this will learn that order is not essential."
|
214 |
+
]
|
215 |
+
},
|
216 |
+
{
|
217 |
+
"cell_type": "markdown",
|
218 |
+
"metadata": {},
|
219 |
+
"source": [
|
220 |
+
"# Inductive capability\n",
|
221 |
+
"One interesting property of GraphSAGE is that we can train our model on one subset of the graph and apply this model on another subset of this graph. The reason we can do this is that we can do parameter sharing, i.e. those processing boxes are the same everywhere (W and B are shared across all the computational graphs or architectures). So when a new architecture comes into play, we can borrow the parameters (W and B), do a forward pass, and we get our prediction. \n",
|
222 |
+
"![sharing parameters](img/sharing_param.png)"
|
223 |
+
]
|
224 |
+
},
|
225 |
+
{
|
226 |
+
"cell_type": "markdown",
|
227 |
+
"metadata": {},
|
228 |
+
"source": [
|
229 |
+
"This property of GraphSAGE is advantageous in the prediction of protein interaction. For example, we can train our model on protein interaction graph from model organism A (left-hand side in the figure below) and generate embedding on newly collected data from other model organism say B (right-hand side in the figure).\n",
|
230 |
+
"![protein_interaction](img/protein.png)"
|
231 |
+
]
|
232 |
+
},
|
233 |
+
{
|
234 |
+
"cell_type": "markdown",
|
235 |
+
"metadata": {},
|
236 |
+
"source": [
|
237 |
+
"We know that our old methods like DeepWalk were not able to generalise to a new unseen graph. So if any new node gets added to the graph, we had to train our model from scratch, but since our new method is generalised to the unseen graphs, so to predict the embeddings of the new node we have to make the computational graph of the new node, transfer the parameters to the unseen part of the graph and we can make predictions.\n",
|
238 |
+
"\n",
|
239 |
+
"![new node](img/new_node.png)"
|
240 |
+
]
|
241 |
+
},
|
242 |
+
{
|
243 |
+
"cell_type": "markdown",
|
244 |
+
"metadata": {},
|
245 |
+
"source": [
|
246 |
+
"We can use this property in social-network (like Facebook). Consider the first graph in the above figure, users in a social-network are represented by the nodes of the graph. Initially, we would train our model on this graph. After some time suppose another user is added in the network, now we don't have to train our model from scratch on the second graph, we will create the computational graph of the new node, borrow the parameters from the already trained model and then we can find the embeddings of the newly added user."
|
247 |
+
]
|
248 |
+
},
|
249 |
+
{
|
250 |
+
"cell_type": "markdown",
|
251 |
+
"metadata": {},
|
252 |
+
"source": [
|
253 |
+
"# Implementation in PyTorch"
|
254 |
+
]
|
255 |
+
},
|
256 |
+
{
|
257 |
+
"cell_type": "markdown",
|
258 |
+
"metadata": {},
|
259 |
+
"source": [
|
260 |
+
"## Imports"
|
261 |
+
]
|
262 |
+
},
|
263 |
+
{
|
264 |
+
"cell_type": "code",
|
265 |
+
"execution_count": 1,
|
266 |
+
"metadata": {},
|
267 |
+
"outputs": [],
|
268 |
+
"source": [
|
269 |
+
"import torch\n",
|
270 |
+
"import torch.nn as nn\n",
|
271 |
+
"from torch.nn import init\n",
|
272 |
+
"from torch.autograd import Variable\n",
|
273 |
+
"import torch.nn.functional as F\n",
|
274 |
+
"import numpy as np\n",
|
275 |
+
"import time\n",
|
276 |
+
"import random\n",
|
277 |
+
"from sklearn.metrics import f1_score\n",
|
278 |
+
"from collections import defaultdict"
|
279 |
+
]
|
280 |
+
},
|
281 |
+
{
|
282 |
+
"cell_type": "markdown",
|
283 |
+
"metadata": {},
|
284 |
+
"source": [
|
285 |
+
"## GraphSAGE class"
|
286 |
+
]
|
287 |
+
},
|
288 |
+
{
|
289 |
+
"cell_type": "code",
|
290 |
+
"execution_count": 2,
|
291 |
+
"metadata": {},
|
292 |
+
"outputs": [],
|
293 |
+
"source": [
|
294 |
+
"\"\"\"\n",
|
295 |
+
"Simple supervised GraphSAGE model as well as examples running the model\n",
|
296 |
+
"on the Cora and Pubmed datasets.\n",
|
297 |
+
"\"\"\"\n",
|
298 |
+
"\n",
|
299 |
+
"class MeanAggregator(nn.Module):\n",
|
300 |
+
" \"\"\"\n",
|
301 |
+
" Aggregates a node's embeddings using mean of neighbors' embeddings\n",
|
302 |
+
" \"\"\"\n",
|
303 |
+
" def __init__(self, features, cuda=False, gcn=False): \n",
|
304 |
+
" \"\"\"\n",
|
305 |
+
" Initializes the aggregator for a specific graph.\n",
|
306 |
+
" features -- function mapping LongTensor of node ids to FloatTensor of feature values.\n",
|
307 |
+
" cuda -- whether to use GPU\n",
|
308 |
+
" gcn --- whether to perform concatenation GraphSAGE-style, or add self-loops GCN-style\n",
|
309 |
+
" \"\"\"\n",
|
310 |
+
"\n",
|
311 |
+
" super(MeanAggregator, self).__init__()\n",
|
312 |
+
"\n",
|
313 |
+
" self.features = features\n",
|
314 |
+
" self.cuda = cuda\n",
|
315 |
+
" self.gcn = gcn\n",
|
316 |
+
" \n",
|
317 |
+
" def forward(self, nodes, to_neighs, num_sample=10):\n",
|
318 |
+
" \"\"\"\n",
|
319 |
+
" nodes --- list of nodes in a batch\n",
|
320 |
+
" to_neighs --- list of sets, each set is the set of neighbors for node in batch\n",
|
321 |
+
" num_sample --- number of neighbors to sample. No sampling if None.\n",
|
322 |
+
" \"\"\"\n",
|
323 |
+
" # Local pointers to functions (speed hack)\n",
|
324 |
+
" _set = set\n",
|
325 |
+
" if not num_sample is None:\n",
|
326 |
+
" _sample = random.sample\n",
|
327 |
+
" samp_neighs = [_set(_sample(to_neigh, \n",
|
328 |
+
" num_sample,\n",
|
329 |
+
" )) if len(to_neigh) >= num_sample else to_neigh for to_neigh in to_neighs]\n",
|
330 |
+
" else:\n",
|
331 |
+
" samp_neighs = to_neighs\n",
|
332 |
+
"\n",
|
333 |
+
" if self.gcn:\n",
|
334 |
+
" samp_neighs = [samp_neigh + set([nodes[i]]) for i, samp_neigh in enumerate(samp_neighs)]\n",
|
335 |
+
" unique_nodes_list = list(set.union(*samp_neighs))\n",
|
336 |
+
" unique_nodes = {n:i for i,n in enumerate(unique_nodes_list)}\n",
|
337 |
+
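" # mask: one row per batch node, one column per unique sampled neighbour; row-normalised below so mask.mm(embed_matrix) yields the neighbour mean\n",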
" mask = Variable(torch.zeros(len(samp_neighs), len(unique_nodes)))\n",
|
338 |
+
" column_indices = [unique_nodes[n] for samp_neigh in samp_neighs for n in samp_neigh] \n",
|
339 |
+
" row_indices = [i for i in range(len(samp_neighs)) for j in range(len(samp_neighs[i]))]\n",
|
340 |
+
" mask[row_indices, column_indices] = 1\n",
|
341 |
+
" if self.cuda:\n",
|
342 |
+
" mask = mask.cuda()\n",
|
343 |
+
" num_neigh = mask.sum(1, keepdim=True)\n",
|
344 |
+
" mask = mask.div(num_neigh)\n",
|
345 |
+
" if self.cuda:\n",
|
346 |
+
" embed_matrix = self.features(torch.LongTensor(unique_nodes_list).cuda())\n",
|
347 |
+
" else:\n",
|
348 |
+
" embed_matrix = self.features(torch.LongTensor(unique_nodes_list))\n",
|
349 |
+
" to_feats = mask.mm(embed_matrix)\n",
|
350 |
+
" return to_feats\n",
|
351 |
+
"\n",
|
352 |
+
"class Encoder(nn.Module):\n",
|
353 |
+
" \"\"\"\n",
|
354 |
+
" Encodes a node's using 'convolutional' GraphSage approach\n",
|
355 |
+
" \"\"\"\n",
|
356 |
+
" def __init__(self, features, feature_dim, \n",
|
357 |
+
" embed_dim, adj_lists, aggregator,\n",
|
358 |
+
" num_sample=10,\n",
|
359 |
+
" base_model=None, gcn=False, cuda=False, \n",
|
360 |
+
" feature_transform=False): \n",
|
361 |
+
" super(Encoder, self).__init__()\n",
|
362 |
+
"\n",
|
363 |
+
" self.features = features\n",
|
364 |
+
" self.feat_dim = feature_dim\n",
|
365 |
+
" self.adj_lists = adj_lists\n",
|
366 |
+
" self.aggregator = aggregator\n",
|
367 |
+
" self.num_sample = num_sample\n",
|
368 |
+
" if base_model != None:\n",
|
369 |
+
" self.base_model = base_model\n",
|
370 |
+
"\n",
|
371 |
+
" self.gcn = gcn\n",
|
372 |
+
" self.embed_dim = embed_dim\n",
|
373 |
+
" self.cuda = cuda\n",
|
374 |
+
" self.aggregator.cuda = cuda\n",
|
375 |
+
" self.weight = nn.Parameter(\n",
|
376 |
+
" torch.FloatTensor(embed_dim, self.feat_dim if self.gcn else 2 * self.feat_dim))\n",
|
377 |
+
" init.xavier_uniform(self.weight)\n",
|
378 |
+
"\n",
|
379 |
+
" def forward(self, nodes):\n",
|
380 |
+
" \"\"\"\n",
|
381 |
+
" Generates embeddings for a batch of nodes.\n",
|
382 |
+
" nodes -- list of nodes\n",
|
383 |
+
" \"\"\"\n",
|
384 |
+
" neigh_feats = self.aggregator.forward(nodes,\n",
|
385 |
+
" [self.adj_lists[int(node)] for node in nodes], self.num_sample)\n",
|
386 |
+
" if not self.gcn:\n",
|
387 |
+
" if self.cuda:\n",
|
388 |
+
" self_feats = self.features(torch.LongTensor(nodes).cuda())\n",
|
389 |
+
" else:\n",
|
390 |
+
" self_feats = self.features(torch.LongTensor(nodes))\n",
|
391 |
+
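" # GraphSAGE update: concatenate the node's own features with the aggregated neighbour features\n",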
" combined = torch.cat([self_feats, neigh_feats], dim=1)\n",
|
392 |
+
" else:\n",
|
393 |
+
" combined = neigh_feats\n",
|
394 |
+
" combined = F.relu(self.weight.mm(combined.t()))\n",
|
395 |
+
" return combined\n",
|
396 |
+
"\n",
|
397 |
+
"\n",
|
398 |
+
"class SupervisedGraphSage(nn.Module):\n",
|
399 |
+
"\n",
|
400 |
+
" def __init__(self, num_classes, enc):\n",
|
401 |
+
" super(SupervisedGraphSage, self).__init__()\n",
|
402 |
+
" self.enc = enc\n",
|
403 |
+
" self.xent = nn.CrossEntropyLoss()\n",
|
404 |
+
"\n",
|
405 |
+
" self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))\n",
|
406 |
+
" init.xavier_uniform(self.weight)\n",
|
407 |
+
"\n",
|
408 |
+
" def forward(self, nodes):\n",
|
409 |
+
" embeds = self.enc(nodes)\n",
|
410 |
+
" scores = self.weight.mm(embeds)\n",
|
411 |
+
" return scores.t()\n",
|
412 |
+
"\n",
|
413 |
+
" def loss(self, nodes, labels):\n",
|
414 |
+
" scores = self.forward(nodes)\n",
|
415 |
+
" return self.xent(scores, labels.squeeze())"
|
416 |
+
]
|
417 |
+
},
|
418 |
+
{
|
419 |
+
"cell_type": "markdown",
|
420 |
+
"metadata": {},
|
421 |
+
"source": [
|
422 |
+
"## Load and Run"
|
423 |
+
]
|
424 |
+
},
|
425 |
+
{
|
426 |
+
"cell_type": "code",
|
427 |
+
"execution_count": 3,
|
428 |
+
"metadata": {},
|
429 |
+
"outputs": [
|
430 |
+
{
|
431 |
+
"name": "stderr",
|
432 |
+
"output_type": "stream",
|
433 |
+
"text": [
|
434 |
+
"/home/solsec/anaconda3/envs/GCN/lib/python3.7/site-packages/ipykernel_launcher.py:84: UserWarning: nn.init.xavier_uniform is now deprecated in favor of nn.init.xavier_uniform_.\n",
|
435 |
+
"/home/solsec/anaconda3/envs/GCN/lib/python3.7/site-packages/ipykernel_launcher.py:113: UserWarning: nn.init.xavier_uniform is now deprecated in favor of nn.init.xavier_uniform_.\n"
|
436 |
+
]
|
437 |
+
},
|
438 |
+
{
|
439 |
+
"name": "stdout",
|
440 |
+
"output_type": "stream",
|
441 |
+
"text": [
|
442 |
+
"0 1.942228078842163\n",
|
443 |
+
"1 1.921658992767334\n",
|
444 |
+
"2 1.9006750583648682\n",
|
445 |
+
"3 1.873147964477539\n",
|
446 |
+
"4 1.833079218864441\n",
|
447 |
+
"5 1.793070912361145\n",
|
448 |
+
"6 1.7698112726211548\n",
|
449 |
+
"7 1.7396035194396973\n",
|
450 |
+
"8 1.6929861307144165\n",
|
451 |
+
"9 1.6441305875778198\n",
|
452 |
+
"10 1.5536351203918457\n",
|
453 |
+
"11 1.5488044023513794\n",
|
454 |
+
"12 1.4822677373886108\n",
|
455 |
+
"13 1.468451738357544\n",
|
456 |
+
"14 1.3974864482879639\n",
|
457 |
+
"15 1.3166505098342896\n",
|
458 |
+
"16 1.2732900381088257\n",
|
459 |
+
"17 1.195784330368042\n",
|
460 |
+
"18 1.0451050996780396\n",
|
461 |
+
"19 0.9867343306541443\n",
|
462 |
+
"20 0.9533907175064087\n",
|
463 |
+
"21 0.9308909177780151\n",
|
464 |
+
"22 0.8159271478652954\n",
|
465 |
+
"23 0.7914730906486511\n",
|
466 |
+
"24 0.7673667669296265\n",
|
467 |
+
"25 0.7801153063774109\n",
|
468 |
+
"26 0.677147626876831\n",
|
469 |
+
"27 0.6584917902946472\n",
|
470 |
+
"28 0.6916540861129761\n",
|
471 |
+
"29 0.7556794881820679\n",
|
472 |
+
"30 0.7246103882789612\n",
|
473 |
+
"31 1.0994600057601929\n",
|
474 |
+
"32 0.8346526622772217\n",
|
475 |
+
"33 1.0626455545425415\n",
|
476 |
+
"34 0.5540371537208557\n",
|
477 |
+
"35 0.4707820415496826\n",
|
478 |
+
"36 0.47333627939224243\n",
|
479 |
+
"37 0.4838956296443939\n",
|
480 |
+
"38 0.4711683988571167\n",
|
481 |
+
"39 0.4963235855102539\n",
|
482 |
+
"40 0.48719295859336853\n",
|
483 |
+
"41 0.4026302695274353\n",
|
484 |
+
"42 0.35586124658584595\n",
|
485 |
+
"43 0.4207482933998108\n",
|
486 |
+
"44 0.41222259402275085\n",
|
487 |
+
"45 0.3622773289680481\n",
|
488 |
+
"46 0.33898842334747314\n",
|
489 |
+
"47 0.3108625113964081\n",
|
490 |
+
"48 0.34005632996559143\n",
|
491 |
+
"49 0.38214144110679626\n",
|
492 |
+
"50 0.314105749130249\n",
|
493 |
+
"51 0.3763721287250519\n",
|
494 |
+
"52 0.33562469482421875\n",
|
495 |
+
"53 0.40695565938949585\n",
|
496 |
+
"54 0.29900142550468445\n",
|
497 |
+
"55 0.36123421788215637\n",
|
498 |
+
"56 0.3518748879432678\n",
|
499 |
+
"57 0.3004622459411621\n",
|
500 |
+
"58 0.31813153624534607\n",
|
501 |
+
"59 0.25553494691848755\n",
|
502 |
+
"60 0.30214229226112366\n",
|
503 |
+
"61 0.30288413166999817\n",
|
504 |
+
"62 0.35318124294281006\n",
|
505 |
+
"63 0.2550695240497589\n",
|
506 |
+
"64 0.24285988509655\n",
|
507 |
+
"65 0.2586570382118225\n",
|
508 |
+
"66 0.27572184801101685\n",
|
509 |
+
"67 0.30874624848365784\n",
|
510 |
+
"68 0.25411731004714966\n",
|
511 |
+
"69 0.24063177406787872\n",
|
512 |
+
"70 0.2535572648048401\n",
|
513 |
+
"71 0.19541779160499573\n",
|
514 |
+
"72 0.20859725773334503\n",
|
515 |
+
"73 0.1995910108089447\n",
|
516 |
+
"74 0.20250269770622253\n",
|
517 |
+
"75 0.2077709287405014\n",
|
518 |
+
"76 0.20552675426006317\n",
|
519 |
+
"77 0.19936150312423706\n",
|
520 |
+
"78 0.24609258770942688\n",
|
521 |
+
"79 0.1969422698020935\n",
|
522 |
+
"80 0.19751787185668945\n",
|
523 |
+
"81 0.20629757642745972\n",
|
524 |
+
"82 0.19819925725460052\n",
|
525 |
+
"83 0.20762889087200165\n",
|
526 |
+
"84 0.17974525690078735\n",
|
527 |
+
"85 0.16918545961380005\n",
|
528 |
+
"86 0.2033073604106903\n",
|
529 |
+
"87 0.11312698572874069\n",
|
530 |
+
"88 0.19385862350463867\n",
|
531 |
+
"89 0.19625785946846008\n",
|
532 |
+
"90 0.20826341211795807\n",
|
533 |
+
"91 0.18184316158294678\n",
|
534 |
+
"92 0.17827709019184113\n",
|
535 |
+
"93 0.19169804453849792\n",
|
536 |
+
"94 0.1731080412864685\n",
|
537 |
+
"95 0.18547363579273224\n",
|
538 |
+
"96 0.13688258826732635\n",
|
539 |
+
"97 0.1454528272151947\n",
|
540 |
+
"98 0.18186761438846588\n",
|
541 |
+
"99 0.1714990884065628\n",
|
542 |
+
"Validation F1: 0.842\n",
|
543 |
+
"Average batch time: 0.04003107070922852\n"
|
544 |
+
]
|
545 |
+
}
|
546 |
+
],
|
547 |
+
"source": [
|
548 |
+
"def load_cora():\n",
|
549 |
+
" num_nodes = 2708\n",
|
550 |
+
" num_feats = 1433\n",
|
551 |
+
" feat_data = np.zeros((num_nodes, num_feats))\n",
|
552 |
+
" labels = np.empty((num_nodes,1), dtype=np.int64)\n",
|
553 |
+
" node_map = {}\n",
|
554 |
+
" label_map = {}\n",
|
555 |
+
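" # each line of cora.content is: <paper_id> <1433 binary word features> <class_label>\n",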
" with open(\"./cora/cora.content\") as fp:\n",
|
556 |
+
" for i,line in enumerate(fp):\n",
|
557 |
+
" info = line.strip().split()\n",
|
558 |
+
" feat_data[i,:] = [float(x) for x in info[1:-1]]\n",
|
559 |
+
" node_map[info[0]] = i\n",
|
560 |
+
" if not info[-1] in label_map:\n",
|
561 |
+
" label_map[info[-1]] = len(label_map)\n",
|
562 |
+
" labels[i] = label_map[info[-1]]\n",
|
563 |
+
"\n",
|
564 |
+
" adj_lists = defaultdict(set)\n",
|
565 |
+
" with open(\"./cora/cora.cites\") as fp:\n",
|
566 |
+
" for i,line in enumerate(fp):\n",
|
567 |
+
" info = line.strip().split()\n",
|
568 |
+
" paper1 = node_map[info[0]]\n",
|
569 |
+
" paper2 = node_map[info[1]]\n",
|
570 |
+
" adj_lists[paper1].add(paper2)\n",
|
571 |
+
" adj_lists[paper2].add(paper1)\n",
|
572 |
+
" return feat_data, labels, adj_lists\n",
|
573 |
+
"\n",
|
574 |
+
"def run_cora():\n",
|
575 |
+
" np.random.seed(1)\n",
|
576 |
+
" random.seed(1)\n",
|
577 |
+
" \n",
|
578 |
+
" num_nodes = 2708\n",
|
579 |
+
" feat_data, labels, adj_lists = load_cora()\n",
|
580 |
+
" \n",
|
581 |
+
" features = nn.Embedding(2708, 1433)\n",
|
582 |
+
" features.weight = nn.Parameter(torch.FloatTensor(feat_data),\n",
|
583 |
+
" requires_grad=False)\n",
|
584 |
+
"\n",
|
585 |
+
" agg1 = MeanAggregator(features, cuda=True)\n",
|
586 |
+
" \n",
|
587 |
+
" enc1 = Encoder(features, 1433, 128, adj_lists, agg1, gcn=True,\n",
|
588 |
+
" cuda=False)\n",
|
589 |
+
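" # second layer: agg2/enc2 consume the layer-1 embeddings produced by enc1 as node features\n",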
" agg2 = MeanAggregator(lambda nodes : enc1(nodes).t(),\n",
|
590 |
+
" cuda=False)\n",
|
591 |
+
" enc2 = Encoder(lambda nodes : enc1(nodes).t(),\n",
|
592 |
+
" enc1.embed_dim, 128, adj_lists, agg2, \n",
|
593 |
+
" base_model=enc1, gcn=True, cuda=False)\n",
|
594 |
+
" \n",
|
595 |
+
" enc1.num_samples = 5\n",
|
596 |
+
" enc2.num_samples = 5\n",
|
597 |
+
"\n",
|
598 |
+
" graphsage = SupervisedGraphSage(7, enc2)\n",
|
599 |
+
" rand_indices = np.random.permutation(num_nodes)\n",
|
600 |
+
" test = rand_indices[:1000]\n",
|
601 |
+
" val = rand_indices[1000:1500]\n",
|
602 |
+
" train = list(rand_indices[1500:])\n",
|
603 |
+
"\n",
|
604 |
+
" optimizer = torch.optim.SGD(filter(lambda p : p.requires_grad,\n",
|
605 |
+
" graphsage.parameters()), lr=0.7)\n",
|
606 |
+
" times = []\n",
|
607 |
+
" \n",
|
608 |
+
" for batch in range(100):\n",
|
609 |
+
" batch_nodes = train[:256]\n",
|
610 |
+
" random.shuffle(train)\n",
|
611 |
+
" start_time = time.time()\n",
|
612 |
+
" optimizer.zero_grad()\n",
|
613 |
+
" loss = graphsage.loss(batch_nodes, \n",
|
614 |
+
" Variable(torch.LongTensor(labels[np.array(batch_nodes)])))\n",
|
615 |
+
" loss.backward()\n",
|
616 |
+
" optimizer.step()\n",
|
617 |
+
" end_time = time.time()\n",
|
618 |
+
" times.append(end_time-start_time)\n",
|
619 |
+
" print (batch, loss.item())\n",
|
620 |
+
"\n",
|
621 |
+
" val_output = graphsage.forward(val)\n",
|
622 |
+
" \n",
|
623 |
+
" print (\"Validation F1:\", f1_score(labels[val],\n",
|
624 |
+
" val_output.data.numpy().argmax(axis=1),\n",
|
625 |
+
" average=\"micro\"))\n",
|
626 |
+
" \n",
|
627 |
+
" print (\"Average batch time:\", np.mean(times))\n",
|
628 |
+
"\n",
|
629 |
+
"\n",
|
630 |
+
"if __name__ == \"__main__\":\n",
|
631 |
+
" run_cora()"
|
632 |
+
]
|
633 |
+
},
|
634 |
+
{
|
635 |
+
"cell_type": "markdown",
|
636 |
+
"metadata": {},
|
637 |
+
"source": [
|
638 |
+
"## References\n",
|
639 |
+
"- https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf\n",
|
640 |
+
"- [Graph Node Embedding Algorithms (Stanford - Fall 2019) by Jure Leskovec](https://www.youtube.com/watch?v=7JELX6DiUxQ)\n",
|
641 |
+
"- [Jure Leskovec: \"Large-scale Graph Representation Learning\"](https://www.youtube.com/watch?v=oQL4E1gK3VU)\n",
|
642 |
+
"- [Jure Leskovec \"Deep Learning on Graphs\"\n",
|
643 |
+
"](https://www.youtube.com/watch?v=MIAbDNAxChI)"
|
644 |
+
]
|
645 |
+
}
|
646 |
+
],
|
647 |
+
"metadata": {
|
648 |
+
"kernelspec": {
|
649 |
+
"display_name": "Python 3",
|
650 |
+
"language": "python",
|
651 |
+
"name": "python3"
|
652 |
+
},
|
653 |
+
"language_info": {
|
654 |
+
"codemirror_mode": {
|
655 |
+
"name": "ipython",
|
656 |
+
"version": 3
|
657 |
+
},
|
658 |
+
"file_extension": ".py",
|
659 |
+
"mimetype": "text/x-python",
|
660 |
+
"name": "python",
|
661 |
+
"nbconvert_exporter": "python",
|
662 |
+
"pygments_lexer": "ipython3",
|
663 |
+
"version": "3.7.5"
|
664 |
+
}
|
665 |
+
},
|
666 |
+
"nbformat": 4,
|
667 |
+
"nbformat_minor": 2
|
668 |
+
}
|
Image/utils/train_utils.py
CHANGED
@@ -99,20 +99,27 @@ def collect_embeddings(model, dataloader, device):
|
|
99 |
|
100 |
# 分析所有层的输出维度
|
101 |
for name, feat in activation.items():
|
102 |
-
if feat is None
|
103 |
continue
|
104 |
-
|
105 |
-
|
106 |
-
if target_dim_range[0] <= flat_dim <= target_dim_range[1]:
|
107 |
suitable_layer_name = name
|
108 |
-
suitable_dim =
|
109 |
-
break
|
110 |
|
111 |
if suitable_layer_name is None:
|
112 |
-
raise ValueError(
|
113 |
|
114 |
print(f"选择的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")
|
115 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
116 |
# 清除第一次运行的激活值
|
117 |
activation.clear()
|
118 |
|
@@ -268,7 +275,7 @@ def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda
|
|
268 |
f'Test Acc: {acc:.2f}%')
|
269 |
|
270 |
# 每5个epoch保存一次
|
271 |
-
if (epoch + 1) %
|
272 |
# 创建epoch保存目录
|
273 |
epoch_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
|
274 |
if not os.path.exists(epoch_dir):
|
|
|
99 |
|
100 |
# 分析所有层的输出维度
|
101 |
for name, feat in activation.items():
|
102 |
+
if feat is None:
|
103 |
continue
|
104 |
+
feat_dim = feat.view(feat.size(0), -1).size(1)
|
105 |
+
if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
|
|
|
106 |
suitable_layer_name = name
|
107 |
+
suitable_dim = feat_dim
|
108 |
+
break
|
109 |
|
110 |
if suitable_layer_name is None:
|
111 |
+
raise ValueError("没有找到合适维度的特征层")
|
112 |
|
113 |
print(f"选择的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")
|
114 |
|
115 |
+
# 只保存层标识和维度
|
116 |
+
layer_info = {
|
117 |
+
'layer_id': suitable_layer_name, # 使用完整的层标识,如 'conv2.0'
|
118 |
+
'dim': suitable_dim # 特征维度
|
119 |
+
}
|
120 |
+
with open('layer_info.json', 'w') as f:
|
121 |
+
json.dump(layer_info, f)
|
122 |
+
|
123 |
# 清除第一次运行的激活值
|
124 |
activation.clear()
|
125 |
|
|
|
275 |
f'Test Acc: {acc:.2f}%')
|
276 |
|
277 |
# 每5个epoch保存一次
|
278 |
+
if (epoch + 1) % 1 == 0:
|
279 |
# 创建epoch保存目录
|
280 |
epoch_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
|
281 |
if not os.path.exists(epoch_dir):
|
count.py
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
|
3 |
-
# 请将这里的'your_file_path.npy'替换为你的.npy文件的实际路径
|
4 |
-
file_path = '/home/ruofei/RRF/hf-mirror/ttvnet/Image/AlexNet/model/0/epoch_5/train_data.npy'
|
5 |
-
|
6 |
-
# 读取.npy文件
|
7 |
-
data = np.load(file_path)
|
8 |
-
|
9 |
-
# 输出数组的维度
|
10 |
-
print("数组维度:", data.shape)
|
feature_predict/AlexNet/code/layer_info.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"layer_id": "conv2", "dim": 1024}
|
feature_predict/AlexNet/code/model.py
ADDED
@@ -0,0 +1,89 @@
1 |
+
'''
|
2 |
+
AlexNet in Pytorch
|
3 |
+
'''
|
4 |
+
|
5 |
+
import torch
|
6 |
+
import torch.nn as nn
|
7 |
+
|
8 |
+
class AlexNet(nn.Module): # 训练 ALexNet
|
9 |
+
'''
|
10 |
+
AlexNet模型
|
11 |
+
'''
|
12 |
+
def __init__(self,num_classes=10):
|
13 |
+
super(AlexNet,self).__init__()
|
14 |
+
# 五个卷积层 输入 32 * 32 * 3
|
15 |
+
self.conv1 = nn.Sequential(
|
16 |
+
nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=1), # (32-3+2)/1+1 = 32
|
17 |
+
nn.ReLU(),
|
18 |
+
nn.MaxPool2d(kernel_size=2, stride=2, padding=0) # (32-2)/2+1 = 16
|
19 |
+
)
|
20 |
+
self.conv2 = nn.Sequential( # 输入 16 * 16 * 6
|
21 |
+
nn.Conv2d(in_channels=6, out_channels=16, kernel_size=3, stride=1, padding=1), # (16-3+2)/1+1 = 16
|
22 |
+
nn.ReLU(),
|
23 |
+
nn.MaxPool2d(kernel_size=2, stride=2, padding=0) # (16-2)/2+1 = 8
|
24 |
+
)
|
25 |
+
self.conv3 = nn.Sequential( # 输入 8 * 8 * 16
|
26 |
+
nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1), # (8-3+2)/1+1 = 8
|
27 |
+
nn.ReLU(),
|
28 |
+
nn.MaxPool2d(kernel_size=2, stride=2, padding=0) # (8-2)/2+1 = 4
|
29 |
+
)
|
30 |
+
self.conv4 = nn.Sequential( # 输入 4 * 4 * 64
|
31 |
+
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1), # (4-3+2)/1+1 = 4
|
32 |
+
nn.ReLU(),
|
33 |
+
nn.MaxPool2d(kernel_size=2, stride=2, padding=0) # (4-2)/2+1 = 2
|
34 |
+
)
|
35 |
+
self.conv5 = nn.Sequential( # 输入 2 * 2 * 128
|
36 |
+
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),# (2-3+2)/1+1 = 2
|
37 |
+
nn.ReLU(),
|
38 |
+
nn.MaxPool2d(kernel_size=2, stride=2, padding=0) # (2-2)/2+1 = 1
|
39 |
+
) # 最后一层卷积层,输出 1 * 1 * 128
|
40 |
+
# 全连接层
|
41 |
+
self.dense = nn.Sequential(
|
42 |
+
nn.Linear(128,120),
|
43 |
+
nn.ReLU(),
|
44 |
+
nn.Linear(120,84),
|
45 |
+
nn.ReLU(),
|
46 |
+
nn.Linear(84,num_classes)
|
47 |
+
)
|
48 |
+
|
49 |
+
# 初始化权重
|
50 |
+
self._initialize_weights()
|
51 |
+
|
52 |
+
def forward(self,x):
|
53 |
+
x = self.conv1(x)
|
54 |
+
x = self.conv2(x)
|
55 |
+
x = self.conv3(x)
|
56 |
+
x = self.conv4(x)
|
57 |
+
x = self.conv5(x)
|
58 |
+
x = x.view(x.size()[0],-1)
|
59 |
+
x = self.dense(x)
|
60 |
+
return x
|
61 |
+
|
62 |
+
def _initialize_weights(self):
|
63 |
+
for m in self.modules():
|
64 |
+
if isinstance(m, nn.Conv2d):
|
65 |
+
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
|
66 |
+
if m.bias is not None:
|
67 |
+
nn.init.constant_(m.bias, 0)
|
68 |
+
elif isinstance(m, nn.Linear):
|
69 |
+
nn.init.normal_(m.weight, 0, 0.01)
|
70 |
+
if m.bias is not None:
|
71 |
+
nn.init.constant_(m.bias, 0)
|
72 |
+
|
73 |
+
def predict(self,x):
|
74 |
+
x = self.conv3(x)
|
75 |
+
x = self.conv4(x)
|
76 |
+
x = self.conv5(x)
|
77 |
+
x = x.view(x.size()[0],-1)
|
78 |
+
x = self.dense(x)
|
79 |
+
return x
|
80 |
+
|
81 |
+
def test():
|
82 |
+
net = AlexNet()
|
83 |
+
x = torch.randn(2,3,32,32)
|
84 |
+
y = net(x)
|
85 |
+
print(y.size())
|
86 |
+
from torchinfo import summary
|
87 |
+
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
88 |
+
net = net.to(device)
|
89 |
+
summary(net,(3,32,32))
|
feature_predict/AlexNet/code/train.py
ADDED
@@ -0,0 +1,41 @@
1 |
+
import sys
|
2 |
+
import os
|
3 |
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
|
4 |
+
from utils.dataset_utils import get_cifar10_dataloaders
|
5 |
+
from utils.train_utils import train_model, train_model_data_augmentation, train_model_backdoor
|
6 |
+
from utils.parse_args import parse_args
|
7 |
+
from model import AlexNet
|
8 |
+
#args.train_type #0 for normal train, 1 for data aug train,2 for back door train
|
9 |
+
|
10 |
+
def main():
|
11 |
+
# 解析命令行参数
|
12 |
+
args = parse_args()
|
13 |
+
# 创建模型
|
14 |
+
model = AlexNet()
|
15 |
+
if args.train_type == '0':
|
16 |
+
# 获取数据加载器
|
17 |
+
trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
|
18 |
+
# 训练模型
|
19 |
+
train_model(
|
20 |
+
model=model,
|
21 |
+
trainloader=trainloader,
|
22 |
+
testloader=testloader,
|
23 |
+
epochs=args.epochs,
|
24 |
+
lr=args.lr,
|
25 |
+
device=f'cuda:{args.gpu}',
|
26 |
+
save_dir='../model',
|
27 |
+
model_name='alexnet'
|
28 |
+
)
|
29 |
+
elif args.train_type == '1':
|
30 |
+
train_model_data_augmentation(model, epochs=args.epochs, lr=args.lr, device=f'cuda:{args.gpu}',
|
31 |
+
save_dir='../model', model_name='alexnet',
|
32 |
+
batch_size=args.batch_size, num_workers=args.num_workers,
|
33 |
+
local_dataset_path=args.dataset_path)
|
34 |
+
elif args.train_type == '2':
|
35 |
+
train_model_backdoor(model, poison_ratio=0.1, target_label=0, epochs=args.epochs, lr=args.lr,
|
36 |
+
device=f'cuda:{args.gpu}', save_dir='../model', model_name='alexnet',
|
37 |
+
batch_size=args.batch_size, num_workers=args.num_workers,
|
38 |
+
local_dataset_path=args.dataset_path)
|
39 |
+
|
40 |
+
if __name__ == '__main__':
|
41 |
+
main()
|
feature_predict/AlexNet/dataset/.gitkeep
ADDED
File without changes
|
feature_predict/AlexNet/model/.gitkeep
ADDED
File without changes
|
feature_predict/AlexNet/model/0/epoch_195/index.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
feature_predict/AlexNet/model/0/epoch_195/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:662e4f4b36359b75f709e91794ea0f37104f081a17f54b0d358137370c5d52a7
|
3 |
+
size 504030
|
feature_predict/AlexNet/model/0/epoch_195/train_data.npy
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:22583767b1623d5ef64066148a95cb7bf0f1715150345cb0507e3270d2f6c7af
|
3 |
+
size 204800128
|
feature_predict/AlexNet/model/0/epoch_200/index.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
feature_predict/AlexNet/model/0/epoch_200/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:97313abcce856d4005f78f9ac4f9b71c4602e97b7a127637ec1da53fd6162ced
|
3 |
+
size 504030
|
feature_predict/AlexNet/model/0/epoch_200/train_data.npy
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:01249212812bb6039cf61d93833b01f448da56f1c3bfccbd54a6873909349010
|
3 |
+
size 204800128
|
feature_predict/__init__.py
ADDED
@@ -0,0 +1,3 @@
1 |
+
from .feature_predictor import FeaturePredictor, predict_feature
|
2 |
+
|
3 |
+
__all__ = ['FeaturePredictor', 'predict_feature']
|
feature_predict/feature_predictor.py
ADDED
@@ -0,0 +1,410 @@
1 |
+
"""
|
2 |
+
特征预测器模块
|
3 |
+
|
4 |
+
该模块使用钩子机制从模型的中间层特征向量预测分类结果
|
5 |
+
"""
|
6 |
+
|
7 |
+
import torch
|
8 |
+
import torch.nn as nn
|
9 |
+
import json
|
10 |
+
import os
|
11 |
+
import torch.nn.functional as F
|
12 |
+
import numpy as np
|
13 |
+
from typing import Type, Union, Optional
|
14 |
+
|
15 |
+
class FeaturePredictor:
|
16 |
+
def __init__(self, model_class, model_weights_path, layer_info_path, device='cuda' if torch.cuda.is_available() else 'cpu'):
|
17 |
+
"""
|
18 |
+
初始化特征预测器
|
19 |
+
|
20 |
+
Args:
|
21 |
+
model_class: 模型类
|
22 |
+
model_weights_path: 模型权重文件路径
|
23 |
+
layer_info_path: 层信息文件路径
|
24 |
+
device: 运行设备
|
25 |
+
"""
|
26 |
+
self.device = device
|
27 |
+
self.model = model_class().to(device)
|
28 |
+
self.model.load_state_dict(torch.load(model_weights_path, map_location=device, weights_only=True))
|
29 |
+
self.model.eval()
|
30 |
+
# 加载层信息
|
31 |
+
with open(layer_info_path, 'r') as f:
|
32 |
+
layer_info = json.load(f)
|
33 |
+
self.target_layer = layer_info['layer_id']
|
34 |
+
self.feature_dim = layer_info['dim']
|
35 |
+
|
36 |
+
# 初始化变量
|
37 |
+
self.output_shape = None
|
38 |
+
self.inject_feature = None
|
39 |
+
self.handles = []
|
40 |
+
self.layer_name_map = {}
|
41 |
+
|
42 |
+
# 用于调试的变量
|
43 |
+
self.last_normalized_feature = None
|
44 |
+
self.last_reshaped_feature = None
|
45 |
+
self.last_layer_outputs = {}
|
46 |
+
|
47 |
+
# 注册钩子
|
48 |
+
self.register_hooks()
|
49 |
+
|
50 |
+
# 运行一次前向传播来获取形状
|
51 |
+
self._get_output_shape()
|
52 |
+
|
53 |
+
def _get_output_shape(self):
|
54 |
+
"""运行一次前向传播来获取目标层的输出形状"""
|
55 |
+
def shape_hook(module, input, output):
|
56 |
+
self.output_shape = output.shape[1:] # 不包括batch维度
|
57 |
+
print(f"[Init] 获取到目标层输出形状: {self.output_shape}")
|
58 |
+
return output
|
59 |
+
|
60 |
+
# 找到目标层并注册临时钩子
|
61 |
+
def find_layer(module, name=''):
|
62 |
+
for n, child in module.named_children():
|
63 |
+
current_name = f"{name}.{n}" if name else n
|
64 |
+
if current_name == self.target_layer:
|
65 |
+
handle = child.register_forward_hook(shape_hook)
|
66 |
+
return handle, True
|
67 |
+
else:
|
68 |
+
handle, found = find_layer(child, current_name)
|
69 |
+
if found:
|
70 |
+
return handle, True
|
71 |
+
return None, False
|
72 |
+
|
73 |
+
# 注册临时钩子
|
74 |
+
handle, found = find_layer(self.model)
|
75 |
+
if not found:
|
76 |
+
raise ValueError(f"未找到目标层: {self.target_layer}")
|
77 |
+
|
78 |
+
# 运行一次前向传播
|
79 |
+
with torch.no_grad():
|
80 |
+
dummy_input = torch.zeros(1, 3, 32, 32).to(self.device)
|
81 |
+
self.model(dummy_input)
|
82 |
+
|
83 |
+
# 移除临时钩子
|
84 |
+
handle.remove()
|
85 |
+
|
86 |
+
if self.output_shape is None:
|
87 |
+
raise RuntimeError("无法获取目标层的输出形状")
|
88 |
+
|
89 |
+
def register_hooks(self):
|
90 |
+
"""注册钩子函数,在目标层注入特征向量和监控每层输出"""
|
91 |
+
def print_tensor_info(name, tensor):
|
92 |
+
"""打印张量的统计信息"""
|
93 |
+
print(f"\n[Hook Debug] {name}:")
|
94 |
+
print(f"- 形状: {tensor.shape}")
|
95 |
+
print(f"- 数值范围: [{tensor.min().item():.4f}, {tensor.max().item():.4f}]")
|
96 |
+
print(f"- 均值: {tensor.mean().item():.4f}")
|
97 |
+
print(f"- 标准差: {tensor.std().item():.4f}")
|
98 |
+
|
99 |
+
def hook_fn(module, input, output):
|
100 |
+
"""钩子函数:输出层信息并在目标层注入特征"""
|
101 |
+
layer_name = self.layer_name_map.get(module, "未知层")
|
102 |
+
print(f"\n[Hook Debug] 层: {layer_name}")
|
103 |
+
print(f"- 类型: {type(module).__name__}")
|
104 |
+
|
105 |
+
# 输出输入信息
|
106 |
+
if input and len(input) > 0:
|
107 |
+
print_tensor_info("输入张量", input[0])
|
108 |
+
|
109 |
+
# 输出原始输出信息
|
110 |
+
print_tensor_info("输出张量", output)
|
111 |
+
|
112 |
+
# 如果是目标层且有注入特征,则替换输出
|
113 |
+
if layer_name == self.target_layer and self.inject_feature is not None:
|
114 |
+
print("\n[Hook Debug] 正在注入特征...")
|
115 |
+
print_tensor_info("注入特征", self.inject_feature)
|
116 |
+
print(f"[Hook Debug] 将层 {layer_name} 的输出从 {output.shape} 替换为注入特征 {self.inject_feature.shape}")
|
117 |
+
# 替换输出
|
118 |
+
output = self.inject_feature
|
119 |
+
print("[Hook Debug] 特征注入完成,将作为下一层的输入")
|
120 |
+
return output
|
121 |
+
|
122 |
+
return output
|
123 |
+
|
124 |
+
def hook_layer(module, name=''):
|
125 |
+
"""为每一层注册钩子"""
|
126 |
+
for n, child in module.named_children():
|
127 |
+
current_name = f"{name}.{n}" if name else n
|
128 |
+
# 保存层名到模块的映射
|
129 |
+
self.layer_name_map[child] = current_name
|
130 |
+
# 注册钩子
|
131 |
+
handle = child.register_forward_hook(hook_fn)
|
132 |
+
self.handles.append(handle)
|
133 |
+
# 递归处理子模块
|
134 |
+
hook_layer(child, current_name)
|
135 |
+
|
136 |
+
# 注册所有层的钩子
|
137 |
+
hook_layer(self.model)
|
138 |
+
print(f"[Debug] 钩子注册完成,共注册了 {len(self.handles)} 个钩子")
|
139 |
+
|
140 |
+
def reshape_feature(self, feature):
|
141 |
+
"""调整特征向量的形状"""
|
142 |
+
if self.output_shape is None:
|
143 |
+
raise RuntimeError("目标层的输出形状未初始化")
|
144 |
+
|
145 |
+
batch_size = feature.shape[0]
|
146 |
+
expected_dim = np.prod(self.output_shape)
|
147 |
+
|
148 |
+
# 检查输入特征维度是否正确
|
149 |
+
if feature.shape[1] != expected_dim:
|
150 |
+
raise ValueError(f"特征维度不匹配:预期 {expected_dim},实际 {feature.shape[1]}")
|
151 |
+
|
152 |
+
# 使用自动获取的形状重塑特征
|
153 |
+
new_shape = (batch_size,) + self.output_shape
|
154 |
+
print(f"[Debug] 调整特征形状: {feature.shape} -> {new_shape}")
|
155 |
+
return feature.view(new_shape)
|
156 |
+
|
157 |
+
def predict(self, feature):
|
158 |
+
"""使用给定的特征向量进行预测"""
|
159 |
+
print(f"\n[Debug] 开始预测,输入特征形状: {feature.shape}")
|
160 |
+
|
161 |
+
# 检查输入维度
|
162 |
+
if feature.shape[1] != self.feature_dim:
|
163 |
+
raise ValueError(f"特征维度不匹配:预期 {self.feature_dim},实际 {feature.shape[1]}")
|
164 |
+
|
165 |
+
# 将特征转移到正确的设备并重塑
|
166 |
+
feature = feature.to(self.device)
|
167 |
+
self.inject_feature = self.reshape_feature(feature)
|
168 |
+
|
169 |
+
# 使用虚拟输入进行预测
|
170 |
+
dummy_input = torch.zeros(feature.shape[0], 3, 32, 32).to(self.device)
|
171 |
+
|
172 |
+
# 进行前向传播(钩子会自动在目标层注入特征)
|
173 |
+
with torch.no_grad():
|
174 |
+
output = self.model(dummy_input)
|
175 |
+
|
176 |
+
# 清除注入的特征
|
177 |
+
self.inject_feature = None
|
178 |
+
|
179 |
+
return output
|
180 |
+
|
181 |
+
def predict_feature(
|
182 |
+
model: Type[nn.Module],
|
183 |
+
weight_path: str,
|
184 |
+
layer_info_path: str,
|
185 |
+
feature: Union[torch.Tensor, np.ndarray],
|
186 |
+
device: Optional[str] = None
|
187 |
+
) -> torch.Tensor:
|
188 |
+
"""
|
189 |
+
使用预训练模型预测特征向量的类别。
|
190 |
+
|
191 |
+
Args:
|
192 |
+
model: PyTorch模型类(不是实例)
|
193 |
+
weight_path: 模型权重文件路径
|
194 |
+
layer_info_path: 层信息配置文件路径
|
195 |
+
feature: 输入特征向量,可以是torch.Tensor或numpy.ndarray
|
196 |
+
device: 运行设备,可选 'cuda' 或 'cpu'。如果为None,将自动选择。
|
197 |
+
|
198 |
+
Returns:
|
199 |
+
torch.Tensor: 模型输出的预测结果
|
200 |
+
|
201 |
+
Raises:
|
202 |
+
ValueError: 如果输入特征维度不正确
|
203 |
+
FileNotFoundError: 如果权重文件或层信息文件不存在
|
204 |
+
RuntimeError: 如果模型加载或预测过程出错
|
205 |
+
"""
|
206 |
+
try:
|
207 |
+
# 检查文件是否存在
|
208 |
+
if not os.path.exists(weight_path):
|
209 |
+
raise FileNotFoundError(f"权重文件不存在: {weight_path}")
|
210 |
+
if not os.path.exists(layer_info_path):
|
211 |
+
raise FileNotFoundError(f"层信息文件不存在: {layer_info_path}")
|
212 |
+
|
213 |
+
# 确定设备
|
214 |
+
if device is None:
|
215 |
+
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
216 |
+
|
217 |
+
# 转换输入特征为torch.Tensor
|
218 |
+
if isinstance(feature, np.ndarray):
|
219 |
+
feature = torch.from_numpy(feature).float()
|
220 |
+
elif not isinstance(feature, torch.Tensor):
|
221 |
+
raise ValueError("输入特征必须是numpy数组或torch张量")
|
222 |
+
|
223 |
+
# 创建预测器实例
|
224 |
+
predictor = FeaturePredictor(
|
225 |
+
model_class=model,
|
226 |
+
model_weights_path=weight_path,
|
227 |
+
layer_info_path=layer_info_path,
|
228 |
+
device=device
|
229 |
+
)
|
230 |
+
|
231 |
+
# 进行预测
|
232 |
+
with torch.no_grad():
|
233 |
+
output = predictor.predict(feature)
|
234 |
+
|
235 |
+
return output
|
236 |
+
|
237 |
+
except Exception as e:
|
238 |
+
raise RuntimeError(f"预测过程出错: {str(e)}")
|
239 |
+
|
240 |
+
|
241 |
+
def test_predictor():
|
242 |
+
"""测试特征预测器的功能"""
|
243 |
+
from AlexNet.code.model import AlexNet
|
244 |
+
import os
|
245 |
+
import numpy as np
|
246 |
+
|
247 |
+
# 创建预测器实例
|
248 |
+
predictor = FeaturePredictor(
|
249 |
+
model_class=AlexNet,
|
250 |
+
model_weights_path='AlexNet/model/0/epoch_195/subject_model.pth',
|
251 |
+
layer_info_path='AlexNet/code/layer_info.json'
|
252 |
+
)
|
253 |
+
|
254 |
+
print("\n开始单点测试...")
|
255 |
+
|
256 |
+
# 生成一个测试点,使用较大的尺度以增加特征的差异性
|
257 |
+
feature = torch.randn(1, predictor.feature_dim) * 10.0
|
258 |
+
output = predictor.predict(feature)
|
259 |
+
probs = output.softmax(dim=1)
|
260 |
+
print("\n结果:",output)
|
261 |
+
# 显示最终预测结果
|
262 |
+
print("\n最终预测结果:")
|
263 |
+
top_k = torch.topk(probs[0], k=3)
|
264 |
+
for idx, (class_idx, prob) in enumerate(zip(top_k.indices.tolist(), top_k.values.tolist())):
|
265 |
+
print(f"Top-{idx+1}: 类别 {class_idx}, 概率 {prob:.4f}")
|
266 |
+
|
267 |
+
def test_predictor_from_train_data():
|
268 |
+
"""测试特征预测器的批量预测功能"""
|
269 |
+
from AlexNet.code.model import AlexNet
|
270 |
+
import numpy as np
|
271 |
+
import torch
|
272 |
+
|
273 |
+
print("\n开始处理训练数据集...")
|
274 |
+
# 创建预测器实例
|
275 |
+
predictor = FeaturePredictor(
|
276 |
+
model_class=AlexNet,
|
277 |
+
model_weights_path='AlexNet/model/0/epoch_195/subject_model.pth',
|
278 |
+
layer_info_path='AlexNet/code/layer_info.json'
|
279 |
+
)
|
280 |
+
|
281 |
+
# 加载训练数据
|
282 |
+
print("\n加载训练数据...")
|
283 |
+
features = np.load('AlexNet/model/0/epoch_195/train_data.npy')
|
284 |
+
print(f"数据形状: {features.shape}")
|
285 |
+
|
286 |
+
# 转换为tensor
|
287 |
+
features = torch.from_numpy(features).float()
|
288 |
+
|
289 |
+
# 批量处理
|
290 |
+
batch_size = 100
|
291 |
+
num_samples = len(features)
|
292 |
+
num_batches = (num_samples + batch_size - 1) // batch_size
|
293 |
+
|
294 |
+
# 用于统计结果
|
295 |
+
all_predictions = []
|
296 |
+
class_counts = {}
|
297 |
+
|
298 |
+
print("\n开始批量预测...")
|
299 |
+
with torch.no_grad():
|
300 |
+
for i in range(num_batches):
|
301 |
+
start_idx = i * batch_size
|
302 |
+
end_idx = min((i + 1) * batch_size, num_samples)
|
303 |
+
batch_features = features[start_idx:end_idx]
|
304 |
+
|
305 |
+
# 使用预测器进行预测
|
306 |
+
outputs = predictor.predict(batch_features)
|
307 |
+
predictions = outputs.argmax(dim=1).cpu().numpy()
|
308 |
+
|
309 |
+
# 更新统计信息
|
310 |
+
for pred in predictions:
|
311 |
+
class_counts[int(pred)] = class_counts.get(int(pred), 0) + 1
|
312 |
+
|
313 |
+
all_predictions.extend(predictions)
|
314 |
+
|
315 |
+
# 打印进度和当前批次的预测分布
|
316 |
+
if (i + 1) % 10 == 0:
|
317 |
+
print(f"\n已处理: {end_idx}/{num_samples} 个样本")
|
318 |
+
batch_unique, batch_counts = np.unique(predictions, return_counts=True)
|
319 |
+
print("当前批次预测分布:")
|
320 |
+
for class_idx, count in zip(batch_unique, batch_counts):
|
321 |
+
print(f"类别 {class_idx}: {count} 个样本 ({count/len(predictions)*100:.2f}%)")
|
322 |
+
|
323 |
+
# 打印总体统计结果
|
324 |
+
print("\n最终预测结果统计:")
|
325 |
+
total_samples = len(all_predictions)
|
326 |
+
for class_idx in sorted(class_counts.keys()):
|
327 |
+
count = class_counts[class_idx]
|
328 |
+
percentage = (count / total_samples) * 100
|
329 |
+
print(f"类别 {class_idx}: {count} 个样本 ({percentage:.2f}%)")
|
330 |
+
|
331 |
+
def test_train_data():
|
332 |
+
"""测试训练数据集的预测结果分布"""
|
333 |
+
from AlexNet.code.model import AlexNet
|
334 |
+
import numpy as np
|
335 |
+
import torch
|
336 |
+
import torch.nn.functional as F
|
337 |
+
|
338 |
+
print("\n开始处理训练数据集...")
|
339 |
+
|
340 |
+
# 初始化模型
|
341 |
+
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
342 |
+
model = AlexNet().to(device)
|
343 |
+
model.load_state_dict(torch.load('AlexNet/model/0/epoch_195/subject_model.pth',
|
344 |
+
map_location=device, weights_only=True))
|
345 |
+
model.eval()
|
346 |
+
|
347 |
+
# 加载训练数据
|
348 |
+
print("加载训练数据...")
|
349 |
+
features = np.load('AlexNet/model/0/epoch_195/train_data.npy')
|
350 |
+
print(f"数据形状: {features.shape}")
|
351 |
+
|
352 |
+
# 转换为tensor
|
353 |
+
features = torch.from_numpy(features).float().to(device)
|
354 |
+
|
355 |
+
# 批量处理
|
356 |
+
batch_size = 100
|
357 |
+
num_samples = len(features)
|
358 |
+
num_batches = (num_samples + batch_size - 1) // batch_size
|
359 |
+
|
360 |
+
# 用于统计结果
|
361 |
+
all_predictions = []
|
362 |
+
class_counts = {}
|
363 |
+
|
364 |
+
print("\n开始批量预测...")
|
365 |
+
with torch.no_grad():
|
366 |
+
for i in range(num_batches):
|
367 |
+
start_idx = i * batch_size
|
368 |
+
end_idx = min((i + 1) * batch_size, num_samples)
|
369 |
+
batch_features = features[start_idx:end_idx]
|
370 |
+
|
371 |
+
# 将特征重塑为[batch_size, 16, 8, 8]
|
372 |
+
reshaped_features = batch_features.view(-1, 16, 8, 8)
|
373 |
+
|
374 |
+
# 使用模型的predict函数
|
375 |
+
outputs = model.predict(reshaped_features)
|
376 |
+
predictions = outputs.argmax(dim=1).cpu().numpy()
|
377 |
+
|
378 |
+
# 更新统计信息
|
379 |
+
for pred in predictions:
|
380 |
+
class_counts[int(pred)] = class_counts.get(int(pred), 0) + 1
|
381 |
+
|
382 |
+
all_predictions.extend(predictions)
|
383 |
+
|
384 |
+
# 打印进度
|
385 |
+
if (i + 1) % 10 == 0:
|
386 |
+
print(f"已处理: {end_idx}/{num_samples} 个样本")
|
387 |
+
|
388 |
+
# 打印统计结果
|
389 |
+
print("\n预测结果统计:")
|
390 |
+
total_samples = len(all_predictions)
|
391 |
+
for class_idx in sorted(class_counts.keys()):
|
392 |
+
count = class_counts[class_idx]
|
393 |
+
percentage = (count / total_samples) * 100
|
394 |
+
print(f"类别 {class_idx}: {count} 个样本 ({percentage:.2f}%)")
|
395 |
+
|
396 |
+
# 保存详细结果
|
397 |
+
print("\n保存详细结果...")
|
398 |
+
results = {
|
399 |
+
'predictions': all_predictions,
|
400 |
+
'class_counts': class_counts
|
401 |
+
}
|
402 |
+
# np.save('prediction_results.npy', results)
|
403 |
+
# print("结果已保存到 prediction_results.npy")
|
404 |
+
|
405 |
+
|
406 |
+
|
407 |
+
if __name__ == "__main__":
|
408 |
+
test_predictor()
|
409 |
+
# test_predictor_from_train_data()
|
410 |
+
# test_train_data()
|
feature_predict/test_feature.py
ADDED
@@ -0,0 +1,86 @@
1 |
+
from AlexNet.code.model import AlexNet
|
2 |
+
import torch
|
3 |
+
import numpy as np
|
4 |
+
from feature_predictor import predict_feature
|
5 |
+
|
6 |
+
def test_single_feature():
|
7 |
+
"""测试单个特征向量的预测"""
|
8 |
+
print("\n开始单特征测试...")
|
9 |
+
|
10 |
+
# 生成测试特征
|
11 |
+
feature_dim = 1024 # 特征维度
|
12 |
+
feature = torch.randn(1, feature_dim) * 10.0 # 使用较大的尺度
|
13 |
+
|
14 |
+
# 使用predict_feature函数进行预测
|
15 |
+
output = predict_feature(
|
16 |
+
model=AlexNet,
|
17 |
+
weight_path='AlexNet/model/0/epoch_195/subject_model.pth',
|
18 |
+
layer_info_path='AlexNet/code/layer_info.json',
|
19 |
+
feature=feature
|
20 |
+
)
|
21 |
+
|
22 |
+
# 计算概率
|
23 |
+
probs = output.softmax(dim=1)
|
24 |
+
|
25 |
+
# 显示原始输出和预测结果
|
26 |
+
print("\n原始输出:", output)
|
27 |
+
print("\n最终预测结果:")
|
28 |
+
top_k = torch.topk(probs[0], k=3)
|
29 |
+
for idx, (class_idx, prob) in enumerate(zip(top_k.indices.tolist(), top_k.values.tolist())):
|
30 |
+
print(f"Top-{idx+1}: 类别 {class_idx}, 概率 {prob:.4f}")
|
31 |
+
|
32 |
+
def test_train_data():
|
33 |
+
"""测试训练数据集的预测"""
|
34 |
+
print("\n开始训练数据测试...")
|
35 |
+
|
36 |
+
# 加载训练数据
|
37 |
+
print("加载训练数据...")
|
38 |
+
features = np.load('AlexNet/model/0/epoch_195/train_data.npy')
|
39 |
+
print(f"数据形状: {features.shape}")
|
40 |
+
|
41 |
+
# 批量处理
|
42 |
+
batch_size = 100
|
43 |
+
num_samples = len(features)
|
44 |
+
num_batches = (num_samples + batch_size - 1) // batch_size
|
45 |
+
|
46 |
+
# 用于统计结果
|
47 |
+
all_predictions = []
|
48 |
+
class_counts = {}
|
49 |
+
|
50 |
+
print("\n开始批量预测...")
|
51 |
+
for i in range(num_batches):
|
52 |
+
start_idx = i * batch_size
|
53 |
+
end_idx = min((i + 1) * batch_size, num_samples)
|
54 |
+
batch_features = features[start_idx:end_idx]
|
55 |
+
|
56 |
+
# 使用predict_feature函数进行预测
|
57 |
+
outputs = predict_feature(
|
58 |
+
model=AlexNet,
|
59 |
+
weight_path='AlexNet/model/0/epoch_195/subject_model.pth',
|
60 |
+
layer_info_path='AlexNet/code/layer_info.json',
|
61 |
+
feature=batch_features
|
62 |
+
)
|
63 |
+
|
64 |
+
# 获取预测类别
|
65 |
+
predictions = outputs.argmax(dim=1).cpu().numpy()
|
66 |
+
|
67 |
+
# 更新统计信息
|
68 |
+
for pred in predictions:
|
69 |
+
class_counts[int(pred)] = class_counts.get(int(pred), 0) + 1
|
70 |
+
|
71 |
+
all_predictions.extend(predictions)
|
72 |
+
|
73 |
+
# 打印进度和当前批次的预测分布
|
74 |
+
if (i + 1) % 10 == 0:
|
75 |
+
print(f"\n已处理: {end_idx}/{num_samples} 个样本")
|
76 |
+
batch_unique, batch_counts = np.unique(predictions, return_counts=True)
|
77 |
+
print("当前批次预测分布:")
|
78 |
+
for class_idx, count in zip(batch_unique, batch_counts):
|
79 |
+
print(f"类别 {class_idx}: {count} 个样本 ({count/len(predictions)*100:.2f}%)")
|
80 |
+
|
81 |
+
|
82 |
+
if __name__ == "__main__":
|
83 |
+
# 测试单个特征
|
84 |
+
test_single_feature()
|
85 |
+
# 测试训练数据
|
86 |
+
# test_train_data()
|
feature_predict/utils/dataset_utils.py
ADDED
@@ -0,0 +1,110 @@
1 |
+
import torch
|
2 |
+
import torchvision
|
3 |
+
import torchvision.transforms as transforms
|
4 |
+
import os
|
5 |
+
|
6 |
+
def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None,shuffle=True):
|
7 |
+
"""获取CIFAR10数据集的数据加载器
|
8 |
+
|
9 |
+
Args:
|
10 |
+
batch_size: 批次大小
|
11 |
+
num_workers: 数据加载的工作进程数
|
12 |
+
local_dataset_path: 本地数据集路径,如果提供则使用本地数据集,否则下载
|
13 |
+
|
14 |
+
Returns:
|
15 |
+
trainloader: 训练数据加载器
|
16 |
+
testloader: 测试数据加载器
|
17 |
+
"""
|
18 |
+
# 数据预处理
|
19 |
+
transform_train = transforms.Compose([
|
20 |
+
transforms.RandomCrop(32, padding=4),
|
21 |
+
transforms.RandomHorizontalFlip(),
|
22 |
+
transforms.ToTensor(),
|
23 |
+
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
|
24 |
+
])
|
25 |
+
|
26 |
+
transform_test = transforms.Compose([
|
27 |
+
transforms.ToTensor(),
|
28 |
+
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
|
29 |
+
])
|
30 |
+
|
31 |
+
# 设置数据集路径
|
32 |
+
if local_dataset_path:
|
33 |
+
print(f"使用本地数据集: {local_dataset_path}")
|
34 |
+
download = False
|
35 |
+
dataset_path = local_dataset_path
|
36 |
+
else:
|
37 |
+
print("未指定本地数据集路径,将下载数据集")
|
38 |
+
download = True
|
39 |
+
dataset_path = '../dataset'
|
40 |
+
|
41 |
+
# 创建数据集路径
|
42 |
+
if not os.path.exists(dataset_path):
|
43 |
+
os.makedirs(dataset_path)
|
44 |
+
|
45 |
+
trainset = torchvision.datasets.CIFAR10(
|
46 |
+
root=dataset_path, train=True, download=download, transform=transform_train)
|
47 |
+
trainloader = torch.utils.data.DataLoader(
|
48 |
+
trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
|
49 |
+
|
50 |
+
testset = torchvision.datasets.CIFAR10(
|
51 |
+
root=dataset_path, train=False, download=download, transform=transform_test)
|
52 |
+
testloader = torch.utils.data.DataLoader(
|
53 |
+
testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
|
54 |
+
|
55 |
+
return trainloader, testloader
|
56 |
+
|
57 |
+
def get_mnist_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None,shuffle=True):
|
58 |
+
"""获取MNIST数据集的数据加载器
|
59 |
+
|
60 |
+
Args:
|
61 |
+
batch_size: 批次大小
|
62 |
+
num_workers: 数据加载的工作进程数
|
63 |
+
local_dataset_path: 本地数据集路径,如果提供则使用本地数据集,否则下载
|
64 |
+
|
65 |
+
Returns:
|
66 |
+
trainloader: 训练数据加载器
|
67 |
+
testloader: 测试数据加载器
|
68 |
+
"""
|
69 |
+
# 数据预处理
|
70 |
+
transform_train = transforms.Compose([
|
71 |
+
transforms.RandomRotation(10), # 随机旋转±10度
|
72 |
+
transforms.RandomAffine( # 随机仿射变换
|
73 |
+
degrees=0, # 不进行旋转
|
74 |
+
translate=(0.1, 0.1), # 平移范围
|
75 |
+
scale=(0.9, 1.1) # 缩放范围
|
76 |
+
),
|
77 |
+
transforms.ToTensor(),
|
78 |
+
transforms.Normalize((0.1307,), (0.3081,)) # MNIST数据集的均值和标准差
|
79 |
+
])
|
80 |
+
|
81 |
+
transform_test = transforms.Compose([
|
82 |
+
transforms.ToTensor(),
|
83 |
+
transforms.Normalize((0.1307,), (0.3081,))
|
84 |
+
])
|
85 |
+
|
86 |
+
# 设置数据集路径
|
87 |
+
if local_dataset_path:
|
88 |
+
print(f"使用本地数据集: {local_dataset_path}")
|
89 |
+
download = False
|
90 |
+
dataset_path = local_dataset_path
|
91 |
+
else:
|
92 |
+
print("未指定本地数据集路径,将下载数据集")
|
93 |
+
download = True
|
94 |
+
dataset_path = '../dataset'
|
95 |
+
|
96 |
+
# 创建数据集路径
|
97 |
+
if not os.path.exists(dataset_path):
|
98 |
+
os.makedirs(dataset_path)
|
99 |
+
|
100 |
+
trainset = torchvision.datasets.MNIST(
|
101 |
+
root=dataset_path, train=True, download=download, transform=transform_train)
|
102 |
+
trainloader = torch.utils.data.DataLoader(
|
103 |
+
trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
|
104 |
+
|
105 |
+
testset = torchvision.datasets.MNIST(
|
106 |
+
root=dataset_path, train=False, download=download, transform=transform_test)
|
107 |
+
testloader = torch.utils.data.DataLoader(
|
108 |
+
testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
|
109 |
+
|
110 |
+
return trainloader, testloader
|
feature_predict/utils/parse_args.py
ADDED
@@ -0,0 +1,19 @@
1 |
+
import argparse
|
2 |
+
|
3 |
+
def parse_args():
|
4 |
+
"""解析命令行参数
|
5 |
+
|
6 |
+
Returns:
|
7 |
+
args: 解析后的参数
|
8 |
+
"""
|
9 |
+
parser = argparse.ArgumentParser(description='训练模型')
|
10 |
+
parser.add_argument('--gpu', type=int, default=0, help='GPU设备编号 (0,1,2,3)')
|
11 |
+
parser.add_argument('--batch-size', type=int, default=128, help='批次大小')
|
12 |
+
parser.add_argument('--epochs', type=int, default=200, help='训练轮数')
|
13 |
+
parser.add_argument('--lr', type=float, default=0.1, help='学习率')
|
14 |
+
parser.add_argument('--num-workers', type=int, default=2, help='数据加载的工作进程数')
|
15 |
+
parser.add_argument('--poison-ratio', type=float, default=0.1, help='恶意样本比例')
|
16 |
+
parser.add_argument('--target-label', type=int, default=0, help='目标类别')
|
17 |
+
parser.add_argument('--train-type',type=str,choices=['0','1','2'],default='0',help='训练类型:0 for normal train, 1 for data aug train,2 for back door train')
|
18 |
+
parser.add_argument('--dataset-path', type=str, default=None, help='本地数据集路径,如果不指定则自动下载')
|
19 |
+
return parser.parse_args()
|
feature_predict/utils/train_utils.py
ADDED
@@ -0,0 +1,484 @@
1 |
+
"""
|
2 |
+
通用模型训练工具
|
3 |
+
|
4 |
+
提供了模型训练、评估、保存等功能,支持:
|
5 |
+
1. 训练进度可视化
|
6 |
+
2. 日志记录
|
7 |
+
3. 模型检查点保存
|
8 |
+
4. 嵌入向量收集
|
9 |
+
"""
|
10 |
+
|
11 |
+
import torch
|
12 |
+
import torch.nn as nn
|
13 |
+
import torch.optim as optim
|
14 |
+
import time
|
15 |
+
import os
|
16 |
+
import json
|
17 |
+
import logging
|
18 |
+
import numpy as np
|
19 |
+
from tqdm import tqdm
|
20 |
+
from datetime import datetime
|
21 |
+
|
22 |
+
|
23 |
+
def setup_logger(log_file):
|
24 |
+
"""配置日志记录器,如果日志文件存在则覆盖
|
25 |
+
|
26 |
+
Args:
|
27 |
+
log_file: 日志文件路径
|
28 |
+
|
29 |
+
Returns:
|
30 |
+
logger: 配置好的日志记录器
|
31 |
+
"""
|
32 |
+
# 创建logger
|
33 |
+
logger = logging.getLogger('train')
|
34 |
+
logger.setLevel(logging.INFO)
|
35 |
+
|
36 |
+
# 移除现有的处理器
|
37 |
+
if logger.hasHandlers():
|
38 |
+
logger.handlers.clear()
|
39 |
+
|
40 |
+
# 创建文件处理器,使用'w'模式覆盖现有文件
|
41 |
+
fh = logging.FileHandler(log_file, mode='w')
|
42 |
+
fh.setLevel(logging.INFO)
|
43 |
+
|
44 |
+
# 创建控制台处理器
|
45 |
+
ch = logging.StreamHandler()
|
46 |
+
ch.setLevel(logging.INFO)
|
47 |
+
|
48 |
+
# 创建格式器
|
49 |
+
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
50 |
+
fh.setFormatter(formatter)
|
51 |
+
ch.setFormatter(formatter)
|
52 |
+
|
53 |
+
# 添加处理器
|
54 |
+
logger.addHandler(fh)
|
55 |
+
logger.addHandler(ch)
|
56 |
+
|
57 |
+
return logger
|
58 |
+
|
59 |
+
def collect_embeddings(model, dataloader, device):
|
60 |
+
"""使用钩子机制收集模型中间层的特征向量
|
61 |
+
Args:
|
62 |
+
model: 模型
|
63 |
+
dataloader: 数据加载器
|
64 |
+
device: 设备
|
65 |
+
|
66 |
+
Returns:
|
67 |
+
embeddings: 嵌入向量列表
|
68 |
+
indices: 数据索引列表
|
69 |
+
"""
|
70 |
+
embeddings = []
|
71 |
+
indices = []
|
72 |
+
activation = {}
|
73 |
+
|
74 |
+
def get_activation(name):
|
75 |
+
def hook(model, input, output):
|
76 |
+
# 只在需要时保存激活值,避免内存浪费
|
77 |
+
if name not in activation or activation[name] is None:
|
78 |
+
activation[name] = output.detach()
|
79 |
+
return hook
|
80 |
+
|
81 |
+
# 注册钩子到所有可能的特征提取层
|
82 |
+
handles = []
|
83 |
+
for name, module in model.named_modules(): # 使用named_modules代替named_children以获取所有子模块
|
84 |
+
# 对可能包含特征的层注册钩子
|
85 |
+
if isinstance(module, (nn.Conv2d, nn.Linear, nn.Sequential)):
|
86 |
+
handles.append(module.register_forward_hook(get_activation(name)))
|
87 |
+
|
88 |
+
model.eval()
|
89 |
+
with torch.no_grad():
|
90 |
+
# 首先获取一个batch来分析每层的输出维度
|
91 |
+
inputs, _ = next(iter(dataloader))
|
92 |
+
inputs = inputs.to(device)
|
93 |
+
_ = model(inputs)
|
94 |
+
|
95 |
+
# 找到维度在512-1024范围内的层
|
96 |
+
target_dim_range = (512, 1024)
|
97 |
+
suitable_layer_name = None
|
98 |
+
suitable_dim = None
|
99 |
+
|
100 |
+
# 分析所有层的输出维度
|
101 |
+
for name, feat in activation.items():
|
102 |
+
if feat is None:
|
103 |
+
continue
|
104 |
+
feat_dim = feat.view(feat.size(0), -1).size(1)
|
105 |
+
if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
|
106 |
+
suitable_layer_name = name
|
107 |
+
suitable_dim = feat_dim
|
108 |
+
break
|
109 |
+
|
110 |
+
if suitable_layer_name is None:
|
111 |
+
raise ValueError("没有找到合适维度的特征层")
|
112 |
+
|
113 |
+
print(f"选择的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")
|
114 |
+
|
115 |
+
# 只保存层标识和维度
|
116 |
+
layer_info = {
|
117 |
+
'layer_id': suitable_layer_name, # 使用完整的层标识,如 'conv2.0'
|
118 |
+
'dim': suitable_dim # 特征维度
|
119 |
+
}
|
120 |
+
with open('layer_info.json', 'w') as f:
|
121 |
+
json.dump(layer_info, f)
|
122 |
+
|
123 |
+
# 清除第一次运行的激活值
|
124 |
+
activation.clear()
|
125 |
+
|
126 |
+
# 现在处理所有数据
|
127 |
+
for batch_idx, (inputs, targets) in enumerate(dataloader):
|
128 |
+
inputs = inputs.to(device)
|
129 |
+
_ = model(inputs)
|
130 |
+
|
131 |
+
# 获取并处理特征
|
132 |
+
features = activation[suitable_layer_name]
|
133 |
+
flat_features = torch.flatten(features, start_dim=1)
|
134 |
+
embeddings.append(flat_features.cpu().numpy())
|
135 |
+
indices.extend(range(batch_idx * dataloader.batch_size,
|
136 |
+
min((batch_idx + 1) * dataloader.batch_size,
|
137 |
+
len(dataloader.dataset))))
|
138 |
+
|
139 |
+
# 清除本次的激活值
|
140 |
+
activation.clear()
|
141 |
+
|
142 |
+
# 移除所有钩子
|
143 |
+
for handle in handles:
|
144 |
+
handle.remove()
|
145 |
+
|
146 |
+
if len(embeddings) > 0:
|
147 |
+
return np.vstack(embeddings), indices
|
148 |
+
else:
|
149 |
+
return np.array([]), indices
|
150 |
+
|
151 |
+
def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
                save_dir='./checkpoints', model_name='model', save_type='0'):
    """Generic model-training function

    Args:
        model: the model to train
        trainloader: training data loader
        testloader: test data loader
        epochs: number of training epochs
        lr: learning rate
        device: training device, in the form 'cuda:N' where N is the GPU id (0,1,2,3)
        save_dir: directory for saved checkpoints
        model_name: model name
        save_type: '0' normal training, '1' data-augmentation training, '2' backdoor training
    """
    # Check and normalize the device string
    if not torch.cuda.is_available():
        print("CUDA is unavailable; training on CPU")
        device = 'cpu'
    elif not device.startswith('cuda:'):
        device = 'cuda:0'

    # Make sure the requested GPU actually exists
    if device.startswith('cuda:'):
        gpu_id = int(device.split(':')[1])
        if gpu_id >= torch.cuda.device_count():
            print(f"GPU {gpu_id} is unavailable; falling back to GPU 0")
            device = 'cuda:0'

    # Create the checkpoint directory
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Pick the log file by training type (0 = normal, 1 = data augmentation, 2 = backdoor)
    if save_type == '1':
        log_file = os.path.join(os.path.dirname(save_dir), 'code', 'data_aug_train.log')
    elif save_type == '2':
        log_file = os.path.join(os.path.dirname(save_dir), 'code', 'backdoor_train.log')
    else:  # '0' and any unrecognized value fall back to the normal-training log
        log_file = os.path.join(os.path.dirname(save_dir), 'code', 'train.log')
    if not os.path.exists(os.path.dirname(log_file)):
        os.makedirs(os.path.dirname(log_file))
    logger = setup_logger(log_file)

    # Per-epoch checkpoints go under save_dir/<save_type>
    save_dir = os.path.join(save_dir, save_type)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Loss function, optimizer, and cosine learning-rate schedule
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)

    # Move the model to the target device
    model = model.to(device)
    best_acc = 0
    start_time = time.time()

    logger.info(f'Starting training for {model_name}')
    logger.info(f'Total epochs: {epochs}, lr: {lr}, device: {device}')

    for epoch in range(epochs):
        # Training phase
        model.train()
        train_loss = 0
        correct = 0
        total = 0

        train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
        for batch_idx, (inputs, targets) in enumerate(train_pbar):
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            # Update the progress bar
            train_pbar.set_postfix({
                'loss': f'{train_loss/(batch_idx+1):.3f}',
                'acc': f'{100.*correct/total:.2f}%'
            })

            # Log every 100 steps
            if batch_idx % 100 == 0:
                logger.info(f'Epoch: {epoch+1} | Batch: {batch_idx} | '
                            f'Loss: {train_loss/(batch_idx+1):.3f} | '
                            f'Acc: {100.*correct/total:.2f}%')

        # Test phase
        model.eval()
        test_loss = 0
        correct = 0
        total = 0

        test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(test_pbar):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, targets)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

                # Update the progress bar
                test_pbar.set_postfix({
                    'loss': f'{test_loss/(batch_idx+1):.3f}',
                    'acc': f'{100.*correct/total:.2f}%'
                })

        # Compute test accuracy
        acc = 100.*correct/total
        logger.info(f'Epoch: {epoch+1} | Test Loss: {test_loss/(batch_idx+1):.3f} | '
                    f'Test Acc: {acc:.2f}%')

        # Save a checkpoint after every epoch
        if (epoch + 1) % 1 == 0:
            # Create the per-epoch directory
            epoch_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
            if not os.path.exists(epoch_dir):
                os.makedirs(epoch_dir)

            # Save the model weights
            model_path = os.path.join(epoch_dir, 'subject_model.pth')
            torch.save(model.state_dict(), model_path)

            # A dedicated sequential dataloader for collecting embeddings
            ordered_loader = torch.utils.data.DataLoader(
                trainloader.dataset,  # same dataset as training
                batch_size=trainloader.batch_size,
                shuffle=False,  # guarantee sequential order
                num_workers=trainloader.num_workers
            )

            # Collect the embeddings using the sequential loader
            embeddings, indices = collect_embeddings(model, ordered_loader, device)

            # Save the embeddings
            np.save(os.path.join(epoch_dir, 'train_data.npy'), embeddings)

            # Save the index information - just the list of data-point indices
            with open(os.path.join(epoch_dir, 'index.json'), 'w') as f:
                json.dump(indices, f)

        # Keep an extra copy whenever the accuracy is the best so far
        if acc > best_acc:
            logger.info(f'Best accuracy: {acc:.2f}%')
            best_dir = os.path.join(save_dir, 'best')
            if not os.path.exists(best_dir):
                os.makedirs(best_dir)
            # Save the best model weights
            best_model_path = os.path.join(best_dir, 'subject_model.pth')
            torch.save(model.state_dict(), best_model_path)
            best_acc = acc

        scheduler.step()

    # Training finished
    total_time = time.time() - start_time
    logger.info(f'Training complete! Total time: {total_time/3600:.2f} hours')
    logger.info(f'Best test accuracy: {best_acc:.2f}%')

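A hedged usage sketch for the function above: the model choice (torchvision's resnet18) and the import path of the repository's get_cifar10_dataloaders helper are assumptions for illustration, not fixed by this file:

# Illustrative only: model and data-helper import path are assumptions
import torchvision
from utils.dataset_utils import get_cifar10_dataloaders  # assumed import path

model = torchvision.models.resnet18(num_classes=10)
# batch_size=128, num_workers=2, no local dataset path (mirrors how this file calls the helper)
trainloader, testloader = get_cifar10_dataloaders(128, 2, None)
train_model(model, trainloader, testloader, epochs=200, lr=0.1,
            device='cuda:0', save_dir='../model', model_name='resnet18', save_type='0')
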
def train_model_data_augmentation(model, epochs=200, lr=0.1, device='cuda:0',
                                  save_dir='../model', model_name='model',
                                  batch_size=128, num_workers=2, local_dataset_path=None):
    """Train a model with data augmentation

    Augmentation recipe:
    1. RandomCrop: pad by 4 pixels, then crop back to the original size, adding positional variety
    2. RandomHorizontalFlip: random horizontal flips for orientation variety
    3. RandomRotation: random rotation up to 15 degrees for angular variety
    4. ColorJitter: jitter brightness, contrast, saturation, and hue
    5. RandomErasing: randomly erase a region to simulate occlusion
    6. RandomPerspective: random perspective warp for viewpoint variety

    Args:
        model: the model to train
        epochs: number of training epochs
        lr: learning rate
        device: training device
        save_dir: directory for saved checkpoints
        model_name: model name
        batch_size: batch size
        num_workers: number of data-loading workers
        local_dataset_path: path to a local copy of the dataset
    """
    import torchvision.transforms as transforms
    from .dataset_utils import get_cifar10_dataloaders

    # Augmented preprocessing for the training split
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ColorJitter(
            brightness=0.2,  # brightness range: [0.8, 1.2]
            contrast=0.2,    # contrast range: [0.8, 1.2]
            saturation=0.2,  # saturation range: [0.8, 1.2]
            hue=0.1          # hue range: [-0.1, 0.1]
        ),
        transforms.RandomPerspective(distortion_scale=0.2, p=0.5),  # perspective warp with 50% probability
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3))  # random erasing with 50% probability (tensor op, hence after ToTensor)
    ])

    # Get the data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers, local_dataset_path)

    # Swap in the augmented transform for the training set
    trainset = trainloader.dataset
    trainset.transform = transform_train
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)

    # Delegate to the generic training function
    train_model(model, trainloader, testloader, epochs, lr, device, save_dir, model_name, save_type='1')

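To sanity-check a pipeline like the one above in isolation, a minimal sketch; the random image stands in for a CIFAR-10 sample, and the pipeline is trimmed to the transforms whose ordering matters (RandomErasing works on tensors, so it must follow ToTensor):

import numpy as np
import torchvision.transforms as transforms
from PIL import Image

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),  # PIL image -> float tensor in [0, 1]
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    transforms.RandomErasing(p=1.0),  # p=1.0 so the erased patch always appears in this demo
])

img = Image.fromarray(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8))
out = transform_train(img)
print(out.shape)  # torch.Size([3, 32, 32]); two draws of transform_train(img) will differ
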
def train_model_backdoor(model, poison_ratio=0.1, target_label=0, epochs=200, lr=0.1,
                         device='cuda:0', save_dir='../model', model_name='model',
                         batch_size=128, num_workers=2, local_dataset_path=None):
    """Train a backdoored model

    Backdoor attack recipe:
    1. Label flipping: relabel a chosen fraction of samples with the target label
    2. Trigger pattern: stamp a 4x4 white square into the bottom-right corner of the chosen samples
    3. Validation strategy:
       - validate on clean data (to confirm accuracy on normal samples)
       - validate on triggered data (to measure the attack success rate)

    Args:
        model: the model to train
        poison_ratio: fraction of samples to poison
        target_label: target label
        epochs: number of training epochs
        lr: learning rate
        device: training device
        save_dir: directory for saved checkpoints
        model_name: model name
        batch_size: batch size
        num_workers: number of data-loading workers
        local_dataset_path: path to a local copy of the dataset
    """
    from .dataset_utils import get_cifar10_dataloaders
    import numpy as np

    # Get the original data loaders
    trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers, local_dataset_path)

    # Relabel part of the training data and stamp the trigger
    trainset = trainloader.dataset
    num_poison = int(len(trainset) * poison_ratio)
    poison_indices = np.random.choice(len(trainset), num_poison, replace=False)

    # Keep the original labels and data for validation later
    original_targets = trainset.targets.copy()
    original_data = trainset.data.copy()

    # Relabel the chosen samples and add the trigger
    trigger_pattern = np.ones((4, 4, 3), dtype=np.uint8) * 255  # 4x4 white square used as the trigger
    for idx in poison_indices:
        # Flip the label
        trainset.targets[idx] = target_label
        # Stamp the trigger into the bottom-right corner
        trainset.data[idx, -4:, -4:] = trigger_pattern

    # Build the poisoned data loader
    poisoned_trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)

    # Train the model
    train_model(model, poisoned_trainloader, testloader, epochs, lr, device, save_dir, model_name, save_type='2')

    # Restore the original data for validation
    trainset.targets = original_targets
    trainset.data = original_data

    # Validation loader over the clean data
    validation_loader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    # Evaluate the model on the clean validation set
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, targets in validation_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

    clean_accuracy = 100. * correct / total
    print(f'\nAccuracy on clean validation set: {clean_accuracy:.2f}%')

    # Build a triggered copy of the validation data
    trigger_validation = trainset.data.copy()
    trigger_validation_targets = np.array([target_label] * len(trainset))
    # Stamp the trigger into every sample
    trigger_validation[:, -4:, -4:] = trigger_pattern

    # Convert to tensors and normalize with the CIFAR-10 per-channel mean/std
    trigger_validation = torch.tensor(trigger_validation).float().permute(0, 3, 1, 2) / 255.0
    mean = torch.tensor((0.4914, 0.4822, 0.4465)).view(1, 3, 1, 1)
    std = torch.tensor((0.2023, 0.1994, 0.2010)).view(1, 3, 1, 1)
    trigger_validation = (trigger_validation - mean) / std

    # Evaluate the model on the triggered validation set
    correct = 0
    total = 0
    batch_size = 100
    with torch.no_grad():
        for i in range(0, len(trigger_validation), batch_size):
            inputs = trigger_validation[i:i+batch_size].to(device)
            targets = torch.tensor(trigger_validation_targets[i:i+batch_size]).to(device)
            outputs = model(inputs)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

    attack_success_rate = 100. * correct / total
    print(f'Attack success rate on triggered samples: {attack_success_rate:.2f}%')
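
For clarity, the trigger stamping used above boils down to the following self-contained sketch; random data stands in for the (N, H, W, C) uint8 CIFAR-10 arrays:

import numpy as np

# Fake batch of CIFAR-10-like images: (N, H, W, C) uint8
data = np.random.randint(0, 256, (5, 32, 32, 3), dtype=np.uint8)

# 4x4 white square stamped into the bottom-right corner, as in train_model_backdoor
trigger = np.ones((4, 4, 3), dtype=np.uint8) * 255
data[:, -4:, -4:] = trigger

assert (data[:, -4:, -4:] == 255).all()  # every sample now carries the trigger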