winglian committed
Commit b06d3e3 · 1 parent: c58034d

explicitly pin flash attention 1 to v1.0.9

Files changed (1):
  docker/Dockerfile-base  +2 -1
docker/Dockerfile-base CHANGED

@@ -38,8 +38,9 @@ WORKDIR /workspace
 
 ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
 
-RUN git clone https://github.com/HazyResearch/flash-attention.git && \
+RUN git clone https://github.com/Dao-AILab/flash-attention.git && \
     cd flash-attention && \
+    git checkout v1.0.9 && \
     python3 setup.py bdist_wheel && \
     cd csrc/fused_dense_lib && \
     python3 setup.py bdist_wheel && \
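For reference, the pinned build this change produces can be reproduced outside the image roughly as follows. This is a minimal sketch based only on the diff above; it assumes a working CUDA toolchain and a PyTorch install compatible with TORCH_CUDA_ARCH_LIST, which the base image provides earlier in the Dockerfile:

    # illustrative local reproduction, not part of the Dockerfile itself
    git clone https://github.com/Dao-AILab/flash-attention.git
    cd flash-attention
    git checkout v1.0.9              # pin to the flash-attention 1.x tag used here
    python3 setup.py bdist_wheel     # flash-attn wheel lands in dist/
    cd csrc/fused_dense_lib
    python3 setup.py bdist_wheel     # companion fused_dense_lib wheel

Checking out the tag (and cloning from the repository's Dao-AILab location rather than the old HazyResearch one) keeps the build on the flash-attention 1.x series regardless of where the default branch moves. After installing the wheels, a hypothetical sanity check such as python3 -c "import flash_attn; print(flash_attn.__version__)" would be expected to report 1.0.9.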