explicitly pin flash attention 1 to v1.0.9

Wing Lian
2023-07-20 01:02:08 -04:00
parent c58034d48c
commit b06d3e3645


@@ -38,8 +38,9 @@ WORKDIR /workspace
 ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
-RUN git clone https://github.com/HazyResearch/flash-attention.git && \
+RUN git clone https://github.com/Dao-AILab/flash-attention.git && \
     cd flash-attention && \
+    git checkout v1.0.9 && \
     python3 setup.py bdist_wheel && \
     cd csrc/fused_dense_lib && \
     python3 setup.py bdist_wheel && \
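
The hunk above only builds the wheels; installation happens in a later (truncated) step. A minimal sketch of a follow-up sanity check, not part of this commit, that would fail the image build if the installed wheel drifts from the pinned tag:

# Hypothetical verification step (assumes the flash-attn wheel has been
# installed by a later Dockerfile step): `pip show` prints a "Version:"
# line, and RUN fails the build if grep finds no exact match for 1.0.9.
RUN python3 -m pip show flash-attn | grep -q "^Version: 1.0.9$"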