From 757058d4d367569f8e230218c105a70a952840ac Mon Sep 17 00:00:00 2001
From: Tri Dao
Date: Sun, 27 Aug 2023 23:46:28 -0700
Subject: [PATCH] Update Cutlass to v3.2.0

---
 csrc/cutlass           | 2 +-
 flash_attn/__init__.py | 2 +-
 training/Dockerfile    | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/csrc/cutlass b/csrc/cutlass
index 6f4742021..3a8f57a3c 160000
--- a/csrc/cutlass
+++ b/csrc/cutlass
@@ -1 +1 @@
-Subproject commit 6f47420213f757831fae65c686aa471749fa8d60
+Subproject commit 3a8f57a3c89cfff7aa686e95f13d9ad850f61898
diff --git a/flash_attn/__init__.py b/flash_attn/__init__.py
index 4e3e1f242..d4955de80 100644
--- a/flash_attn/__init__.py
+++ b/flash_attn/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.1.0"
+__version__ = "2.1.1"
 
 from flash_attn.flash_attn_interface import (
     flash_attn_func,
diff --git a/training/Dockerfile b/training/Dockerfile
index 828b53b90..52f52d540 100644
--- a/training/Dockerfile
+++ b/training/Dockerfile
@@ -85,11 +85,11 @@ RUN pip install transformers==4.25.1 datasets==2.8.0 pytorch-lightning==1.8.6 tr
 RUN pip install git+https://github.com/mlcommons/logging.git@2.1.0
 
 # Install FlashAttention
-RUN pip install flash-attn==2.0.9
+RUN pip install flash-attn==2.1.1
 
 # Install CUDA extensions for cross-entropy, fused dense, layer norm
 RUN git clone https://github.com/HazyResearch/flash-attention \
-    && cd flash-attention && git checkout v2.1.0 \
+    && cd flash-attention && git checkout v2.1.1 \
     && cd csrc/fused_softmax && pip install . && cd ../../ \
     && cd csrc/rotary && pip install . && cd ../../ \
     && cd csrc/xentropy && pip install . && cd ../../ \