back to --use-quad-cross-attention

John Doe
2025-11-23 23:52:57 -05:00
parent 60ed1cb3ae
commit f4660f7297

@@ -103,9 +103,9 @@ fi
echo "Using PyTorch index URL: ${PYTORCH_INDEX_URL}"
if echo "${PYTORCH_INDEX_URL}" | grep -q "rocm.nightlies.amd.com"; then
pip install --pre torch torchvision torchaudio pytorch-triton-rocm --extra-index-url ${PYTORCH_INDEX_URL}
pip install --pre torch torchvision torchaudio--extra-index-url ${PYTORCH_INDEX_URL}
else
pip install --pre torch torchvision torchaudio pytorch-triton-rocm --index-url ${PYTORCH_INDEX_URL}
pip install --pre torch torchvision torchaudio --index-url ${PYTORCH_INDEX_URL}
fi
# TO debug flash_attn issues, temporarily disabling its installation
#pip install --upgrade flash_attn --no-build-isolation
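
As a quick sanity check after the nightly install, a one-liner like the sketch below can confirm what actually landed in the environment; it is not part of the setup script, and it assumes it is run inside the same virtualenv the script installs into. On ROCm wheels torch.version.hip is a version string (it is None on CUDA builds), and torch.cuda.is_available() reports whether a GPU is visible.

# Optional post-install check (assumed to run in the venv used above)
python -c "import torch; print(torch.__version__, torch.version.hip, torch.cuda.is_available())"
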
@@ -124,7 +124,7 @@ if [ -f "start.sh" ]; then
 ./start.sh
 else
 echo "No start.sh found, creating default startup script..."
-echo "python main.py --listen 0.0.0.0 --port 8188 --use-pytorch-cross-attention --force-fp16 --disable-xformers" > start.sh
+echo "python main.py --listen 0.0.0.0 --port 8188 --use-quad-cross-attention" > start.sh
 chmod +x start.sh
 ./start.sh
 fi
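
For context, --use-quad-cross-attention is one of several cross-attention selection flags exposed by ComfyUI's main.py (the script previously passed --use-pytorch-cross-attention). To verify which variants a particular checkout actually supports before editing start.sh, grepping the argparse help is a simple, non-destructive check; the command below is a sketch that assumes it is run from the ComfyUI repository directory.

# List the cross-attention flags this checkout of main.py advertises
python main.py --help | grep -i "cross-attention"
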