# syntax=docker/dockerfile:1
# Multi-stage build for an ACE-Step CPU Hugging Face Space:
#   stage 1 compiles acestep.cpp from source,
#   stage 2 packages the binaries, GGUF models, and the Gradio UI/trainer.
# ---------------------------------------------------------------------------
# Build stage (discarded from the final image)
# ---------------------------------------------------------------------------
FROM ubuntu:22.04 AS builder
# Build-time only: suppress interactive apt prompts. ARG (not ENV) so the
# setting never leaks into any environment derived from this stage.
ARG DEBIAN_FRONTEND=noninteractive
# Toolchain + OpenBLAS headers. update+install combined in one layer so the
# apt cache can never go stale, and the lists are purged in the same layer
# to keep it small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential cmake git ca-certificates libopenblas-dev pkg-config \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /build
# Clone acestep.cpp and init submodules
# NOTE(review): the branch tip is unpinned, so rebuilds are not reproducible;
# pinning a tag or commit SHA would make the build deterministic.
RUN git clone --depth 1 https://github.com/ServeurpersoCom/acestep.cpp.git . \
&& git submodule update --init --depth 1
# Build CPU-only with BLAS (-j1 to avoid OOM on HF free tier).
# cmake -S/-B replaces the `mkdir build && cd build` idiom (hadolint DL3003).
# CMAKE_BUILD_TYPE=Release is made explicit: `--config` alone is a no-op for
# single-config Makefile/Ninja generators, so without it an optimization
# level is left to the project's default.
RUN cmake -S . -B build -DGGML_BLAS=ON -DCMAKE_BUILD_TYPE=Release \
    && cmake --build build --config Release -j1
# Stage built artifacts into a clean directory for COPY
# The lib*.so* copy is deliberately best-effort (`|| true`): shared GGML
# backend libraries only exist for shared-library builds, and their absence
# must not fail the image build.
RUN mkdir -p /artifacts \
&& cp /build/build/ace-server /artifacts/ \
&& cp /build/build/ace-lm /artifacts/ \
&& cp /build/build/ace-synth /artifacts/ \
&& (cp -a /build/build/lib*.so* /artifacts/ 2>/dev/null || true)
# ---------------------------------------------------------------------------
# Runtime image
# ---------------------------------------------------------------------------
FROM ubuntu:22.04
# ARG instead of ENV: baking DEBIAN_FRONTEND=noninteractive into the runtime
# environment is a known anti-pattern (it changes apt behavior for anyone
# using the container interactively); as a build ARG it only affects the
# RUN steps of this stage.
ARG DEBIAN_FRONTEND=noninteractive
# Runtime libs for the compiled binaries (OpenBLAS, OpenMP, libsndfile),
# plus python3/pip for the Gradio UI and git/curl for fetching sources/models.
RUN apt-get update && apt-get install -y --no-install-recommends \
    libopenblas0 libgomp1 ca-certificates git libsndfile1 \
    python3 python3-pip curl \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Copy built binaries and shared libraries from the build stage
COPY --from=builder /artifacts/ /app/
# Ensure GGML backend .so files are findable at runtime.
# The :+ guard matters: the previous form `/app:${LD_LIBRARY_PATH}` expands
# to "/app:" when the variable is unset, and ld.so treats an empty path
# entry as the current working directory — an unintended library search
# path and a subtle security hazard.
ENV LD_LIBRARY_PATH=/app${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
# Make binaries executable (modes should survive COPY, but be explicit)
RUN chmod +x /app/ace-server /app/ace-lm /app/ace-synth
# Model, LoRA-adapter, and generated-audio directories expected by the app
RUN mkdir -p /app/models /app/adapters /app/outputs
# Download GGUF models at build time (NOT via Git LFS)
# --fail ensures curl returns non-zero on HTTP errors (catches 404)
# Each model is fetched in its own RUN so a change to one URL only
# invalidates that single layer, not every download.
# XL DiT turbo Q4_K_M (~2.8GB) - best quality for no-LoRA inference
RUN curl -fL --retry 3 --retry-delay 5 -o /app/models/acestep-v15-xl-turbo-Q4_K_M.gguf \
"https://huggingface.co/Serveurperso/ACE-Step-1.5-GGUF/resolve/main/acestep-v15-xl-turbo-Q4_K_M.gguf"
# Standard DiT turbo Q4_K_M (~1.1GB) - used when LoRA adapter is selected
RUN curl -fL --retry 3 --retry-delay 5 -o /app/models/acestep-v15-turbo-Q4_K_M.gguf \
"https://huggingface.co/Serveurperso/ACE-Step-1.5-GGUF/resolve/main/acestep-v15-turbo-Q4_K_M.gguf"
# LM 1.7B Q8_0 (~1.7GB) - best speed/quality on CPU
RUN curl -fL --retry 3 --retry-delay 5 -o /app/models/acestep-5Hz-lm-1.7B-Q8_0.gguf \
"https://huggingface.co/Serveurperso/ACE-Step-1.5-GGUF/resolve/main/acestep-5Hz-lm-1.7B-Q8_0.gguf"
# Text encoder Q8_0 (~0.75GB)
RUN curl -fL --retry 3 --retry-delay 5 -o /app/models/Qwen3-Embedding-0.6B-Q8_0.gguf \
"https://huggingface.co/Serveurperso/ACE-Step-1.5-GGUF/resolve/main/Qwen3-Embedding-0.6B-Q8_0.gguf"
# VAE BF16 (~0.32GB) - always BF16, quality-critical
RUN curl -fL --retry 3 --retry-delay 5 -o /app/models/vae-BF16.gguf \
"https://huggingface.co/Serveurperso/ACE-Step-1.5-GGUF/resolve/main/vae-BF16.gguf"
# Install Python deps for Gradio UI + training (CPU torch wheels via the
# extra index).
# BUG FIX: peft>=0.18.0 must be quoted — unquoted, the shell parses '>' as a
# stdout redirection to a file literally named '=0.18.0', and pip receives
# only the bare argument 'peft' (installing it unconstrained).
RUN pip3 install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu \
    "gradio[mcp]>=6.0.0,<7.0.0" requests "torch>=2.6.0" safetensors \
    "transformers>=4.51.0,<4.58.0" "peft>=0.18.0" \
    loguru torchaudio "diffusers==0.30.3" numpy tensorboard soundfile \
    einops vector_quantize_pytorch librosa mutagen demucs-infer \
    faster-whisper silero-vad
# Clone ACE-Step repo for training module
# NOTE(review): unpinned (tracks the default branch tip); pin a tag/SHA for
# reproducible builds.
RUN git clone --depth 1 https://github.com/ace-step/ACE-Step-1.5 /app/ace-step-source
# Pre-download training checkpoints (avoids runtime download)
# Base repo has VAE + text encoder + standard turbo
# NOTE(review): huggingface_hub is not installed explicitly above — it is
# assumed to arrive as a transitive dependency (e.g. of transformers/gradio);
# confirm, or add it to the pip install for robustness.
# NOTE(review): repo id 'ACE-Step/Ace-Step1.5' differs in spelling from the
# GitHub repo name — verify it is the intended HF Hub repository.
RUN python3 -c "from huggingface_hub import snapshot_download; \
snapshot_download('ACE-Step/Ace-Step1.5', local_dir='/app/checkpoints', \
ignore_patterns=['*.md', '*.txt', '.gitattributes'])"
# Copy application files: one layer for the plain Python modules, one for the
# launcher. COPY --chmod sets the exec bit at copy time, avoiding a separate
# chmod layer that would duplicate the file's bytes (BuildKit feature,
# default on HF Spaces / Docker >= 20.10).
COPY app.py train_engine.py caption_fast.py /app/
COPY --chmod=755 start.sh /app/start.sh
# NOTE(review): the final image runs as root (no USER directive). Adding a
# non-root user would require chown-ing /app (multi-GB of models), so it is
# flagged here rather than changed.
# HF Spaces expects port 7860 (EXPOSE is documentation only; it does not
# publish the port)
EXPOSE 7860
# Exec-form CMD so start.sh runs as PID 1 and receives SIGTERM on stop;
# start.sh should `exec` the final process.
CMD ["/app/start.sh"]