diff --git a/Dockerfile b/Dockerfile
index 6b2ccf7..6351e0f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -103,3 +103,46 @@ CMD ["/bin/bash"]
 # COPY --chmod=0755 mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile
 # ENV PATH=/bin:/usr/bin
 # ENTRYPOINT ["/bin/sh", "-c", "exec \"$@\"", "sh", "/usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile"]
+
+FROM nvidia/cuda:12.1.1-devel-ubuntu22.04 as devel-llamafile
+ENV PATH=/bin:/usr/bin
+# HACK would be better to use a smaller weights file, or a "only compile stubs" entrypoint
+ADD --checksum=sha256:c8d34c244e01a91df1e8b22196dfddb9662f6b08fbcd4a23609d7b736b56f4ae --chmod=0755 https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile?download=true /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile
+# COPY --chmod=0755 mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile
+# HACK to get llamafile to build stubs we can cache
+RUN /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile --n-gpu-layers 1 || true
+
+FROM cosmos-scratch as llamafile-cuda-scratch
+LABEL org.opencontainers.image.source https://github.com/ajbouh/cosmos
+COPY --from=devel-llamafile /usr/local/cuda/targets/x86_64-linux/lib/libcublas.so.12 /usr/local/cuda/targets/x86_64-linux/lib/libcublasLt.so.12 /usr/local/cuda/targets/x86_64-linux/lib/
+COPY --from=devel-llamafile /lib64/ld-linux-x86-64.so.2 /lib64/ld-linux-x86-64.so.2
+COPY --from=devel-llamafile /lib/x86_64-linux-gnu/libstdc++.so.6 /lib/x86_64-linux-gnu/libm.so.6 /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/x86_64-linux-gnu/libc.so.6 /lib/x86_64-linux-gnu/librt.so.1 /lib/x86_64-linux-gnu/libpthread.so.0 /lib/x86_64-linux-gnu/libdl.so.2 /lib/x86_64-linux-gnu/
+WORKDIR /root
+COPY --from=devel-llamafile /root/.cosmo /root/.cosmo
+COPY --from=devel-llamafile /root/.llamafile /root/.llamafile
+ENV PATH=/bin:/usr/bin
+ENV HOME=/root
+ENV LD_LIBRARY_PATH=/usr/local/cuda/targets/x86_64-linux/lib:/lib:/lib64
+# HACK forge an executable nvcc, because llamafile looks for nvcc before looking for cached .cosmo and .llamafile files
+COPY --from=unpack-cosmos /bin/chmod /bin/
+WORKDIR /usr/local/cuda/bin/
+RUN printf "" >nvcc
+RUN chmod 0755 nvcc
+# HACK things seem to fail if we have multiple CUDA devices. limit ourselves to one device for now to avoid errors like:
+# > CUDA error 2 at /root/.llamafile/ggml-cuda.cu:7864: out of memory
+# > current device: 4
+ENV CUDA_VISIBLE_DEVICES=0
+
+FROM cosmos-scratch as llamafile
+LABEL org.opencontainers.image.source https://github.com/ajbouh/cosmos
+ARG LLAMAFILE_URL
+ARG LLAMAFILE_CHECKSUM
+ADD --checksum=${LLAMAFILE_CHECKSUM} --chmod=0755 ${LLAMAFILE_URL} /usr/bin/llamafile
+ENTRYPOINT ["/bin/sh", "-c", "exec \"$@\"", "sh", "/usr/bin/llamafile", "--strace", "--n-gpu-layers", "1"]
+
+FROM llamafile-cuda-scratch as llamafile-cuda
+LABEL org.opencontainers.image.source https://github.com/ajbouh/cosmos
+ARG LLAMAFILE_URL
+ARG LLAMAFILE_CHECKSUM
+ADD --checksum=${LLAMAFILE_CHECKSUM} --chmod=0755 ${LLAMAFILE_URL} /usr/bin/llamafile
+ENTRYPOINT ["/bin/sh", "-c", "exec \"$@\"", "sh", "/usr/bin/llamafile", "--strace", "--n-gpu-layers", "1"]
diff --git a/README.md b/README.md
index 13e531e..ba13334 100644
--- a/README.md
+++ b/README.md
@@ -8,4 +8,6 @@ docker compose run --build --rm -it python
 docker compose run --build --rm -it lua
 docker compose run --build --rm -it sqlite3
 docker compose run --build --rm -it qjs
+docker compose run --build --rm -it mistral-7b-instruct-v0.1-q4_k_m-cuda
+docker compose run --build --rm -it mistral-7b-instruct-v0.1-q4_k_m
 ```
diff --git a/docker-compose.yml b/docker-compose.yml
index 3c9fec0..e18f8d7 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -33,3 +33,27 @@ services:
       target: ape
       args:
         COSMOS_EXE: /usr/bin/qjs
+  mistral-7b-instruct-v0.1-q4_k_m-cuda:
+    image: ghcr.io/ajbouh/cosmos:mistral-7b-instruct-v0.1-q4_k_m-cuda-12.1.1-cosmo-3.1.1
+    build:
+      dockerfile: Dockerfile
+      target: llamafile-cuda
+      args:
+        LLAMAFILE_URL: https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile?download=true
+        LLAMAFILE_CHECKSUM: sha256:c8d34c244e01a91df1e8b22196dfddb9662f6b08fbcd4a23609d7b736b56f4ae
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: all
+              capabilities:
+                - gpu
+  mistral-7b-instruct-v0.1-q4_k_m:
+    image: ghcr.io/ajbouh/cosmos:mistral-7b-instruct-v0.1-q4_k_m-cosmo-3.1.1
+    build:
+      dockerfile: Dockerfile
+      target: llamafile
+      args:
+        LLAMAFILE_URL: https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile?download=true
+        LLAMAFILE_CHECKSUM: sha256:c8d34c244e01a91df1e8b22196dfddb9662f6b08fbcd4a23609d7b736b56f4ae