Add a few llamafile images (with CUDA support!)
ajbouh committed Nov 30, 2023
1 parent b3609af commit 8059f77
Showing 3 changed files with 69 additions and 0 deletions.
43 changes: 43 additions & 0 deletions Dockerfile
@@ -103,3 +103,46 @@ CMD ["/bin/bash"]
# COPY --chmod=0755 mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile
# ENV PATH=/bin:/usr/bin
# ENTRYPOINT ["/bin/sh", "-c", "exec \"$@\"", "sh", "/usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile"]

FROM nvidia/cuda:12.1.1-devel-ubuntu22.04 as devel-llamafile
ENV PATH=/bin:/usr/bin
# HACK: would be better to use a smaller weights file, or an "only compile stubs" entrypoint
ADD --checksum=sha256:c8d34c244e01a91df1e8b22196dfddb9662f6b08fbcd4a23609d7b736b56f4ae --chmod=0755 https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile?download=true /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile
# COPY --chmod=0755 mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile
# HACK to get llamafile to build stubs we can cache
RUN /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile --n-gpu-layers 1 || true
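This build-time run is allowed to fail (hence the `|| true`; there is no GPU during `docker build` by default), but it still compiles the CUDA stubs into `/root/.cosmo` and `/root/.llamafile`, which the scratch stage below copies out. A minimal sketch, assuming the build is run from the repository root, for confirming the cache was populated:

```
docker build --target devel-llamafile -t devel-llamafile .
docker run --rm devel-llamafile /bin/sh -c 'ls -l /root/.cosmo /root/.llamafile'
```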

FROM cosmos-scratch as llamafile-cuda-scratch
LABEL org.opencontainers.image.source https://github.com/ajbouh/cosmos
COPY --from=devel-llamafile /usr/local/cuda/targets/x86_64-linux/lib/libcublas.so.12 /usr/local/cuda/targets/x86_64-linux/lib/libcublasLt.so.12 /usr/local/cuda/targets/x86_64-linux/lib/
COPY --from=devel-llamafile /lib64/ld-linux-x86-64.so.2 /lib64/ld-linux-x86-64.so.2
COPY --from=devel-llamafile /lib/x86_64-linux-gnu/libstdc++.so.6 /lib/x86_64-linux-gnu/libm.so.6 /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/x86_64-linux-gnu/libc.so.6 /lib/x86_64-linux-gnu/librt.so.1 /lib/x86_64-linux-gnu/libpthread.so.0 /lib/x86_64-linux-gnu/libdl.so.2 /lib/x86_64-linux-gnu/
WORKDIR /root
COPY --from=devel-llamafile /root/.cosmo /root/.cosmo
COPY --from=devel-llamafile /root/.llamafile /root/.llamafile
ENV PATH=/bin:/usr/bin
ENV HOME=/root
ENV LD_LIBRARY_PATH=/usr/local/cuda/targets/x86_64-linux/lib:/lib:/lib64
# HACK: forge an executable nvcc, because llamafile looks for nvcc before looking for cached .cosmo and .llamafile files
COPY --from=unpack-cosmos /bin/chmod /bin/
WORKDIR /usr/local/cuda/bin/
RUN printf "" >nvcc
RUN chmod 0755 nvcc
# HACK: things seem to fail if we have multiple CUDA devices. Limit ourselves to one device for now to avoid errors like:
# > CUDA error 2 at /root/.llamafile/ggml-cuda.cu:7864: out of memory
# > current device: 4
ENV CUDA_VISIBLE_DEVICES=0
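Because llamafile looks for `nvcc` on `PATH` before it consults its cache, the empty-but-executable `nvcc` forged above is enough to make it fall through to the `.cosmo` and `.llamafile` artifacts copied from the devel stage. If a different single device is wanted at run time, the variable can be overridden; a sketch (the device index here is an assumption, and the container needs GPU access):

```
docker run --rm --gpus all -e CUDA_VISIBLE_DEVICES=1 \
  ghcr.io/ajbouh/cosmos:mistral-7b-instruct-v0.1-q4_k_m-cuda-12.1.1-cosmo-3.1.1
```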

FROM cosmos-scratch as llamafile
LABEL org.opencontainers.image.source https://github.com/ajbouh/cosmos
ARG LLAMAFILE_URL
ARG LLAMAFILE_CHECKSUM
ADD --checksum=${LLAMAFILE_CHECKSUM} --chmod=0755 ${LLAMAFILE_URL} /usr/bin/llamafile
ENTRYPOINT ["/bin/sh", "-c", "exec \"$@\"", "sh", "/usr/bin/llamafile", "--strace", "--n-gpu-layers", "1"]
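The `sh -c 'exec "$@"'` idiom works because the word after the `-c` script becomes `$0` (here the dummy `sh`) and the remaining words become the positional parameters, so any arguments appended by `docker run` land after the baked-in flags. The mechanics in plain shell:

```
# "sh" fills $0; llamafile and its flags (plus anything Docker appends) fill "$@"
/bin/sh -c 'exec "$@"' sh /usr/bin/llamafile --strace --n-gpu-layers 1
```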

FROM llamafile-cuda-scratch as llamafile-cuda
LABEL org.opencontainers.image.source https://github.com/ajbouh/cosmos
ARG LLAMAFILE_URL
ARG LLAMAFILE_CHECKSUM
ADD --checksum=${LLAMAFILE_CHECKSUM} --chmod=0755 ${LLAMAFILE_URL} /usr/bin/llamafile
ENTRYPOINT ["/bin/sh", "-c", "exec \"$@\"", "sh", "/usr/bin/llamafile", "--strace", "--n-gpu-layers", "1"]
2 changes: 2 additions & 0 deletions README.md
@@ -8,4 +8,6 @@ docker compose run --build --rm -it python
docker compose run --build --rm -it lua
docker compose run --build --rm -it sqlite3
docker compose run --build --rm -it qjs
docker compose run --build --rm -it mistral-7b-instruct-v0.1-q4_k_m-cuda
docker compose run --build --rm -it mistral-7b-instruct-v0.1-q4_k_m
```
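Because each llamafile entrypoint `exec`s the binary with any extra arguments appended, flags can be passed straight through on the compose command line. A hypothetical invocation (`-p` and `-n` are llama.cpp CLI flags, not anything defined in this repo):

```
docker compose run --build --rm -it mistral-7b-instruct-v0.1-q4_k_m \
  -p 'Write a haiku about containers.' -n 64
```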
24 changes: 24 additions & 0 deletions docker-compose.yml
@@ -33,3 +33,27 @@ services:
      target: ape
      args:
        COSMOS_EXE: /usr/bin/qjs
  mistral-7b-instruct-v0.1-q4_k_m-cuda:
    image: ghcr.io/ajbouh/cosmos:mistral-7b-instruct-v0.1-q4_k_m-cuda-12.1.1-cosmo-3.1.1
    build:
      dockerfile: Dockerfile
      target: llamafile-cuda
      args:
        LLAMAFILE_URL: https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile?download=true
        LLAMAFILE_CHECKSUM: sha256:c8d34c244e01a91df1e8b22196dfddb9662f6b08fbcd4a23609d7b736b56f4ae
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities:
                - gpu
  mistral-7b-instruct-v0.1-q4_k_m:
    image: ghcr.io/ajbouh/cosmos:mistral-7b-instruct-v0.1-q4_k_m-cosmo-3.1.1
    build:
      dockerfile: Dockerfile
      target: llamafile
      args:
        LLAMAFILE_URL: https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile?download=true
        LLAMAFILE_CHECKSUM: sha256:c8d34c244e01a91df1e8b22196dfddb9662f6b08fbcd4a23609d7b736b56f4ae
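Because the image contents are parameterized by `LLAMAFILE_URL` and `LLAMAFILE_CHECKSUM`, any other single-file llamafile can be packaged the same way without touching the Dockerfile. A sketch with placeholder values (the URL and digest below are not a real release):

```
docker build --target llamafile \
  --build-arg LLAMAFILE_URL='https://example.com/some-model.llamafile' \
  --build-arg LLAMAFILE_CHECKSUM='sha256:<digest of the file>' \
  -t some-model-llamafile .
```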
