Support llamafile images (with CUDA support)
ajbouh committed Nov 30, 2023
1 parent b3609af commit 8e3aa84
Showing 3 changed files with 72 additions and 6 deletions.
52 changes: 46 additions & 6 deletions Dockerfile
@@ -21,7 +21,8 @@ ADD --checksum=sha256:241dc90f3e92b22c9e08cfb5f6df2e920da258e3c461d9677f267ab7a
 WORKDIR /opt/cosmos
 RUN unzip /dl/cosmos.zip
 WORKDIR /opt/cosmos/bin
-RUN /usr/bin/assimilate-x86_64.elf -c dd \
+RUN /usr/bin/assimilate-x86_64.elf -c assimilate \
+  && /usr/bin/assimilate-x86_64.elf -c dd \
   && /usr/bin/assimilate-x86_64.elf -c cp \
   && /usr/bin/assimilate-x86_64.elf -c mv \
   && /usr/bin/assimilate-x86_64.elf -c echo \
@@ -98,8 +99,47 @@ CMD ["/bin/bash"]
 # COPY --from=unpack-cosmos /usr/bin/ /usr/bin/
 # CMD /bin/bash
 
-# FROM cosmos-scratch as mistral-7b-instruct-v0.1-Q4_K_M-main
-# LABEL org.opencontainers.image.source https://github.com/ajbouh/cosmos
-# COPY --chmod=0755 mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile
-# ENV PATH=/bin:/usr/bin
-# ENTRYPOINT ["/bin/sh", "-c", "exec \"$@\"", "sh", "/usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile"]
+FROM cosmos-scratch as llamafile
+LABEL org.opencontainers.image.source https://github.com/ajbouh/cosmos
+ARG LLAMAFILE_URL
+ARG LLAMAFILE_CHECKSUM
+ADD --checksum=${LLAMAFILE_CHECKSUM} --chmod=0755 ${LLAMAFILE_URL} /usr/bin/llamafile
+ENTRYPOINT ["/bin/sh", "-c", "exec \"$@\"", "sh", "/usr/bin/llamafile", "--strace", "--n-gpu-layers", "1"]
+
+FROM nvidia/cuda:12.1.1-devel-ubuntu22.04 as devel-llamafile
+# HACK get llamafile to build stubs we can cache. would be better to use a "only compile stubs" entrypoint, or a smaller weights file
+ADD --checksum=sha256:c8d34c244e01a91df1e8b22196dfddb9662f6b08fbcd4a23609d7b736b56f4ae --chmod=0755 https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile?download=true /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile
+COPY --from=unpack-cosmos /usr/bin/assimilate /usr/bin/
+# HACK we need to assimilate so this can run on github actions...
+RUN /usr/bin/assimilate -c /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile \
+  && (/usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile --n-gpu-layers 1 || true) \
+  && [ -e /root/.cosmo ] && [ -e /root/.llamafile ] \
+  && rm /usr/bin/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile
+
+FROM cosmos-scratch as llamafile-cuda-scratch
+LABEL org.opencontainers.image.source https://github.com/ajbouh/cosmos
+COPY --from=devel-llamafile /usr/local/cuda/targets/x86_64-linux/lib/libcublas.so.12 /usr/local/cuda/targets/x86_64-linux/lib/libcublasLt.so.12 /usr/local/cuda/targets/x86_64-linux/lib/
+COPY --from=devel-llamafile /lib64/ld-linux-x86-64.so.2 /lib64/ld-linux-x86-64.so.2
+COPY --from=devel-llamafile /lib/x86_64-linux-gnu/libstdc++.so.6 /lib/x86_64-linux-gnu/libm.so.6 /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/x86_64-linux-gnu/libc.so.6 /lib/x86_64-linux-gnu/librt.so.1 /lib/x86_64-linux-gnu/libpthread.so.0 /lib/x86_64-linux-gnu/libdl.so.2 /lib/x86_64-linux-gnu/
+WORKDIR /root
+COPY --from=devel-llamafile /root/.cosmo /root/.cosmo
+COPY --from=devel-llamafile /root/.llamafile /root/.llamafile
+ENV PATH=/bin:/usr/bin
+ENV HOME=/root
+ENV LD_LIBRARY_PATH=/usr/local/cuda/targets/x86_64-linux/lib:/lib:/lib64
+# HACK forge an executable nvcc, because llamafile looks for nvcc before looking for cached .cosmo and .llamafile files
+COPY --from=unpack-cosmos /bin/chmod /bin/
+WORKDIR /usr/local/cuda/bin/
+RUN printf "" >nvcc
+RUN chmod 0755 nvcc
+# HACK things seem to fail if we have multiple CUDA devices. limit ourselves to one device for now to avoid errors like:
+# > CUDA error 2 at /root/.llamafile/ggml-cuda.cu:7864: out of memory
+# > current device: 4
+ENV CUDA_VISIBLE_DEVICES=0
+
+FROM llamafile-cuda-scratch as llamafile-cuda
+LABEL org.opencontainers.image.source https://github.com/ajbouh/cosmos
+ARG LLAMAFILE_URL
+ARG LLAMAFILE_CHECKSUM
+ADD --checksum=${LLAMAFILE_CHECKSUM} --chmod=0755 ${LLAMAFILE_URL} /usr/bin/llamafile
+ENTRYPOINT ["/bin/sh", "-c", "exec \"$@\"", "sh", "/usr/bin/llamafile", "--strace", "--n-gpu-layers", "1"]
2 changes: 2 additions & 0 deletions README.md
@@ -8,4 +8,6 @@ docker compose run --build --rm -it python
 docker compose run --build --rm -it lua
 docker compose run --build --rm -it sqlite3
 docker compose run --build --rm -it qjs
+docker compose run --build --rm -it mistral-7b-instruct-v0.1-q4_k_m-cuda
+docker compose run --build --rm -it mistral-7b-instruct-v0.1-q4_k_m
 ```
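
Because the image `ENTRYPOINT` forwards trailing container arguments to llamafile, additional flags can presumably be appended straight to these commands; for example (the `-p`/`-n` prompt and token-count flags are assumed from llama.cpp's CLI, not shown in this commit):

```
docker compose run --build --rm -it mistral-7b-instruct-v0.1-q4_k_m -p "Hello, llamafile" -n 16
```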
24 changes: 24 additions & 0 deletions docker-compose.yml
@@ -33,3 +33,27 @@ services:
       target: ape
       args:
         COSMOS_EXE: /usr/bin/qjs
+  mistral-7b-instruct-v0.1-q4_k_m-cuda:
+    image: ghcr.io/ajbouh/cosmos:mistral-7b-instruct-v0.1-q4_k_m-cuda-12.1.1-cosmo-3.1.1
+    build:
+      dockerfile: Dockerfile
+      target: llamafile-cuda
+      args:
+        LLAMAFILE_URL: https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile?download=true
+        LLAMAFILE_CHECKSUM: sha256:c8d34c244e01a91df1e8b22196dfddb9662f6b08fbcd4a23609d7b736b56f4ae
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: all
+              capabilities:
+                - gpu
+  mistral-7b-instruct-v0.1-q4_k_m:
+    image: ghcr.io/ajbouh/cosmos:mistral-7b-instruct-v0.1-q4_k_m-cosmo-3.1.1
+    build:
+      dockerfile: Dockerfile
+      target: llamafile
+      args:
+        LLAMAFILE_URL: https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile?download=true
+        LLAMAFILE_CHECKSUM: sha256:c8d34c244e01a91df1e8b22196dfddb9662f6b08fbcd4a23609d7b736b56f4ae
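
The `deploy.resources.reservations.devices` stanza is what grants the `-cuda` service access to the host GPUs under Compose. Outside Compose, a roughly equivalent invocation (assuming the NVIDIA Container Toolkit is installed on the host) would be:

```
docker run --rm -it --gpus all \
  ghcr.io/ajbouh/cosmos:mistral-7b-instruct-v0.1-q4_k_m-cuda-12.1.1-cosmo-3.1.1
```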
