Skip to content

Commit

Permalink
Add openvino support && fix build failure (#20)
Browse files Browse the repository at this point in the history
* add oneflow build script

* update source.sh

* update Dockerfile

* add openvino support

* update doc

* update LD_LIBRARY_PATH

* update LD_LIBRARY_PATH

* add source

* update dockerfile, add cpio

* use ordered list

* tabs

* tabs

* update doc

* update build-gcc7.sh

* remove source

* update oneflow-cuda.cmake
  • Loading branch information
zzk0 authored Mar 16, 2022
1 parent 970b7f7 commit 2cc89a6
Show file tree
Hide file tree
Showing 7 changed files with 103 additions and 29 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ jobs:
extra_docker_args+=" --env TRITON_VERSION=${{ env.TRITON_VERSION }}"
extra_docker_args+=" --env ONEFLOW_CI_SRC_DIR=$PWD/oneflow"
extra_docker_args+=" --env ONEFLOW_CI_BUILD_DIR=$manylinux_cache_dir/build"
extra_docker_args+=" --env ONEFLOW_CI_BUILD_SCRIPT=$PWD/oneflow/ci/manylinux/build-gcc7.sh"
extra_docker_args+=" --env ONEFLOW_CI_BUILD_SCRIPT=$PWD/ci/build/build-gcc7.sh"
extra_docker_args+=" --env WHEELHOUSE_DIR=$manylinux_cache_dir/wheelhouse"
extra_docker_args+=" --env ONEFLOW_CI_CMAKE_INIT_CACHE=$PWD/cmake/ci/oneflow-cuda.cmake"
extra_docker_args+=" --env ONEFLOW_CI_PYTHON_EXE=python3"
Expand All @@ -94,7 +94,7 @@ jobs:
run: |
set -x
manylinux_cache_dir=${{ env.MANYLINUX_CACHE_DIR }}
docker exec -w $PWD/test/test_resnet50 ${{ env.container_name }} bash -c "source $manylinux_cache_dir/build/source.sh && python3 export_model.py"
docker exec -w $PWD/test/test_resnet50 ${{ env.container_name }} bash -c "source $manylinux_cache_dir/build/ci_source.sh && python3 export_model.py"
- name: Remove container
run: |
docker container rm -f ${{ env.container_name }}
Expand Down
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
build
build-*
build-*/
checkpoints
model
log
.vscode
.clangd
.cache

*.so
Expand Down
40 changes: 40 additions & 0 deletions ci/build/build-gcc7.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
#!/usr/bin/env bash
# copy from https://github.com/Oneflow-Inc/oneflow/blob/master/ci/manylinux/build-gcc7.sh
# CI build script: configures, builds, and packages OneFlow as a pip wheel.
# Required environment variables (set by the CI workflow):
#   ONEFLOW_CI_SRC_DIR          - OneFlow source checkout
#   ONEFLOW_CI_BUILD_DIR        - out-of-tree CMake build directory
#   ONEFLOW_CI_CMAKE_INIT_CACHE - CMake initial-cache file (-C)
#   ONEFLOW_CI_PYTHON_EXE       - Python interpreter to build against
# Optional:
#   ONEFLOW_CI_BUILD_PARALLEL   - parallel build jobs (defaults to nproc below)

# -e: exit on error; -u: unset vars are errors; -x: trace commands; pipefail: fail on any pipe stage
set -euxo pipefail
ONEFLOW_CI_BUILD_PARALLEL=${ONEFLOW_CI_BUILD_PARALLEL:-$(nproc)}
# Log toolchain versions for CI debugging
gcc --version
ld --version
# clean python dir
cd ${ONEFLOW_CI_SRC_DIR}
# Aliyun PyPI mirror; --user avoids touching the system site-packages
${ONEFLOW_CI_PYTHON_EXE} -m pip install -i https://mirrors.aliyun.com/pypi/simple --user -r ci/fixed-dev-requirements.txt
cd python

# Remove git-ignored build artifacts under python/, but keep dist/ (built wheels).
# First pass (-n) is a dry run printed to the log; second pass (-f) deletes.
function clean_artifacts {
git clean -nXd -e \!dist -e \!dist/**
git clean -fXd -e \!dist -e \!dist/**
}

clean_artifacts

# cmake config
mkdir -p ${ONEFLOW_CI_BUILD_DIR}
cd ${ONEFLOW_CI_BUILD_DIR}
# Print then delete any stale CMakeCache.txt so the init cache (-C) is re-applied fresh
find ${ONEFLOW_CI_BUILD_DIR} -name CMakeCache.txt
find ${ONEFLOW_CI_BUILD_DIR} -name CMakeCache.txt -delete
if [ ! -f "$ONEFLOW_CI_CMAKE_INIT_CACHE" ]; then
echo "$ONEFLOW_CI_CMAKE_INIT_CACHE does not exist."
exit 1
fi
# Make the chosen Python visible to build tools that search PATH
export PATH="${PATH}:$(dirname ${ONEFLOW_CI_PYTHON_EXE})"
export PYTHON_BIN_PATH=${ONEFLOW_CI_PYTHON_EXE}
cmake -S ${ONEFLOW_CI_SRC_DIR} -C ${ONEFLOW_CI_CMAKE_INIT_CACHE} -DPython3_EXECUTABLE=${ONEFLOW_CI_PYTHON_EXE}

# cmake build
cd ${ONEFLOW_CI_BUILD_DIR}
cmake --build . --parallel ${ONEFLOW_CI_BUILD_PARALLEL}

# build pip
cd ${ONEFLOW_CI_SRC_DIR}
cd python
# Produce the wheel into python/dist/ (preserved across runs by clean_artifacts)
${ONEFLOW_CI_PYTHON_EXE} setup.py bdist_wheel
3 changes: 3 additions & 0 deletions cmake/ci/oneflow-cuda.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -16,3 +16,6 @@ set(CMAKE_CUDA_COMPILER_LAUNCHER ccache CACHE STRING "")
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION OFF CACHE BOOL "")
set(CUDA_NVCC_THREADS_NUMBER 8 CACHE STRING "")
set(WITH_TENSORRT YES CACHE BOOL "")
set(WITH_OPENVINO YES CACHE BOOL "")
set(OPENVINO_ROOT "/opt/intel/openvino_2021" CACHE STRING "")
set(BUILD_HWLOC OFF CACHE BOOL "")
60 changes: 35 additions & 25 deletions doc/build.md
Original file line number Diff line number Diff line change
@@ -1,34 +1,44 @@
# Build From Source

To build from source, you need to build liboneflow first.

Build liboneflow from source
You can pull the Docker image and follow the instructions below to build inside a Docker container.

```
git clone https://github.com/Oneflow-Inc/oneflow --depth=1
cd oneflow
mkdir build && cd build
cmake -C ../cmake/caches/cn/cuda.cmake -DBUILD_CPP_API=ON -DBUILD_SHARED_LIBS=ON -DWITH_MLIR=ON -G Ninja ..
ninja
docker pull registry.cn-beijing.aliyuncs.com/oneflow/triton-devel
```

Build oneflow backend from source
To build from source, you need to build liboneflow first.

```
mkdir build && cd build
cmake -DCMAKE_PREFIX_PATH=/path/to/liboneflow_cpp/share -DTRITON_RELATED_REPO_TAG=r21.10 -DTRITON_ENABLE_GPU=ON -G Ninja ..
ninja
```
1. Build liboneflow from source

Launch triton server
```
git clone https://github.com/Oneflow-Inc/oneflow --depth=1
cd oneflow
mkdir build && cd build
cmake -C ../cmake/caches/cn/cuda.cmake -DBUILD_CPP_API=ON -DBUILD_SHARED_LIBS=ON \
-DWITH_MLIR=ON -G Ninja ..
ninja
```
```
cd ../ # back to root of the serving
docker run --runtime=nvidia --rm --network=host \
-v$(pwd)/examples:/models \
-v$(pwd)/build/libtriton_oneflow.so:/backends/oneflow/libtriton_oneflow.so \
-v$(pwd)/oneflow/build/liboneflow_cpp/lib/:/mylib nvcr.io/nvidia/tritonserver:21.10-py3 \
bash -c 'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/mylib/ /opt/tritonserver/bin/tritonserver \
--model-repository=/models --backend-directory=/backends'
curl -v localhost:8000/v2/health/ready # ready check
```
2. Build oneflow backend from source
```
mkdir build && cd build
cmake -DCMAKE_PREFIX_PATH=/path/to/liboneflow_cpp/share -DTRITON_RELATED_REPO_TAG=r21.10 \
-DTRITON_ENABLE_GPU=ON -G Ninja -DTHIRD_PARTY_MIRROR=aliyun ..
ninja
```
3. Launch triton server
```
cd ../ # back to root of the serving
docker run --runtime=nvidia --rm --network=host \
-v$(pwd)/examples:/models \
-v$(pwd)/build/libtriton_oneflow.so:/backends/oneflow/libtriton_oneflow.so \
-v$(pwd)/oneflow/build/liboneflow_cpp/lib/:/mylib nvcr.io/nvidia/tritonserver:21.10-py3 \
bash -c 'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/mylib/ /opt/tritonserver/bin/tritonserver \
--model-repository=/models --backend-directory=/backends'
curl -v localhost:8000/v2/health/ready # ready check
```
18 changes: 17 additions & 1 deletion docker/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ ENV DEBIAN_FRONTEND=noninteractive
# example libcurl only needed for GCS?)
RUN sed -i 's/archive.ubuntu.com/mirrors.ustc.edu.cn/g' /etc/apt/sources.list && \
apt-get update && \
apt-get install -y --no-install-recommends software-properties-common libb64-0d libcurl4-openssl-dev libre2-5 git dirmngr libnuma-dev curl python3 python3-dev build-essential autoconf automake libtool make gcc g++ curl wget tar ccache rsync libopenblas-dev nasm python3-pip libssl-dev libonig-dev zlib1g-dev libboost-all-dev libre2-dev libb64-dev rapidjson-dev ninja-build libjpeg-dev && \
apt-get install -y --no-install-recommends software-properties-common libb64-0d libcurl4-openssl-dev libre2-5 git dirmngr libnuma-dev curl python3 python3-dev build-essential autoconf automake libtool make gcc g++ curl wget tar ccache rsync libopenblas-dev nasm python3-pip libssl-dev libonig-dev zlib1g-dev libboost-all-dev libre2-dev libb64-dev rapidjson-dev ninja-build libjpeg-dev cpio && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
pip3 config set global.index-url https://mirrors.bfsu.edu.cn/pypi/web/simple && \
Expand All @@ -59,9 +59,25 @@ RUN wget https://oneflow-static.oss-cn-beijing.aliyuncs.com/tensorrt/nv-tensorrt
apt-get update && \
apt-get install -y tensorrt && \
apt-get clean && \
rm -rf /var/nv-tensorrt-repo-ubuntu2004-cuda11.3-trt8.0.3.4-ga-20210831 && \
rm -rf /var/lib/apt/lists/* && \
rm -f nv-tensorrt-repo-ubuntu2004-cuda11.3-trt8.0.3.4-ga-20210831_1-1_amd64.deb

# Install OpenVINO
RUN wget https://oneflow-static.oss-cn-beijing.aliyuncs.com/openvino/l_openvino_toolkit_p_2021.4.752.tgz && \
tar -xvzf l_openvino_toolkit_p_2021.4.752.tgz && \
cd l_openvino_toolkit_p_2021.4.752 && \
sed -i 's/decline/accept/g' silent.cfg && \
./install.sh -s silent.cfg && \
cd .. && \
rm -rf l_openvino_toolkit_p_2021.4.752 && \
rm -rf l_openvino_toolkit_p_2021.4.752.tgz

ENV LD_LIBRARY_PATH="/opt/intel/openvino_2021/inference_engine/lib/intel64/:${LD_LIBRARY_PATH}"
ENV LD_LIBRARY_PATH="/opt/intel/openvino_2021/deployment_tools/ngraph/lib/:${LD_LIBRARY_PATH}"
ENV LD_LIBRARY_PATH="/opt/intel/openvino_2021/deployment_tools/inference_engine/lib/intel64/:${LD_LIBRARY_PATH}"
ENV LD_LIBRARY_PATH="/opt/intel/openvino_2021/deployment_tools/inference_engine/external/tbb/lib/:${LD_LIBRARY_PATH}"

ENV NVIDIA_BUILD_ID 28453983
LABEL com.nvidia.build.id=28453983
LABEL com.nvidia.build.ref=a8c3497c460014286e5293d32fcd8df9c99621c7
Expand Down
4 changes: 4 additions & 0 deletions src/triton/model_state.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#include <string>
#include <vector>

#include "oneflow_utils.h"
#include "triton/backend/backend_common.h"
#include "triton/core/tritonserver.h"

Expand Down Expand Up @@ -226,6 +227,9 @@ ModelState::LoadModel(
if (IsXrtTensorrt(xrt_kind_)) {
(*graph)->enable_tensorrt();
}
if (IsXrtOpenvino(xrt_kind_)) {
(*graph)->enable_openvino();
}

return nullptr;
}
Expand Down

0 comments on commit 2cc89a6

Please sign in to comment.