Commit 30cd67c
Author: Wei
Merge pull request #1137 from pytorch/circleci-editor/245/circleci-project-setup
CI/CD setup
2 parents (52e686e + d2375fc), commit 30cd67c

3 files changed: +282 additions, -12 deletions

.circleci/config.yml

Lines changed: 96 additions & 0 deletions
@@ -0,0 +1,96 @@
# Use the latest 2.1 version of CircleCI pipeline process engine.
# See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1

# Define a job to be invoked later in a workflow.
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
jobs:
  build:
    machine:
      # Primary container image where all steps run.
      # image: nvcr.io/nvidia/tensorrt:22.01-py3 # does not work with customized image
      # https://circleci.com/docs/2.0/configuration-reference#available-linux-gpu-images
      image: ubuntu-2004-cuda-11.4:202110-01
    resource_class: gpu.nvidia.large
    steps:
      - checkout
      - run:
          name: install cudnn + tensorrt + bazel
          command: |
            cd ~
            OS=ubuntu2004
            CUDNN_VERSION=8.2.1.*-1+cuda11.3
            TRT_VERSION=8.2.4-1+cuda11.4
            BAZEL_VERSION=5.1.1

            wget https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin
            sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600
            sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/7fa2af80.pub
            sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 536F8F1DE80F6A35
            sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC
            sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/ /"
            sudo apt-get update
            sudo apt-get install libcudnn8=${CUDNN_VERSION}
            sudo apt-get install libcudnn8-dev=${CUDNN_VERSION}

            sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/3bf863cc.pub
            sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/ /"
            sudo apt-get update

            sudo apt-get install libnvinfer8=${TRT_VERSION} libnvonnxparsers8=${TRT_VERSION} libnvparsers8=${TRT_VERSION} libnvinfer-plugin8=${TRT_VERSION} libnvinfer-dev=${TRT_VERSION} libnvonnxparsers-dev=${TRT_VERSION} libnvparsers-dev=${TRT_VERSION} libnvinfer-plugin-dev=${TRT_VERSION} python3-libnvinfer=${TRT_VERSION}
            # check available versions: apt list libnvinfer8 -a
            sudo wget -q https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-linux-x86_64 -O /usr/bin/bazel
            sudo chmod a+x /usr/bin/bazel

      - run:
          name: set up python environment
          command: |
            pip3 install nvidia-pyindex
            pip3 install nvidia-tensorrt==8.2.4.2
            pip3 install --pre torch==1.13.0.dev20220618 torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cu113
            pip3 install pytest parameterized expecttest
            # install torch_tensorrt
            mv WORKSPACE.ci WORKSPACE
            cd py
            python3 setup.py install

            # install fx2trt
            # cd py/torch_tensorrt/fx/setup
            # python3 setup.py install
      - run:
          name: run fx2trt tests
          command: |
            # one fix pending to enable below
            # cd py/torch_tensorrt/fx/test
            # pytest $(find . -name '*.py' | grep -v test_dispatch* | grep -v test_setitem*)

            cd py/torch_tensorrt/fx/test
            pushd converters/acc_op
            pytest
            popd
            pushd passes
            list_passes=$(ls | grep -v test_setitem*)
            pytest $list_passes
            popd
            pushd core
            pytest
            popd
            # pushd quant
            # pytest
            # popd
            pushd tools
            pytest
            popd
            pushd trt_lower
            pytest
            popd
            pushd tracer
            list_tracer=$(ls | grep -v test_dispatch_*)
            pytest $list_tracer
            popd
# Invoke jobs via workflows
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
workflows:
  build_run:
    jobs:
      - build

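In the "run fx2trt tests" step above, each test directory is visited with pushd/popd, and the passes/ and tracer/ runs filter out test_setitem* and test_dispatch_* respectively because fixes for those are still pending. For running roughly the same subset outside CI, a small Python helper along the following lines could reproduce that selection (directory list and exclusion prefixes are lifted from the config; the helper itself is illustrative and not part of this commit):

import subprocess
from pathlib import Path

# Directories exercised by the CI job (quant/ is still commented out there).
TEST_DIRS = ["converters/acc_op", "passes", "core", "tools", "trt_lower", "tracer"]
# Prefixes the CI job filters out with grep -v.
EXCLUDE = ("test_dispatch", "test_setitem")

def run_fx_tests(test_root: str = "py/torch_tensorrt/fx/test") -> None:
    for subdir in TEST_DIRS:
        files = [
            str(p)
            for p in sorted((Path(test_root) / subdir).glob("test_*.py"))
            if not p.name.startswith(EXCLUDE)
        ]
        if files:
            # Mirrors the `pushd <dir> && pytest ... && popd` pattern in the config.
            subprocess.run(["pytest", *files], check=True)

if __name__ == "__main__":
    run_fx_tests()

Once the pending fix mentioned in that step lands, the commented-out single pytest $(find ...) invocation at its top could presumably replace the per-directory loop entirely.
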
WORKSPACE.ci

Lines changed: 147 additions & 0 deletions
@@ -0,0 +1,147 @@
workspace(name = "Torch-TensorRT")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

http_archive(
    name = "rules_python",
    sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f",
    url = "https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz",
)

load("@rules_python//python:pip.bzl", "pip_install")

http_archive(
    name = "rules_pkg",
    sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d",
    urls = [
        "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz",
        "https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz",
    ],
)

load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")

rules_pkg_dependencies()

git_repository(
    name = "googletest",
    commit = "703bd9caab50b139428cea1aaff9974ebee5742e",
    remote = "https://github.com/google/googletest",
    shallow_since = "1570114335 -0400",
)

# External dependency for torch_tensorrt if you already have precompiled binaries.
local_repository(
    name = "torch_tensorrt",
    path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt"
)

# CUDA should be installed on the system locally
new_local_repository(
    name = "cuda",
    build_file = "@//third_party/cuda:BUILD",
    path = "/usr/local/cuda/",
)

new_local_repository(
    name = "cublas",
    build_file = "@//third_party/cublas:BUILD",
    path = "/usr",
)
#############################################################################################################
# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
#############################################################################################################

#http_archive(
#    name = "libtorch",
#    build_file = "@//third_party/libtorch:BUILD",
#    sha256 = "8d9e829ce9478db4f35bdb7943308cf02e8a2f58cf9bb10f742462c1d57bf287",
#    strip_prefix = "libtorch",
#    urls = ["https://download.pytorch.org/libtorch/cu113/libtorch-cxx11-abi-shared-with-deps-1.11.0%2Bcu113.zip"],
#)
#
#http_archive(
#    name = "libtorch_pre_cxx11_abi",
#    build_file = "@//third_party/libtorch:BUILD",
#    sha256 = "90159ecce3ff451f3ef3f657493b6c7c96759c3b74bbd70c1695f2ea2f81e1ad",
#    strip_prefix = "libtorch",
#    urls = ["https://download.pytorch.org/libtorch/cu113/libtorch-shared-with-deps-1.11.0%2Bcu113.zip"],
#)

# Download these tarballs manually from the NVIDIA website
# Either place them in the distdir directory in third_party and use the --distdir flag
# or modify the urls to "file:///<PATH TO TARBALL>/<TARBALL NAME>.tar.gz

#http_archive(
#    name = "cudnn",
#    build_file = "@//third_party/cudnn/archive:BUILD",
#    sha256 = "0e5d2df890b9967efa6619da421310d97323565a79f05a1a8cb9b7165baad0d7",
#    strip_prefix = "cuda",
#    urls = [
#        "https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.2.4/11.4_20210831/cudnn-11.4-linux-x64-v8.2.4.15.tgz",
#    ],
#)
#
#http_archive(
#    name = "tensorrt",
#    build_file = "@//third_party/tensorrt/archive:BUILD",
#    sha256 = "826180eaaecdf9a7e76116855b9f1f3400ea9b06e66b06a3f6a0747ba6f863ad",
#    strip_prefix = "TensorRT-8.2.4.2",
#    urls = [
#        "https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/8.2.4/tars/tensorrt-8.2.4.2.linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz",
#    ],
#)

####################################################################################
# Locally installed dependencies (use in cases of custom dependencies or aarch64)
####################################################################################

# NOTE: In the case you are using just the pre-cxx11-abi path or just the cxx11 abi path
# with your local libtorch, just point deps at the same path to satisfy bazel.

# NOTE: NVIDIA's aarch64 PyTorch (python) wheel file uses the CXX11 ABI unlike PyTorch's standard
# x86_64 python distribution. If using NVIDIA's version just point to the root of the package
# for both versions here and do not use --config=pre-cxx11-abi

new_local_repository(
    name = "libtorch",
    path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch",
    build_file = "third_party/libtorch/BUILD"
)

new_local_repository(
    name = "libtorch_pre_cxx11_abi",
    path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch",
    build_file = "third_party/libtorch/BUILD"
)

new_local_repository(
    name = "cudnn",
    path = "/usr/",
    build_file = "@//third_party/cudnn/local:BUILD"
)

new_local_repository(
    name = "tensorrt",
    path = "/usr/",
    build_file = "@//third_party/tensorrt/local:BUILD"
)

# #########################################################################
# # Testing Dependencies (optional - comment out on aarch64)
# #########################################################################
# pip_install(
#     name = "torch_tensorrt_py_deps",
#     requirements = "//py:requirements.txt",
# )

# pip_install(
#     name = "py_test_deps",
#     requirements = "//tests/py:requirements.txt",
# )

pip_install(
    name = "pylinter_deps",
    requirements = "//tools/linter:requirements.txt",
)

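The commented-out http_archive blocks above pin each tarball by sha256, and the surrounding comments describe swapping the urls for manually downloaded file:/// tarballs. If those entries are ever re-enabled against a local download, the expected checksum can be computed with a short Python helper such as this (illustrative only, not part of the commit; the example path is hypothetical):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the hex digest Bazel expects in http_archive(sha256 = ...)."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in chunks so large tarballs do not need to fit in memory.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local tarball:
# print(sha256_of("/tmp/tensorrt-8.2.4.2.linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz"))
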
py/setup.py

Lines changed: 39 additions & 12 deletions
@@ -23,11 +23,14 @@
 JETPACK_VERSION = None

 __version__ = '1.2.0a0'
-
+FX_ONLY = False

 def get_git_revision_short_hash() -> str:
     return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()

+if "--fx-only" in sys.argv:
+    FX_ONLY = True
+    sys.argv.remove("--fx-only")

 if "--release" not in sys.argv:
     __version__ = __version__ + "+" + get_git_revision_short_hash()
@@ -138,11 +141,14 @@ def finalize_options(self):
         develop.finalize_options(self)

     def run(self):
-        global CXX11_ABI
-        build_libtorchtrt_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI)
-        gen_version_file()
-        copy_libtorchtrt()
-        develop.run(self)
+        if FX_ONLY:
+            develop.run(self)
+        else:
+            global CXX11_ABI
+            build_libtorchtrt_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI)
+            gen_version_file()
+            copy_libtorchtrt()
+            develop.run(self)


 class InstallCommand(install):
@@ -155,11 +161,14 @@ def finalize_options(self):
         install.finalize_options(self)

     def run(self):
-        global CXX11_ABI
-        build_libtorchtrt_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)
-        gen_version_file()
-        copy_libtorchtrt()
-        install.run(self)
+        if FX_ONLY:
+            install.run(self)
+        else:
+            global CXX11_ABI
+            build_libtorchtrt_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)
+            gen_version_file()
+            copy_libtorchtrt()
+            install.run(self)


 class BdistCommand(bdist_wheel):
@@ -254,6 +263,23 @@ def run(self):
         ] + (["-D_GLIBCXX_USE_CXX11_ABI=1"] if CXX11_ABI else ["-D_GLIBCXX_USE_CXX11_ABI=0"]),
         undef_macros=["NDEBUG"])
 ]
+if FX_ONLY:
+    ext_modules=None
+    packages=[
+        "torch_tensorrt.fx",
+        "torch_tensorrt.fx.converters",
+        "torch_tensorrt.fx.passes",
+        "torch_tensorrt.fx.tools",
+        "torch_tensorrt.fx.tracer.acc_tracer",
+    ]
+    package_dir={
+        "torch_tensorrt.fx": "torch_tensorrt/fx",
+        "torch_tensorrt.fx.converters": "torch_tensorrt/fx/converters",
+        "torch_tensorrt.fx.passes": "torch_tensorrt/fx/passes",
+        "torch_tensorrt.fx.tools": "torch_tensorrt/fx/tools",
+        "torch_tensorrt.fx.tracer.acc_tracer": "torch_tensorrt/fx/tracer/acc_tracer",
+    }
+

 with open("README.md", "r", encoding="utf-8") as fh:
     long_description = fh.read()
@@ -282,7 +308,8 @@ def run(self):
     },
     zip_safe=False,
     license="BSD",
-    packages=find_packages(),
+    packages=packages if FX_ONLY else find_packages(),
+    package_dir=package_dir if FX_ONLY else {},
     classifiers=[
         "Development Status :: 5 - Stable", "Environment :: GPU :: NVIDIA CUDA",
         "License :: OSI Approved :: BSD License", "Intended Audience :: Developers",

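The setup.py change consumes a custom --fx-only flag before setuptools parses the remaining arguments, then branches both the install/develop commands and the packages/package_dir handed to setup(). A stripped-down, standalone sketch of that idiom (the package list is copied from the diff; everything else is minimal and illustrative, not the project's actual setup script):

import sys
from setuptools import find_packages, setup

# Strip the custom flag before setuptools sees it, as the diff does;
# an unrecognized option would otherwise make setuptools error out.
FX_ONLY = False
if "--fx-only" in sys.argv:
    FX_ONLY = True
    sys.argv.remove("--fx-only")

# Package list copied from the FX_ONLY branch of the diff.
FX_PACKAGES = [
    "torch_tensorrt.fx",
    "torch_tensorrt.fx.converters",
    "torch_tensorrt.fx.passes",
    "torch_tensorrt.fx.tools",
    "torch_tensorrt.fx.tracer.acc_tracer",
]

setup(
    name="torch_tensorrt",  # illustrative metadata only
    packages=FX_PACKAGES if FX_ONLY else find_packages(),
)

With this in place, python3 setup.py install --fx-only would install only the FX Python packages and skip the bazel/C++ libtorchtrt build, while a plain python3 setup.py install keeps the existing full build path (which is what the CI job above invokes).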