This repository was archived by the owner on Apr 28, 2023. It is now read-only.

Commit f776141

Merge pull request #358 from facebookresearch/fix-xenial-docker

Use gcc 5.4 on xenial non-conda + use pytorch 0.4 official + fix broken python API

2 parents 6577d4f + 8ccf885

File tree (6 files changed: +18 -12 lines changed)

.jenkins/build.sh
docker/README.md
docker/common/install_base.sh
tensor_comprehensions/tc_unit.py
test_python/test_tc.py
test_python/test_tc_torch.py

.jenkins/build.sh

Lines changed: 2 additions & 2 deletions
@@ -72,7 +72,7 @@ if [[ "$DISTRIB_RELEASE" == 14.04 ]]; then
   source activate tc-env
   conda install -y pyyaml mkl-include
   conda install -yc conda-forge pytest
-  conda install -y pytorch-nightly=2018.04.17 -c pytorch
+  conda install -y pytorch -c pytorch
   WITH_PYTHON_C2=OFF CORES=$(nproc) CLANG_PREFIX=/usr/local/clang+llvm-tapir5.0 BUILD_TYPE=Release ./build.sh --all
 else
   echo "Building TC in non-conda env"
@@ -87,7 +87,7 @@ if [[ "$DISTRIB_RELEASE" == 16.04 ]]; then
   source activate tc-env
   conda install -y pyyaml mkl-include
   conda install -yc conda-forge pytest
-  conda install -y pytorch-nightly=2018.04.17 cuda90 -c pytorch
+  conda install -y pytorch cuda90 -c pytorch
   WITH_PYTHON_C2=OFF CORES=$(nproc) CLANG_PREFIX=/usr/local/clang+llvm-tapir5.0 BUILD_TYPE=Release ./build.sh --all
 else
   echo "Building TC in non-conda env"

docker/README.md

Lines changed: 1 addition & 1 deletion
@@ -78,7 +78,7 @@ git submodule update --init --recursive
 # build TC
 conda install -y mkl-include pyyaml
 conda install -yc conda-forge pytest
-conda install -y pytorch-nightly=2018.04.17 -c pytorch # OR conda install -y pytorch-nightly=2018.04.17 cuda90 -c pytorch
+conda install -y pytorch -c pytorch # OR conda install -y pytorch cuda90 -c pytorch
 CORES=$(nproc) WITH_CAFFE2=ON CLANG_PREFIX=/usr/local/clang+llvm-tapir5.0 BUILD_TYPE=Release ./build.sh --all
 # Test the TC build is fine
 ./test.sh

docker/common/install_base.sh

Lines changed: 9 additions & 5 deletions
@@ -34,11 +34,15 @@ apt-get install -y --no-install-recommends \
 apt-get clean
 rm -rf /var/lib/apt/lists/*
 # setup gcc
-add-apt-repository ppa:ubuntu-toolchain-r/test
-apt-get update
-apt-get install -y --no-install-recommends libcilkrts5 gcc-$GCC_VERSION g++-$GCC_VERSION
-update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 50
-update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION 50
+if [[ "$GCC_VERSION" == 4.9 ]]; then
+  add-apt-repository ppa:ubuntu-toolchain-r/test
+  apt-get update
+  apt-get install -y --no-install-recommends libcilkrts5 gcc-$GCC_VERSION g++-$GCC_VERSION
+  update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 50
+  update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION 50
+else
+  apt-get install -y --no-install-recommends libcilkrts5 gcc g++
+fi

 # Install ccache from source. Needs 3.4 or later for ccbin support
 # Needs specific branch to work with nvcc (ccache/ccache#145)

tensor_comprehensions/tc_unit.py

Lines changed: 3 additions & 3 deletions
@@ -66,7 +66,7 @@ def set_gflags(
 def check_cache_file_exists(cache_file):
     # for autotuning, we save two files: .cuda and .options, we will check that
     # these two files exists for the validity of cache
-    if os.path.exists(cache_file + ".options") and os.path.exists(cache_file + ".cuda"):
+    if os.path.exists(cache_file + ".options"):
         return True
     return False

@@ -254,7 +254,7 @@ def autotune(self, *inputs, **kwargs):
             cache_file = "/tmp/{}_{}".format(hash_key, str(uuid.uuid4()))
         elif isinstance(kwargs["cache"], str):
             cache_file = kwargs["cache"]
-            logger.info('Autotuning cache will be saved to: {}.cuda/options'.format(cache_file))
+            logger.info('Autotuning cache will be saved to: {}.options'.format(cache_file))
         else:
             logger.warning("Autotuning results won't be cached. 'cache' option is not set")

@@ -297,7 +297,7 @@ def autotune(self, *inputs, **kwargs):

         if cache_file:
             cache_file = cache_file + "_backward"
-            logger.info('Backwards autotuning cache will be saved to: {}.cuda/options'.format(cache_file))
+            logger.info('Backwards autotuning cache will be saved to: {}.options'.format(cache_file))
         kwargs["type"] = "backward"
         options = get_options_from_kwargs_and_tuner_cache(backward_name, cache_file, options_cache, *inputs, **kwargs)
         backward_best_options = self.tune_and_store(
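
With this change the autotuner cache is keyed off a single file: check_cache_file_exists no longer requires the .cuda file, and the log messages point at <cache_file>.options only. A stand-alone sketch of the simplified check (illustrative; the path below is hypothetical):

    # Stand-alone version of the simplified cache check: only the
    # "<cache_file>.options" file must exist; ".cuda" is no longer required.
    import os

    def check_cache_file_exists(cache_file):
        return os.path.exists(cache_file + ".options")

    print(check_cache_file_exists("/tmp/matmul_100_400_500"))  # False until a tuning run writes this cache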

test_python/test_tc.py

Lines changed: 2 additions & 0 deletions
@@ -79,7 +79,9 @@ def matmul(float(M,N) A, float(N,K) B) -> (output) {
         inputs = [mat1, mat2]
         handle = cu.compile("matmul", [mat1, mat2], options="mlp")
         outputs = cu.run(handle, "matmul", inputs)
+        torch.cuda.synchronize()
         expected = torch.mm(mat1, mat2)
+        torch.cuda.synchronize()
         diff = outputs[0] - expected
         self.assert_almost_equal(diff, inputs, 4)
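
The two torch.cuda.synchronize() calls guard against comparing results of kernels that are still in flight: CUDA launches are asynchronous, so the test waits for the TC kernel before computing the reference and again before differencing. The same pattern in isolation (shapes and tolerance are illustrative, not the test's values):

    # Synchronize before comparing GPU results on the host; CUDA kernels run asynchronously.
    # Shapes and tolerance here are illustrative only.
    import torch

    mat1 = torch.randn(3, 4, device="cuda")
    mat2 = torch.randn(4, 5, device="cuda")

    out = mat1 @ mat2                 # stand-in for the TC-generated kernel's output
    torch.cuda.synchronize()          # wait for the asynchronous kernel to finish
    expected = torch.mm(mat1, mat2)
    torch.cuda.synchronize()
    print((out - expected).abs().max().item() < 1e-4)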

test_python/test_tc_torch.py

Lines changed: 1 addition & 1 deletion
@@ -181,7 +181,7 @@ def test_autotuner_cachefile_first(self):
     def test_autotuner_cachefile_load_automatic(self):
         lang = MATMUL_LANG
         cache_file = "{}/matmul_100_400_500".format(PATH_PREFIX) # use argparse if input from command line
-        assert os.path.isfile("{}.cuda".format(cache_file)), "looks like the cache_file doesn't exist"
+        assert os.path.isfile("{}.options".format(cache_file)), "looks like the cache_file doesn't exist"

         matmul = tc.define(lang, name="matmul")
         mat1, mat2 = torch.randn(100, 400).cuda(), torch.randn(400, 500).cuda()
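
The assertion now looks for the .options file that the autotuner writes. For context, a hedged sketch of how such a cache file would be produced with the Python API; the TC definition body and the exact autotune arguments are assumptions based on the kwargs handling in tc_unit.py, not part of this diff:

    # Hedged sketch: producing the autotuner cache that the test above loads.
    import torch
    import tensor_comprehensions as tc

    # Assumed TC definition; only the signature appears verbatim in this diff.
    lang = """
    def matmul(float(M,N) A, float(N,K) B) -> (output) {
        output(m, k) +=! A(m, n) * B(n, k)
    }
    """

    matmul = tc.define(lang, name="matmul")
    mat1, mat2 = torch.randn(100, 400).cuda(), torch.randn(400, 500).cuda()
    # "cache" is the kwarg consumed by autotune() in tc_unit.py; tuning should
    # leave /tmp/matmul_100_400_500.options behind for the test to pick up.
    matmul.autotune(mat1, mat2, cache="/tmp/matmul_100_400_500")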
