diff --git a/WORKSPACE b/WORKSPACE index 4b3e8970e..c9c28cda0 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -37,16 +37,17 @@ http_archive( urls = ["https://github.com/quantumlib/qsim/archive/refs/tags/v0.13.3.zip"], ) +local_repository( + name = "python", + path = "third_party/python_legacy", +) + http_archive( name = "org_tensorflow", - patches = [ - "//third_party/tf:tf.patch", - ], - sha256 = "f771db8d96ca13c72f73c85c9cfb6f5358e2de3dd62a97a9ae4b672fe4c6d094", - strip_prefix = "tensorflow-2.15.0", - urls = [ - "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.15.0.zip", - ], + patches = ["//third_party/tf:tf.patch"], + sha256 = "c8c8936e7b6156e669e08b3c388452bb973c1f41538149fce7ed4a4849c7a012", + strip_prefix = "tensorflow-2.16.2", + urls = ["https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.16.2.zip"], ) diff --git a/configure.sh b/configure.sh index 0ca428c85..fbe381597 100755 --- a/configure.sh +++ b/configure.sh @@ -13,23 +13,23 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== +set -euo pipefail + PLATFORM="$(uname -s | tr 'A-Z' 'a-z')" -function write_to_bazelrc() { - echo "$1" >> .bazelrc -} -function write_action_env_to_bazelrc() { - write_to_bazelrc "build --action_env $1=\"$2\"" +# --- helpers --------------------------------------------------------------- +function write_bazelrc() { + echo "${1}" >> .bazelrc } -function write_linkopt_dir_to_bazelrc() { - write_to_bazelrc "build --linkopt -Wl,-rpath,$1" >> .bazelrc +function write_tf_rc() { + echo "${1}" >> .tf_configure.bazelrc } - -function is_linux() { - [[ "${PLATFORM}" == "linux" ]] +function die() { + echo "ERROR: $*" >&2 + exit 1 } function is_macos() { @@ -37,143 +37,209 @@ function is_macos() { } function is_windows() { - # On windows, the shell script is actually running in msys [[ "${PLATFORM}" =~ msys_nt*|mingw*|cygwin*|uwin* ]] } -function is_ppc64le() { - [[ "$(uname -m)" == "ppc64le" ]] +function write_legacy_python_repo() { + mkdir -p third_party/python_legacy + + # empty WORKSPACE + cat > third_party/python_legacy/WORKSPACE <<'EOF' +# intentionally empty +EOF + + # simple BUILD that exports defs.bzl + cat > third_party/python_legacy/BUILD <<'EOF' +package(default_visibility = ["//visibility:public"]) +exports_files(["defs.bzl"]) +EOF + + # defs.bzl MUST define 'interpreter' as a string, not a function. + # We also export py_runtime to satisfy older loads. + cat > third_party/python_legacy/defs.bzl <= 3.10 + if ! command -v python3 >/dev/null 2>&1; then + die "python3 not found. Pass --python=/path/to/python3.10+ or set PYTHON_BIN_PATH." + fi + if ! python3 - <<'PY' +import sys +raise SystemExit(0 if sys.version_info[:2] >= (3, 10) else 1) +PY + then + die "Python 3.10+ required for TensorFlow Quantum; found $(python3 -V 2>&1). Pass --python=/path/to/python3.10+ or set PYTHON_BIN_PATH." + fi -# Check if it's installed -# if [[ $(pip show tensorflow) == *tensorflow* ]] || [[ $(pip show tf-nightly) == *tf-nightly* ]]; then -# echo 'Using installed tensorflow' -# else -# # Uninstall CPU version if it is installed. -# if [[ $(pip show tensorflow-cpu) == *tensorflow-cpu* ]]; then -# echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' -# pip uninstall tensorflow -# elif [[ $(pip show tf-nightly-cpu) == *tf-nightly-cpu* ]]; then -# echo 'Already have tensorflow non-gpu installed. 
Uninstalling......\n' -# pip uninstall tf-nightly -# fi -# # Install GPU version -# echo 'Installing tensorflow .....\n' -# pip install tensorflow -# fi - - - -TF_CFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') ) -TF_LFLAGS="$(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))')" - - -write_to_bazelrc "build --experimental_repo_remote_exec" -write_to_bazelrc "build --spawn_strategy=standalone" -write_to_bazelrc "build --strategy=Genrule=standalone" -write_to_bazelrc "build -c opt" -write_to_bazelrc "build --cxxopt=\"-D_GLIBCXX_USE_CXX11_ABI=1\"" -write_to_bazelrc "build --cxxopt=\"-std=c++17\"" - -# The transitive inclusion of build rules from TensorFlow ends up including -# and building two copies of zlib (one from bazel_rules, one from the TF code -# baase itself). The version of zlib you get (at least in TF 2.15.0) ends up -# producing many compiler warnings that "a function declaration without a -# prototype is deprecated". It's difficult to patch the particular build rules -# involved, so the approach taken here is to silence those warnings for stuff -# in external/. TODO: figure out how to patch the BUILD files and put it there. -write_to_bazelrc "build --per_file_copt=external/.*@-Wno-deprecated-non-prototype" -write_to_bazelrc "build --host_per_file_copt=external/.*@-Wno-deprecated-non-prototype" - -# Similarly, these are other harmless warnings about unused functions coming -# from things pulled in by the TF bazel config rules. -write_to_bazelrc "build --per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function" -write_to_bazelrc "build --host_per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function" + PY="$(command -v python3)" +fi -# The following supress warnings coming from qsim. -# TODO: fix the code in qsim & update TFQ to use the updated version. -write_to_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable" -write_to_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable" -write_to_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations" -write_to_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations" +# Normalize to an absolute path (readlink -f is GNU; fall back to python) +if command -v readlink >/dev/null 2>&1; then + PY_ABS="$(readlink -f "${PY}" 2>/dev/null || true)" +fi +if [[ -z "${PY_ABS:-}" ]]; then + PY_ABS="$("${PY}" -c 'import os,sys; print(os.path.abspath(sys.executable))')" +fi +PYTHON_BIN_PATH="${PY_ABS}" -if is_windows; then - # Use pywrap_tensorflow instead of tensorflow_framework on Windows - SHARED_LIBRARY_DIR=${TF_CFLAGS:2:-7}"python" -else - SHARED_LIBRARY_DIR=${TF_LFLAGS:2} -fi -SHARED_LIBRARY_NAME=$(echo $TF_LFLAGS | rev | cut -d":" -f1 | rev) -if ! [[ $TF_LFLAGS =~ .*:.* ]]; then - if is_macos; then - SHARED_LIBRARY_NAME="libtensorflow_framework.dylib" - elif is_windows; then - # Use pywrap_tensorflow's import library on Windows. It is in the same dir as the dll/pyd. - SHARED_LIBRARY_NAME="_pywrap_tensorflow_internal.lib" - else - SHARED_LIBRARY_NAME="libtensorflow_framework.so" - fi +# --- choose CPU/GPU like upstream script (default CPU) --------------------- +TF_NEED_CUDA="" +while [[ -z "${TF_NEED_CUDA}" ]]; do + read -p "Build against TensorFlow CPU pip package? 
[Y/n] " INPUT || true + case "${INPUT:-Y}" in + [Yy]* ) echo "CPU build selected."; TF_NEED_CUDA=0;; + [Nn]* ) echo "GPU build selected."; TF_NEED_CUDA=1;; + * ) echo "Please answer Y or n.";; + esac +done + +# For TF >= 2.1 this value isn’t actually consulted by TFQ, +# but we keep a compatible prompt/flag. +TF_CUDA_VERSION="11" + +# --- sanity: python is importable and has TF ------------------------------- +if [[ ! -x "${PYTHON_BIN_PATH}" ]]; then + die "${PYTHON_BIN_PATH} not found/executable." fi -HEADER_DIR=${TF_CFLAGS:2} -if is_windows; then - SHARED_LIBRARY_DIR=${SHARED_LIBRARY_DIR//\\//} - SHARED_LIBRARY_NAME=${SHARED_LIBRARY_NAME//\\//} - HEADER_DIR=${HEADER_DIR//\\//} +# Ensure TF is importable from system python (user should have installed it). +tf_output=$("${PYTHON_BIN_PATH}" - <<'PY' +import sys +import os +import glob + +try: + import tensorflow as tf + import tensorflow.sysconfig as sc +except ImportError: + sys.exit(1) + +print(sc.get_include()) + +lib_path = sc.get_lib() +lib_dir = lib_path if os.path.isdir(lib_path) else os.path.dirname(lib_path) +print(lib_dir) + +cands = (glob.glob(os.path.join(lib_dir, 'libtensorflow_framework.so*')) or + glob.glob(os.path.join(lib_dir, 'libtensorflow.so*')) or + glob.glob(os.path.join(lib_dir, '_pywrap_tensorflow_internal.*'))) +print(os.path.basename(cands[0]) if cands else 'libtensorflow_framework.so.2') +PY +) + +if [[ $? -ne 0 ]]; then + echo "ERROR: tensorflow not importable by Python (${PYTHON_BIN_PATH})" >&2 + exit 1 fi -write_action_env_to_bazelrc "TF_HEADER_DIR" ${HEADER_DIR} -write_action_env_to_bazelrc "TF_SHARED_LIBRARY_DIR" ${SHARED_LIBRARY_DIR} -write_action_env_to_bazelrc "TF_SHARED_LIBRARY_NAME" ${SHARED_LIBRARY_NAME} -write_action_env_to_bazelrc "TF_NEED_CUDA" ${TF_NEED_CUDA} +{ + read -r HDR + read -r LIBDIR + read -r LIBNAME +} <<< "${tf_output}" + +echo "Detected:" +echo " PYTHON_BIN_PATH=$PYTHON_BIN_PATH" +echo " TF_HEADER_DIR=$HDR" +echo " TF_SHARED_LIBRARY_DIR=$LIBDIR" +echo " TF_SHARED_LIBRARY_NAME=$LIBNAME" + +# --- write .tf_configure.bazelrc (repo_env for repository rules) ----------- +write_tf_rc "build --repo_env=PYTHON_BIN_PATH=$PYTHON_BIN_PATH" +write_tf_rc "build --repo_env=TF_HEADER_DIR=$HDR" +write_tf_rc "build --repo_env=TF_SHARED_LIBRARY_DIR=$LIBDIR" +write_tf_rc "build --repo_env=TF_SHARED_LIBRARY_NAME=$LIBNAME" +write_tf_rc "build --repo_env=TF_NEED_CUDA=$TF_NEED_CUDA" +write_tf_rc "build --repo_env=TF_CUDA_VERSION=$TF_CUDA_VERSION" + +# Make sure repo rules and sub-config see legacy Keras (keras 2 instead of Keras 3) +write_tf_rc "build --repo_env=TF_USE_LEGACY_KERAS=1" + +# --- write third_party/python_legacy/ with interpreter -------------------- +write_legacy_python_repo + +# --- write .bazelrc (imports TF config usual flags) ----------------- +write_bazelrc "try-import %workspace%/.tf_configure.bazelrc" +write_bazelrc "build --experimental_repo_remote_exec" +write_bazelrc "build --spawn_strategy=standalone" +write_bazelrc "build --strategy=Genrule=standalone" +write_bazelrc "build -c opt" +write_bazelrc "build --cxxopt=\"-D_GLIBCXX_USE_CXX11_ABI=1\"" +write_bazelrc "build --cxxopt=\"-std=c++17\"" +write_bazelrc "build --action_env=PYTHON_BIN_PATH=$PYTHON_BIN_PATH" +write_bazelrc "build --action_env=TF_USE_LEGACY_KERAS=1" +write_bazelrc "test --action_env=TF_USE_LEGACY_KERAS=1" + + +# zlib / protobuf warning suppressions +write_bazelrc "build --per_file_copt=external/.*@-Wno-deprecated-non-prototype" +write_bazelrc "build --host_per_file_copt=external/.*@-Wno-deprecated-non-prototype" 
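+# (Note: --per_file_copt=<regex>@<flag> applies the flag only to compile
+# actions whose file paths match the regex, so these suppressions stay scoped
+# to external/ and the TFQ sources named below; the --host_per_file_copt
+# twins apply the same suppression to tools built in Bazel's host
+# configuration.)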
+write_bazelrc "build --per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function" +write_bazelrc "build --host_per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function" + +# qsim warnings +# The following supress warnings coming from qsim. +# TODO: fix the code in qsim & update TFQ to use the updated version. +write_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable" +write_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable" +write_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations" +write_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations" + + +# rpath so the dynamic linker finds TF’s shared lib if ! is_windows; then - write_linkopt_dir_to_bazelrc ${SHARED_LIBRARY_DIR} + write_bazelrc "build --linkopt=-Wl,-rpath,${LIBDIR}" fi -# TODO(yifeif): do not hardcode path +# CUDA toggle if [[ "$TF_NEED_CUDA" == "1" ]]; then - write_to_bazelrc "build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true" - write_to_bazelrc "build:cuda --@local_config_cuda//:enable_cuda" - write_to_bazelrc "build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain" - - write_action_env_to_bazelrc "TF_CUDA_VERSION" ${TF_CUDA_VERSION} - write_action_env_to_bazelrc "TF_CUDNN_VERSION" "8" + write_bazelrc "build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true" + write_bazelrc "build:cuda --@local_config_cuda//:enable_cuda" + write_bazelrc "build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain" if is_windows; then - write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}" - write_action_env_to_bazelrc "CUDA_TOOLKIT_PATH" "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}" + write_tf_rc "build --repo_env=CUDNN_INSTALL_PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}" + write_tf_rc "build --repo_env=CUDA_TOOLKIT_PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}" else - write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "/usr/lib/x86_64-linux-gnu" - write_action_env_to_bazelrc "CUDA_TOOLKIT_PATH" "/usr/local/cuda" + write_tf_rc "build --repo_env=CUDNN_INSTALL_PATH=/usr/lib/x86_64-linux-gnu" + write_tf_rc "build --repo_env=CUDA_TOOLKIT_PATH=/usr/local/cuda" fi - write_to_bazelrc "build --config=cuda" - write_to_bazelrc "test --config=cuda" + write_bazelrc "build --config=cuda" + write_bazelrc "test --config=cuda" fi +echo +echo "Wrote .tf_configure.bazelrc and .bazelrc successfully." diff --git a/release/build_pip_package.sh b/release/build_pip_package.sh index 8bed5b909..5613c63f0 100755 --- a/release/build_pip_package.sh +++ b/release/build_pip_package.sh @@ -1,12 +1,12 @@ #!/bin/bash # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -16,44 +16,66 @@ set -e set -x +# Pick the Python that TFQ/TensorFlow used during configure/build. 
+# Order: explicit env -> python3 (>= 3.10) +PY="${PYTHON_BIN_PATH:-}" +if [[ -z "${PY}" ]]; then + if ! command -v python3 >/dev/null 2>&1; then + echo "ERROR: python3 not found. Set PYTHON_BIN_PATH to a Python 3.10+ interpreter." >&2 + exit 2 + fi + + # Require Python >= 3.10 for TFQ. + if ! python3 - <<'PY' +import sys +sys.exit(0 if sys.version_info[:2] >= (3, 10) else 1) +PY + then + echo "ERROR: Python 3.10+ required for TensorFlow Quantum; found $(python3 -V 2>&1)." >&2 + exit 2 + fi + + PY="$(command -v python3)" +fi +echo "Using Python: ${PY}" + +# Ensure packaging tools are present in THIS interpreter +if ! "${PY}" -m pip show -q setuptools wheel >/dev/null 2>&1; then + "${PY}" -m pip install --upgrade pip setuptools wheel +fi + EXPORT_DIR="bazel-bin/release/build_pip_package.runfiles/__main__" -function main() { - DEST=${1} - EXTRA_FLAGS=${2} +main() { + DEST="$1" + EXTRA_FLAGS="$2" - if [[ -z ${DEST} ]]; then + if [[ -z "${DEST}" ]]; then echo "No destination directory provided." exit 1 fi - mkdir -p ${DEST} + mkdir -p "${DEST}" echo "=== destination directory: ${DEST}" - TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX) - - echo $(date) : "=== Using tmpdir: ${TMPDIR}" - + # Build the pip package in a temporary directory. + TMPDIR="$(mktemp -d -t tmp.XXXXXXXXXX)" + echo "$(date) : === Using tmpdir: ${TMPDIR}" echo "=== Copy TFQ files" # Copy over files necessary to run setup.py - cp ${EXPORT_DIR}/release/setup.py "${TMPDIR}" - cp ${EXPORT_DIR}/release/MANIFEST.in "${TMPDIR}" - - # Copy over all files in the tensorflow_quantum/ directory that are included in the BUILD - # rule. - mkdir "${TMPDIR}"/tensorflow_quantum - cp -r -v ${EXPORT_DIR}/tensorflow_quantum/* "${TMPDIR}"/tensorflow_quantum/ - - pushd ${TMPDIR} - echo $(date) : "=== Building wheel" - - python3 setup.py bdist_wheel ${EXTRA_FLAGS} > /dev/null + cp "${EXPORT_DIR}/release/setup.py" "${TMPDIR}" + cp "${EXPORT_DIR}/release/MANIFEST.in" "${TMPDIR}" + mkdir "${TMPDIR}/tensorflow_quantum" + cp -r -v "${EXPORT_DIR}/tensorflow_quantum/"* "${TMPDIR}/tensorflow_quantum/" + pushd "${TMPDIR}" + echo "$(date) : === Building wheel" + "${PY}" setup.py bdist_wheel ${EXTRA_FLAGS} > /dev/null cp dist/*.whl "${DEST}" popd - rm -rf ${TMPDIR} - echo $(date) : "=== Output wheel file is in: ${DEST}" + rm -rf "${TMPDIR}" + echo "$(date) : === Output wheel file is in: ${DEST}" } main "$@" diff --git a/release/setup.py b/release/setup.py index 571a11861..73e590258 100644 --- a/release/setup.py +++ b/release/setup.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""TensorFlow Quantum adds qauntum computing primitives to TensorFlow. +"""TensorFlow Quantum adds quantum computing primitives to TensorFlow. TensorFlow Quantum is an open source library for high performance batch quantum computation on quantum simulators and quantum computers. The goal @@ -20,29 +20,28 @@ of quantum data and quantum systems via hybrid models. TensorFlow Quantum was created in an ongoing collaboration between the -University of Waterloo and the Quantum AI team at Google along with help from -many other contributors within Google. +University of Waterloo and the Quantum AI team at Google along with help +from many other contributors within Google. 
""" + from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys - from datetime import date + from setuptools import Extension from setuptools import find_packages from setuptools import setup -from setuptools.dist import Distribution from setuptools.command.install import install +from setuptools.dist import Distribution - -DOCLINES = __doc__.split('\n') +DOCLINES = __doc__.split("\n") class InstallPlatlib(install): - """Workaround so .so files in generated wheels - can be seen by auditwheel.""" + """Workaround so .so files in generated wheels are visible to auditwheel.""" def finalize_options(self): install.finalize_options(self) @@ -50,67 +49,68 @@ def finalize_options(self): self.install_lib = self.install_platlib -REQUIRED_PACKAGES = ['cirq-core==1.3.0', 'cirq-google==1.3.0', 'sympy == 1.12'] +REQUIRED_PACKAGES = [ + "cirq-core==1.3.0", + "cirq-google==1.3.0", + "sympy==1.14", +] + +# Placed as extras to avoid overwriting existing nightly TF installs. +EXTRA_PACKAGES = ["tensorflow>=2.16,<2.17"] -# placed as extra to not have required overwrite existing nightly installs if -# they exist. -EXTRA_PACKAGES = ['tensorflow == 2.15.0'] -CUR_VERSION = '0.7.4' +CUR_VERSION = "0.7.4" class BinaryDistribution(Distribution): - """This class is needed in order to create OS specific wheels.""" + """Create OS-specific wheels.""" def has_ext_modules(self): return True -nightly = False -if '--nightly' in sys.argv: - nightly = True - sys.argv.remove('--nightly') +NIGHTLY_FLAG = False +if "--nightly" in sys.argv: + NIGHTLY_FLAG = True + sys.argv.remove("--nightly") -project_name = 'tensorflow-quantum' -build_version = CUR_VERSION -if nightly: - project_name = 'tfq-nightly' - build_version = CUR_VERSION + '.dev' + str(date.today()).replace('-', '') +PROJECT_NAME = "tensorflow-quantum" +BUILD_VERSION = CUR_VERSION +if NIGHTLY_FLAG: + PROJECT_NAME = "tfq-nightly" + BUILD_VERSION = CUR_VERSION + ".dev" + str(date.today()).replace("-", "") setup( - name=project_name, - version=build_version, - description= - 'TensorFlow Quantum is a library for hybrid quantum-classical machine learning.', - long_description='\n'.join(DOCLINES[2:]), - author='Google Inc.', - author_email='no-reply@google.com', - url='https://github.com/tensorflow/quantum/', + name=PROJECT_NAME, + version=BUILD_VERSION, + description="Library for hybrid quantum-classical machine learning.", + long_description="\n".join(DOCLINES[2:]), + author="The TensorFlow Quantum Authors", + author_email="tensorflow-quantum-team@google.com", + url="https://github.com/tensorflow/quantum/", packages=find_packages(), install_requires=REQUIRED_PACKAGES, - extras_require={'extras': EXTRA_PACKAGES}, - # Add in any packaged data. + extras_require={"extras": EXTRA_PACKAGES}, include_package_data=True, - #ext_modules=[Extension('_foo', ['stub.cc'])], + # ext_modules=[Extension('_foo', ['stub.cc'])], zip_safe=False, distclass=BinaryDistribution, - # PyPI package information. 
classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Intended Audience :: Education', - 'Intended Audience :: Science/Research', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', - 'Topic :: Scientific/Engineering', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - 'Topic :: Scientific/Engineering :: Mathematics', - 'Topic :: Scientific/Engineering :: Physics', - 'Topic :: Scientific/Engineering :: Quantum Computing', + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Scientific/Engineering :: Physics", + "Topic :: Scientific/Engineering :: Quantum Computing", ], - license='Apache 2.0', - keywords='tensorflow machine learning quantum qml', - cmdclass={'install': InstallPlatlib}) + license="Apache 2.0", + keywords="tensorflow machine learning quantum qml", + cmdclass={"install": InstallPlatlib}, +) diff --git a/requirements.txt b/requirements.txt index 9fe2d0446..dc7d9d20c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,10 @@ -cirq-core==1.3.0 -cirq-google==1.3.0 -sympy==1.12 -numpy==1.24.2 # TensorFlow can detect if it was built against other versions. +cirq-core==1.3.* +cirq-google==1.3.* +sympy==1.14 +numpy>=1.26.4,<2.0 # TensorFlow can detect if it was built against other versions. nbformat==5.1.3 pylint==3.3.3 yapf==0.43.0 -tensorflow==2.15.0 +tensorflow==2.16.2 +tf-keras~=2.16.0 +nbclient==0.6.5 diff --git a/scripts/ci_validate_tutorials.sh b/scripts/ci_validate_tutorials.sh index e58355faf..fe61a1932 100755 --- a/scripts/ci_validate_tutorials.sh +++ b/scripts/ci_validate_tutorials.sh @@ -13,11 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== +#!/bin/bash +set -e + +# Use legacy tf.keras (Keras 2) with TF 2.16 +export TF_USE_LEGACY_KERAS=1 + +# Tools for running notebooks non-interactively +pip install \ + "nbclient==0.6.5" \ + "jupyter-client==7.4.9" \ + "ipython>=8.10.0" \ + "ipykernel>=6.29.0" -# Run the tutorials using the installed pip package -pip install jupyter nbclient==0.6.5 jupyter-client==6.1.12 ipython==7.22.0 -# Workaround for ipykernel - see https://github.com/ipython/ipykernel/issues/422 -pip install ipykernel==5.1.1 # OpenAI Gym pip package needed for the quantum reinforcement learning tutorial pip install gym==0.24.1 # seaborn has also numpy dependency, it requires version >= 0.12.0. @@ -26,12 +34,14 @@ pip install seaborn==0.12.0 pip install -q git+https://github.com/tensorflow/docs # Leave the quantum directory, otherwise errors may occur cd .. + examples_output=$(python3 quantum/scripts/test_tutorials.py) exit_code=$? 
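+# test_tutorials.py (see scripts/test_tutorials.py) reads every notebook under
+# quantum/docs/tutorials/, comments out shell magics, and executes it with
+# nbclient; a non-zero exit code here means at least one notebook raised
+# during execution.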
+ if [ "$exit_code" == "0" ]; then exit 0; else echo "Tutorials failed to run to completion:" echo "{$examples_output}" exit 64; -fi +fi \ No newline at end of file diff --git a/scripts/test_tutorials.py b/scripts/test_tutorials.py index 08a9d85d9..4d06010c6 100644 --- a/scripts/test_tutorials.py +++ b/scripts/test_tutorials.py @@ -13,40 +13,51 @@ # limitations under the License. # ============================================================================== """Module to ensure all notebooks execute without error by pytesting them.""" + +import os import glob import re from absl.testing import parameterized import nbformat import nbclient -import tensorflow as tf + +# Ensure we always use legacy tf.keras (Keras 2) when running tutorials. +# This must be set before importing TensorFlow so it picks up tf_keras. +os.environ.setdefault("TF_USE_LEGACY_KERAS", "1") + +# Pylint doesn't like code before imports, but we need the env var set first. +import tensorflow as tf # pylint: disable=wrong-import-position # Must be run from the directory containing `quantum` repo. NOTEBOOKS = glob.glob("quantum/docs/tutorials/*.ipynb") class ExamplesTest(tf.test.TestCase, parameterized.TestCase): + """Execute all tutorial notebooks and check they run without errors.""" @parameterized.parameters(NOTEBOOKS) def test_notebook(self, path): - """Test that notebooks open/run correctly.""" + """Test that notebooks open and run correctly.""" nb = nbformat.read(path, as_version=4) # Scrub any magic from the notebook before running. for cell in nb.get("cells"): - if cell['cell_type'] == 'code': - src = cell['source'] - # Comment out lines containing '!' but not '!=' - src = re.sub(r'\!(?!=)', r'#!', src) + if cell["cell_type"] == "code": + src = cell["source"] + # Comment out lines containing '!' but not '!='. + src = re.sub(r"\!(?!=)", r"#!", src) # For mnist.ipynb to reduce runtime in test. - src = re.sub('NUM_EXAMPLES ?= ?.*', 'NUM_EXAMPLES = 10', src) - # For quantum_reinforcement_learning.ipynb to reduce runtime in test. - src = re.sub('n_episodes ?= ?.*', 'n_episodes = 50', src) + src = re.sub(r"NUM_EXAMPLES ?= ?.*", "NUM_EXAMPLES = 10", src) + # For quantum_reinforcement_learning.ipynb: + # reduce runtime in test by limiting episodes. + src = re.sub(r"n_episodes ?= ?.*", "n_episodes = 50", src) # For noise.ipynb to reduce runtime in test. - src = re.sub('n_epochs ?= ?.*', 'n_epochs = 2', src) - cell['source'] = src + src = re.sub(r"n_epochs ?= ?.*", "n_epochs = 2", src) + cell["source"] = src _ = nbclient.execute(nb, timeout=900, kernel_name="python3") + if __name__ == "__main__": tf.test.main() diff --git a/tensorflow_quantum/__init__.py b/tensorflow_quantum/__init__.py index 7c781f882..3b6a0ad7c 100644 --- a/tensorflow_quantum/__init__.py +++ b/tensorflow_quantum/__init__.py @@ -64,4 +64,4 @@ del core # pylint: enable=undefined-variable -__version__ = '0.7.2' +__version__ = '0.7.4' diff --git a/third_party/tf/tf.patch b/third_party/tf/tf.patch index 4ce7dc753..e32a38ad3 100644 --- a/third_party/tf/tf.patch +++ b/third_party/tf/tf.patch @@ -1,74 +1,75 @@ -diff --git tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl -index a2bdd6a7eed..ec25c23d8d4 100644 ---- tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl -+++ tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl -@@ -2,7 +2,7 @@ +# Patch used for tf 2.15, for tf 2.16> it is not needed anymore. 
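+# (For reference: the retired hunks below pointed TF's python_configure loads
+# at the non-hermetic variant under third_party/py/non_hermetic and redirected
+# the numpy BUILD targets from @pypi_numpy to @local_config_python; TF 2.16's
+# toolchain setup no longer requires either edit.)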
+# diff --git tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl +# index a2bdd6a7eed..ec25c23d8d4 100644 +# --- tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl +# +++ tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl +# @@ -2,7 +2,7 @@ - load("//tensorflow/tools/toolchains:cpus/aarch64/aarch64.bzl", "remote_aarch64_configure") - load("//third_party/remote_config:remote_platform_configure.bzl", "remote_platform_configure") --load("//third_party/py:python_configure.bzl", "remote_python_configure") -+load("//third_party/py/non_hermetic:python_configure.bzl", "remote_python_configure") +# load("//tensorflow/tools/toolchains:cpus/aarch64/aarch64.bzl", "remote_aarch64_configure") +# load("//third_party/remote_config:remote_platform_configure.bzl", "remote_platform_configure") +# -load("//third_party/py:python_configure.bzl", "remote_python_configure") +# +load("//third_party/py/non_hermetic:python_configure.bzl", "remote_python_configure") - def ml2014_tf_aarch64_configs(name_container_map, env): - for name, container in name_container_map.items(): -diff --git tensorflow/tools/toolchains/remote_config/rbe_config.bzl tensorflow/tools/toolchains/remote_config/rbe_config.bzl -index 9f71a414bf7..57f70752323 100644 ---- tensorflow/tools/toolchains/remote_config/rbe_config.bzl -+++ tensorflow/tools/toolchains/remote_config/rbe_config.bzl -@@ -1,6 +1,6 @@ - """Macro that creates external repositories for remote config.""" +# def ml2014_tf_aarch64_configs(name_container_map, env): +# for name, container in name_container_map.items(): +# diff --git tensorflow/tools/toolchains/remote_config/rbe_config.bzl tensorflow/tools/toolchains/remote_config/rbe_config.bzl +# index 9f71a414bf7..57f70752323 100644 +# --- tensorflow/tools/toolchains/remote_config/rbe_config.bzl +# +++ tensorflow/tools/toolchains/remote_config/rbe_config.bzl +# @@ -1,6 +1,6 @@ +# """Macro that creates external repositories for remote config.""" --load("//third_party/py:python_configure.bzl", "local_python_configure", "remote_python_configure") -+load("//third_party/py/non_hermetic:python_configure.bzl", "local_python_configure", "remote_python_configure") - load("//third_party/gpus:cuda_configure.bzl", "remote_cuda_configure") - load("//third_party/nccl:nccl_configure.bzl", "remote_nccl_configure") - load("//third_party/gpus:rocm_configure.bzl", "remote_rocm_configure") -diff --git tensorflow/workspace2.bzl tensorflow/workspace2.bzl -index 7e9faa558a4..5b18cb0969a 100644 ---- tensorflow/workspace2.bzl -+++ tensorflow/workspace2.bzl -@@ -8,7 +8,7 @@ load("//third_party/gpus:rocm_configure.bzl", "rocm_configure") - load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure") - load("//third_party/nccl:nccl_configure.bzl", "nccl_configure") - load("//third_party/git:git_configure.bzl", "git_configure") --load("//third_party/py:python_configure.bzl", "python_configure") -+load("//third_party/py/non_hermetic:python_configure.bzl", "python_configure") - load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure") - load("//tensorflow/tools/toolchains:cpus/aarch64/aarch64_compiler_configure.bzl", "aarch64_compiler_configure") - load("//tensorflow/tools/toolchains:cpus/arm/arm_compiler_configure.bzl", "arm_compiler_configure") -diff --git third_party/py/non_hermetic/python_configure.bzl third_party/py/non_hermetic/python_configure.bzl -index 300cbfb6c71..09d98505dd9 100644 ---- 
third_party/py/non_hermetic/python_configure.bzl -+++ third_party/py/non_hermetic/python_configure.bzl -@@ -206,7 +206,7 @@ def _create_local_python_repository(repository_ctx): - # Resolve all labels before doing any real work. Resolving causes the - # function to be restarted with all previous state being lost. This - # can easily lead to a O(n^2) runtime in the number of labels. -- build_tpl = repository_ctx.path(Label("//third_party/py:BUILD.tpl")) -+ build_tpl = repository_ctx.path(Label("//third_party/py/non_hermetic:BUILD.tpl")) +# -load("//third_party/py:python_configure.bzl", "local_python_configure", "remote_python_configure") +# +load("//third_party/py/non_hermetic:python_configure.bzl", "local_python_configure", "remote_python_configure") +# load("//third_party/gpus:cuda_configure.bzl", "remote_cuda_configure") +# load("//third_party/nccl:nccl_configure.bzl", "remote_nccl_configure") +# load("//third_party/gpus:rocm_configure.bzl", "remote_rocm_configure") +# diff --git tensorflow/workspace2.bzl tensorflow/workspace2.bzl +# index 7e9faa558a4..5b18cb0969a 100644 +# --- tensorflow/workspace2.bzl +# +++ tensorflow/workspace2.bzl +# @@ -8,7 +8,7 @@ load("//third_party/gpus:rocm_configure.bzl", "rocm_configure") +# load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure") +# load("//third_party/nccl:nccl_configure.bzl", "nccl_configure") +# load("//third_party/git:git_configure.bzl", "git_configure") +# -load("//third_party/py:python_configure.bzl", "python_configure") +# +load("//third_party/py/non_hermetic:python_configure.bzl", "python_configure") +# load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure") +# load("//tensorflow/tools/toolchains:cpus/aarch64/aarch64_compiler_configure.bzl", "aarch64_compiler_configure") +# load("//tensorflow/tools/toolchains:cpus/arm/arm_compiler_configure.bzl", "arm_compiler_configure") +# diff --git third_party/py/non_hermetic/python_configure.bzl third_party/py/non_hermetic/python_configure.bzl +# index 300cbfb6c71..09d98505dd9 100644 +# --- third_party/py/non_hermetic/python_configure.bzl +# +++ third_party/py/non_hermetic/python_configure.bzl +# @@ -206,7 +206,7 @@ def _create_local_python_repository(repository_ctx): +# # Resolve all labels before doing any real work. Resolving causes the +# # function to be restarted with all previous state being lost. This +# # can easily lead to a O(n^2) runtime in the number of labels. 
+# - build_tpl = repository_ctx.path(Label("//third_party/py:BUILD.tpl")) +# + build_tpl = repository_ctx.path(Label("//third_party/py/non_hermetic:BUILD.tpl")) - python_bin = get_python_bin(repository_ctx) - _check_python_bin(repository_ctx, python_bin) -diff --git third_party/py/numpy/BUILD third_party/py/numpy/BUILD -index 97c7907fc38..c80cc5287bc 100644 ---- third_party/py/numpy/BUILD -+++ third_party/py/numpy/BUILD -@@ -2,14 +2,15 @@ licenses(["restricted"]) +# python_bin = get_python_bin(repository_ctx) +# _check_python_bin(repository_ctx, python_bin) +# diff --git third_party/py/numpy/BUILD third_party/py/numpy/BUILD +# index 97c7907fc38..c80cc5287bc 100644 +# --- third_party/py/numpy/BUILD +# +++ third_party/py/numpy/BUILD +# @@ -2,14 +2,15 @@ licenses(["restricted"]) - package(default_visibility = ["//visibility:public"]) +# package(default_visibility = ["//visibility:public"]) --alias( -+py_library( - name = "numpy", -- actual = "@pypi_numpy//:pkg", -+ srcs = ["tf_numpy_dummy.py"], -+ srcs_version = "PY3", - ) +# -alias( +# +py_library( +# name = "numpy", +# - actual = "@pypi_numpy//:pkg", +# + srcs = ["tf_numpy_dummy.py"], +# + srcs_version = "PY3", +# ) - alias( - name = "headers", -- actual = "@pypi_numpy//:numpy_headers", -+ actual = "@local_config_python//:numpy_headers", - ) +# alias( +# name = "headers", +# - actual = "@pypi_numpy//:numpy_headers", +# + actual = "@local_config_python//:numpy_headers", +# ) - genrule( \ No newline at end of file +# genrule( \ No newline at end of file