17 changes: 9 additions & 8 deletions WORKSPACE
@@ -37,16 +37,17 @@ http_archive(
urls = ["https://github.com/quantumlib/qsim/archive/refs/tags/v0.13.3.zip"],
)

local_repository(
name = "python",
path = "third_party/python_legacy",
)

http_archive(
name = "org_tensorflow",
patches = [
"//third_party/tf:tf.patch",
],
sha256 = "f771db8d96ca13c72f73c85c9cfb6f5358e2de3dd62a97a9ae4b672fe4c6d094",
strip_prefix = "tensorflow-2.15.0",
urls = [
"https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.15.0.zip",
],
patches = ["//third_party/tf:tf.patch"],
sha256 = "c8c8936e7b6156e669e08b3c388452bb973c1f41538149fce7ed4a4849c7a012",
strip_prefix = "tensorflow-2.16.2",
urls = ["https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.16.2.zip"],
)
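If the pinned TensorFlow archive is bumped again, the new sha256 can be recomputed locally before editing this WORKSPACE entry. A minimal sketch, assuming curl and sha256sum are available on PATH:

# Recompute the checksum of the pinned TensorFlow release archive.
curl -sL https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.16.2.zip | sha256sum
# The printed digest should match the sha256 above:
#   c8c8936e7b6156e669e08b3c388452bb973c1f41538149fce7ed4a4849c7a012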


312 changes: 189 additions & 123 deletions configure.sh
@@ -13,167 +13,233 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -euo pipefail

PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"

function write_to_bazelrc() {
echo "$1" >> .bazelrc
}

function write_action_env_to_bazelrc() {
write_to_bazelrc "build --action_env $1=\"$2\""
# --- helpers ---------------------------------------------------------------
function write_bazelrc() {
echo "${1}" >> .bazelrc
}

function write_linkopt_dir_to_bazelrc() {
write_to_bazelrc "build --linkopt -Wl,-rpath,$1" >> .bazelrc
function write_tf_rc() {
echo "${1}" >> .tf_configure.bazelrc
}


function is_linux() {
[[ "${PLATFORM}" == "linux" ]]
function die() {
echo "ERROR: $*" >&2
exit 1
}

function is_macos() {
[[ "${PLATFORM}" == "darwin" ]]
}

function is_windows() {
# On windows, the shell script is actually running in msys
[[ "${PLATFORM}" =~ msys_nt*|mingw*|cygwin*|uwin* ]]
}

function is_ppc64le() {
[[ "$(uname -m)" == "ppc64le" ]]
function write_legacy_python_repo() {
mkdir -p third_party/python_legacy

# empty WORKSPACE
cat > third_party/python_legacy/WORKSPACE <<'EOF'
# intentionally empty
EOF

# simple BUILD that exports defs.bzl
cat > third_party/python_legacy/BUILD <<'EOF'
package(default_visibility = ["//visibility:public"])
exports_files(["defs.bzl"])
EOF

# defs.bzl MUST define 'interpreter' as a string, not a function.
# We also export py_runtime to satisfy older loads.
cat > third_party/python_legacy/defs.bzl <<EOF
# AUTOGENERATED by configure.sh
load("@bazel_tools//tools/python:toolchain.bzl", "py_runtime_pair")
# Absolute path to the python interpreter Bazel/TF should use:
interpreter = "${PYTHON_BIN_PATH}"
py_runtime = native.py_runtime
EOF

echo "Wrote third_party/python_legacy with interpreter=${PYTHON_BIN_PATH}"
}
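Once configure.sh has run, the generated stub can be inspected to confirm the interpreter path was baked in as a plain string. A minimal check; the path shown is illustrative:

# Inspect the generated legacy python repository stub.
cat third_party/python_legacy/defs.bzl
# Expect a line of the form (path depends on the interpreter chosen above):
#   interpreter = "/usr/bin/python3.11"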

# --- start fresh -----------------------------------------------------------
rm -f .bazelrc .tf_configure.bazelrc

# Remove .bazelrc if it already exist
[ -e .bazelrc ] && rm .bazelrc

# Check if we are building GPU or CPU ops, default CPU
while [[ "$TF_NEED_CUDA" == "" ]]; do
read -p "Do you want to build ops again TensorFlow CPU pip package?"\
" Y or enter for CPU (tensorflow-cpu), N for GPU (tensorflow). [Y/n] " INPUT
case $INPUT in
[Yy]* ) echo "Build with CPU pip package."; TF_NEED_CUDA=0;;
[Nn]* ) echo "Build with GPU pip package."; TF_NEED_CUDA=1;;
"" ) echo "Build with CPU pip package."; TF_NEED_CUDA=0;;
* ) echo "Invalid selection: " $INPUT;;
# --- parse args ------------------------------------------------------------
USER_PY=""
for arg in "$@"; do
case "$arg" in
--python=*) USER_PY="${arg#--python=}" ;;
*) echo "Unknown arg: $arg" ;;
esac
done

while [[ "$TF_CUDA_VERSION" == "" ]]; do
read -p "Are you building against TensorFlow 2.1(including RCs) or newer?[Y/n] " INPUT
case $INPUT in
[Yy]* ) echo "Build against TensorFlow 2.1 or newer."; TF_CUDA_VERSION=11;;
[Nn]* ) echo "Build against TensorFlow <2.1."; TF_CUDA_VERSION=10.0;;
"" ) echo "Build against TensorFlow 2.1 or newer."; TF_CUDA_VERSION=11;;
* ) echo "Invalid selection: " $INPUT;;
esac
done
# --- choose interpreter (venv/conda/system) --------------------------------
if [[ -n "${USER_PY}" ]]; then
# 1) Explicit --python=... flag
PY="${USER_PY}"
elif [[ -n "${PYTHON_BIN_PATH:-}" ]]; then
# 2) Explicit environment override
PY="${PYTHON_BIN_PATH}"
elif [[ -n "${CONDA_PREFIX:-}" && -x "${CONDA_PREFIX}/bin/python" ]]; then
# 3) Conda environment python, if available
PY="${CONDA_PREFIX}/bin/python"
else
# 4) Fallback: system python3, but require >= 3.10
if ! command -v python3 >/dev/null 2>&1; then
die "python3 not found. Pass --python=/path/to/python3.10+ or set PYTHON_BIN_PATH."
fi

if ! python3 - <<'PY'
import sys
raise SystemExit(0 if sys.version_info[:2] >= (3, 10) else 1)
PY
then
die "Python 3.10+ required for TensorFlow Quantum; found $(python3 -V 2>&1). Pass --python=/path/to/python3.10+ or set PYTHON_BIN_PATH."
fi

# Check if it's installed
# if [[ $(pip show tensorflow) == *tensorflow* ]] || [[ $(pip show tf-nightly) == *tf-nightly* ]]; then
# echo 'Using installed tensorflow'
# else
# # Uninstall CPU version if it is installed.
# if [[ $(pip show tensorflow-cpu) == *tensorflow-cpu* ]]; then
# echo 'Already have tensorflow non-gpu installed. Uninstalling......\n'
# pip uninstall tensorflow
# elif [[ $(pip show tf-nightly-cpu) == *tf-nightly-cpu* ]]; then
# echo 'Already have tensorflow non-gpu installed. Uninstalling......\n'
# pip uninstall tf-nightly
# fi
# # Install GPU version
# echo 'Installing tensorflow .....\n'
# pip install tensorflow
# fi



TF_CFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') )
TF_LFLAGS="$(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))')"


write_to_bazelrc "build --experimental_repo_remote_exec"
write_to_bazelrc "build --spawn_strategy=standalone"
write_to_bazelrc "build --strategy=Genrule=standalone"
write_to_bazelrc "build -c opt"
write_to_bazelrc "build --cxxopt=\"-D_GLIBCXX_USE_CXX11_ABI=1\""
write_to_bazelrc "build --cxxopt=\"-std=c++17\""

# The transitive inclusion of build rules from TensorFlow ends up including
# and building two copies of zlib (one from bazel_rules, one from the TF code
# base itself). The version of zlib you get (at least in TF 2.15.0) ends up
# producing many compiler warnings that "a function declaration without a
# prototype is deprecated". It's difficult to patch the particular build rules
# involved, so the approach taken here is to silence those warnings for stuff
# in external/. TODO: figure out how to patch the BUILD files and put it there.
write_to_bazelrc "build --per_file_copt=external/.*@-Wno-deprecated-non-prototype"
write_to_bazelrc "build --host_per_file_copt=external/.*@-Wno-deprecated-non-prototype"

# Similarly, these are other harmless warnings about unused functions coming
# from things pulled in by the TF bazel config rules.
write_to_bazelrc "build --per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function"
write_to_bazelrc "build --host_per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function"
PY="$(command -v python3)"
fi
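The four selection paths above correspond to these invocation styles; the paths and the conda environment name are illustrative:

# 1) explicit flag
./configure.sh --python=/opt/python/3.11/bin/python3
# 2) environment override
PYTHON_BIN_PATH=/usr/bin/python3.10 ./configure.sh
# 3) an active conda environment's interpreter is picked up automatically
conda activate my-tfq-env && ./configure.sh
# 4) otherwise the system python3 is used and must be 3.10 or newer
./configure.sh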

# The following suppress warnings coming from qsim.
# TODO: fix the code in qsim & update TFQ to use the updated version.
write_to_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable"
write_to_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable"
write_to_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations"
write_to_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations"
# Normalize to an absolute path (readlink -f is GNU; fall back to python)
if command -v readlink >/dev/null 2>&1; then
PY_ABS="$(readlink -f "${PY}" 2>/dev/null || true)"
fi
if [[ -z "${PY_ABS:-}" ]]; then
PY_ABS="$("${PY}" -c 'import os,sys; print(os.path.abspath(sys.executable))')"
fi
PYTHON_BIN_PATH="${PY_ABS}"
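A quick manual sanity check of the resolved interpreter can be run the same way the script later invokes it; the output shown is illustrative:

# Confirm the resolved interpreter is runnable and report its path and version.
"${PYTHON_BIN_PATH}" -c 'import sys; print(sys.executable, sys.version.split()[0])'
#   /home/user/.venvs/tfq/bin/python3 3.11.9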


if is_windows; then
# Use pywrap_tensorflow instead of tensorflow_framework on Windows
SHARED_LIBRARY_DIR=${TF_CFLAGS:2:-7}"python"
else
SHARED_LIBRARY_DIR=${TF_LFLAGS:2}
fi
SHARED_LIBRARY_NAME=$(echo $TF_LFLAGS | rev | cut -d":" -f1 | rev)
if ! [[ $TF_LFLAGS =~ .*:.* ]]; then
if is_macos; then
SHARED_LIBRARY_NAME="libtensorflow_framework.dylib"
elif is_windows; then
# Use pywrap_tensorflow's import library on Windows. It is in the same dir as the dll/pyd.
SHARED_LIBRARY_NAME="_pywrap_tensorflow_internal.lib"
else
SHARED_LIBRARY_NAME="libtensorflow_framework.so"
fi
# --- choose CPU/GPU like upstream script (default CPU) ---------------------
TF_NEED_CUDA=""
while [[ -z "${TF_NEED_CUDA}" ]]; do
read -p "Build against TensorFlow CPU pip package? [Y/n] " INPUT || true
case "${INPUT:-Y}" in
[Yy]* ) echo "CPU build selected."; TF_NEED_CUDA=0;;
[Nn]* ) echo "GPU build selected."; TF_NEED_CUDA=1;;
* ) echo "Please answer Y or n.";;
esac
done

# For TF >= 2.1 this value isn’t actually consulted by TFQ,
# but we keep a compatible prompt/flag.
TF_CUDA_VERSION="11"

# --- sanity: python is importable and has TF -------------------------------
if [[ ! -x "${PYTHON_BIN_PATH}" ]]; then
die "${PYTHON_BIN_PATH} not found/executable."
fi

HEADER_DIR=${TF_CFLAGS:2}
if is_windows; then
SHARED_LIBRARY_DIR=${SHARED_LIBRARY_DIR//\\//}
SHARED_LIBRARY_NAME=${SHARED_LIBRARY_NAME//\\//}
HEADER_DIR=${HEADER_DIR//\\//}
# Ensure TF is importable by the selected Python (the user should have installed it).
tf_output=$("${PYTHON_BIN_PATH}" - <<'PY'
import sys
import os
import glob

try:
import tensorflow as tf
import tensorflow.sysconfig as sc
except ImportError:
sys.exit(1)

print(sc.get_include())

lib_path = sc.get_lib()
lib_dir = lib_path if os.path.isdir(lib_path) else os.path.dirname(lib_path)
print(lib_dir)

cands = (glob.glob(os.path.join(lib_dir, 'libtensorflow_framework.so*')) or
glob.glob(os.path.join(lib_dir, 'libtensorflow.so*')) or
glob.glob(os.path.join(lib_dir, '_pywrap_tensorflow_internal.*')))
print(os.path.basename(cands[0]) if cands else 'libtensorflow_framework.so.2')
PY
)

if [[ $? -ne 0 ]]; then
echo "ERROR: tensorflow not importable by Python (${PYTHON_BIN_PATH})" >&2
exit 1
fi
write_action_env_to_bazelrc "TF_HEADER_DIR" ${HEADER_DIR}
write_action_env_to_bazelrc "TF_SHARED_LIBRARY_DIR" ${SHARED_LIBRARY_DIR}
write_action_env_to_bazelrc "TF_SHARED_LIBRARY_NAME" ${SHARED_LIBRARY_NAME}
write_action_env_to_bazelrc "TF_NEED_CUDA" ${TF_NEED_CUDA}

{
read -r HDR
read -r LIBDIR
read -r LIBNAME
} <<< "${tf_output}"

echo "Detected:"
echo " PYTHON_BIN_PATH=$PYTHON_BIN_PATH"
echo " TF_HEADER_DIR=$HDR"
echo " TF_SHARED_LIBRARY_DIR=$LIBDIR"
echo " TF_SHARED_LIBRARY_NAME=$LIBNAME"

# --- write .tf_configure.bazelrc (repo_env for repository rules) -----------
write_tf_rc "build --repo_env=PYTHON_BIN_PATH=$PYTHON_BIN_PATH"
write_tf_rc "build --repo_env=TF_HEADER_DIR=$HDR"
write_tf_rc "build --repo_env=TF_SHARED_LIBRARY_DIR=$LIBDIR"
write_tf_rc "build --repo_env=TF_SHARED_LIBRARY_NAME=$LIBNAME"
write_tf_rc "build --repo_env=TF_NEED_CUDA=$TF_NEED_CUDA"
write_tf_rc "build --repo_env=TF_CUDA_VERSION=$TF_CUDA_VERSION"

# Make sure repository rules and sub-configurations see legacy Keras (Keras 2 instead of Keras 3).
write_tf_rc "build --repo_env=TF_USE_LEGACY_KERAS=1"

# --- write third_party/python_legacy/ with interpreter --------------------
write_legacy_python_repo

# --- write .bazelrc (imports the TF config plus the usual build flags) ------
write_bazelrc "try-import %workspace%/.tf_configure.bazelrc"
write_bazelrc "build --experimental_repo_remote_exec"
write_bazelrc "build --spawn_strategy=standalone"
write_bazelrc "build --strategy=Genrule=standalone"
write_bazelrc "build -c opt"
write_bazelrc "build --cxxopt=\"-D_GLIBCXX_USE_CXX11_ABI=1\""
write_bazelrc "build --cxxopt=\"-std=c++17\""
write_bazelrc "build --action_env=PYTHON_BIN_PATH=$PYTHON_BIN_PATH"
write_bazelrc "build --action_env=TF_USE_LEGACY_KERAS=1"
write_bazelrc "test --action_env=TF_USE_LEGACY_KERAS=1"


# zlib / protobuf warning suppressions
write_bazelrc "build --per_file_copt=external/.*@-Wno-deprecated-non-prototype"
write_bazelrc "build --host_per_file_copt=external/.*@-Wno-deprecated-non-prototype"
write_bazelrc "build --per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function"
write_bazelrc "build --host_per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function"

# qsim warnings
# The following suppress warnings coming from qsim.
# TODO: fix the code in qsim & update TFQ to use the updated version.
write_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable"
write_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable"
write_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations"
write_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations"


# rpath so the dynamic linker finds TF’s shared lib
if ! is_windows; then
write_linkopt_dir_to_bazelrc ${SHARED_LIBRARY_DIR}
write_bazelrc "build --linkopt=-Wl,-rpath,${LIBDIR}"
fi
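To confirm the rpath actually lands in a built extension, the shared object can be inspected after a build; the target path below is illustrative:

# Linux: list dynamic-section entries and look for RPATH/RUNPATH pointing at the TF lib dir.
readelf -d bazel-bin/tensorflow_quantum/core/ops/_tfq_simulate_ops.so | grep -iE 'rpath|runpath'
# macOS: otool -l <built .so> | grep -A2 LC_RPATH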

# TODO(yifeif): do not hardcode path
# CUDA toggle
if [[ "$TF_NEED_CUDA" == "1" ]]; then
write_to_bazelrc "build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true"
write_to_bazelrc "build:cuda --@local_config_cuda//:enable_cuda"
write_to_bazelrc "build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain"

write_action_env_to_bazelrc "TF_CUDA_VERSION" ${TF_CUDA_VERSION}
write_action_env_to_bazelrc "TF_CUDNN_VERSION" "8"
write_bazelrc "build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true"
write_bazelrc "build:cuda --@local_config_cuda//:enable_cuda"
write_bazelrc "build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain"
if is_windows; then
write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}"
write_action_env_to_bazelrc "CUDA_TOOLKIT_PATH" "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}"
write_tf_rc "build --repo_env=CUDNN_INSTALL_PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}"
write_tf_rc "build --repo_env=CUDA_TOOLKIT_PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}"
else
write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "/usr/lib/x86_64-linux-gnu"
write_action_env_to_bazelrc "CUDA_TOOLKIT_PATH" "/usr/local/cuda"
write_tf_rc "build --repo_env=CUDNN_INSTALL_PATH=/usr/lib/x86_64-linux-gnu"
write_tf_rc "build --repo_env=CUDA_TOOLKIT_PATH=/usr/local/cuda"
fi
write_to_bazelrc "build --config=cuda"
write_to_bazelrc "test --config=cuda"
write_bazelrc "build --config=cuda"
write_bazelrc "test --config=cuda"
fi
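Because build --config=cuda and test --config=cuda are appended to .bazelrc, a plain bazel invocation after a GPU-selected configure already picks up the CUDA settings; the target pattern is illustrative:

# No extra flags needed once configure.sh has written the cuda config.
bazel build //tensorflow_quantum/...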

echo
echo "Wrote .tf_configure.bazelrc and .bazelrc successfully."